//===- MemoryDependenceAnalysis.cpp - Mem Deps Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an analysis that determines, for a given memory
// operation, what preceding memory operations it depends on. It builds on
// alias analysis information, and tries to provide a lazy, caching interface to
// a common kind of alias information query.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memdep"

STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
STATISTIC(NumCacheDirtyNonLocal, "Number of dirty cached non-local responses");
STATISTIC(NumUncacheNonLocal, "Number of uncached non-local responses");

STATISTIC(NumCacheNonLocalPtr,
          "Number of fully cached non-local ptr responses");
STATISTIC(NumCacheDirtyNonLocalPtr,
          "Number of cached, but dirty, non-local ptr responses");
STATISTIC(NumUncacheNonLocalPtr, "Number of uncached non-local ptr responses");
STATISTIC(NumCacheCompleteNonLocalPtr,
          "Number of block queries that were completely cached");

// Limit for the number of instructions to scan in a block.
static cl::opt<unsigned> BlockScanLimit(
    "memdep-block-scan-limit", cl::Hidden, cl::init(100),
    cl::desc("The number of instructions to scan in a block in memory "
             "dependency analysis (default = 100)"));

static cl::opt<unsigned>
    BlockNumberLimit("memdep-block-number-limit", cl::Hidden, cl::init(1000),
                     cl::desc("The number of blocks to scan during memory "
                              "dependency analysis (default = 1000)"));

// Limit on the number of memdep results to process.
static const unsigned int NumResultsLimit = 100;

/// This is a helper function that removes Val from 'Inst's set in ReverseMap.
///
/// If the set becomes empty, remove Inst's entry.
template <typename KeyTy>
static void
RemoveFromReverseMap(DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>> &ReverseMap,
                     Instruction *Inst, KeyTy Val) {
  typename DenseMap<Instruction *, SmallPtrSet<KeyTy, 4>>::iterator InstIt =
      ReverseMap.find(Inst);
  assert(InstIt != ReverseMap.end() && "Reverse map out of sync?");
  bool Found = InstIt->second.erase(Val);
  assert(Found && "Invalid reverse map!");
  (void)Found;
  if (InstIt->second.empty())
    ReverseMap.erase(InstIt);
}

/// If the given instruction references a specific memory location, fill in Loc
/// with the details, otherwise set Loc.Ptr to null.
///
/// Returns a ModRefInfo value describing the general behavior of the
/// instruction.
static ModRefInfo GetLocation(const Instruction *Inst, MemoryLocation &Loc,
                              const TargetLibraryInfo &TLI) {
  if (const LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    if (LI->isUnordered()) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::Ref;
    }
    if (LI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(LI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->isUnordered()) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::Mod;
    }
    if (SI->getOrdering() == AtomicOrdering::Monotonic) {
      Loc = MemoryLocation::get(SI);
      return ModRefInfo::ModRef;
    }
    Loc = MemoryLocation();
    return ModRefInfo::ModRef;
  }

  if (const VAArgInst *V = dyn_cast<VAArgInst>(Inst)) {
    Loc = MemoryLocation::get(V);
    return ModRefInfo::ModRef;
  }

  if (const CallInst *CI = isFreeCall(Inst, &TLI)) {
    // calls to free() deallocate the entire structure
    Loc = MemoryLocation::getAfter(CI->getArgOperand(0));
    return ModRefInfo::Mod;
  }

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::invariant_end:
      Loc = MemoryLocation::getForArgument(II, 2, TLI);
      // These intrinsics don't really modify the memory, but returning Mod
      // will allow them to be handled conservatively.
      return ModRefInfo::Mod;
    case Intrinsic::masked_load:
      Loc = MemoryLocation::getForArgument(II, 0, TLI);
      return ModRefInfo::Ref;
    case Intrinsic::masked_store:
      Loc = MemoryLocation::getForArgument(II, 1, TLI);
      return ModRefInfo::Mod;
    default:
      break;
    }
  }

  // Otherwise, just do the coarse-grained thing that always works.
  if (Inst->mayWriteToMemory())
    return ModRefInfo::ModRef;
  if (Inst->mayReadFromMemory())
    return ModRefInfo::Ref;
  return ModRefInfo::NoModRef;
}

/// Private helper for finding the local dependencies of a call site.
MemDepResult MemoryDependenceResults::getCallDependencyFrom(
    CallBase *Call, bool isReadOnlyCall, BasicBlock::iterator ScanIt,
    BasicBlock *BB) {
  unsigned Limit = getDefaultBlockScanLimit();

  // Walk backwards through the block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;
    // Debug intrinsics don't cause dependences and should not affect Limit.
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --Limit;
    if (!Limit)
      return MemDepResult::getUnknown();

    // If this inst is a memory op, get the pointer it accessed.
    MemoryLocation Loc;
    ModRefInfo MR = GetLocation(Inst, Loc, TLI);
    if (Loc.Ptr) {
      // A simple instruction.
      if (isModOrRefSet(AA.getModRefInfo(Call, Loc)))
        return MemDepResult::getClobber(Inst);
      continue;
    }

    if (auto *CallB = dyn_cast<CallBase>(Inst)) {
      // If these two calls do not interfere, look past it.
      if (isNoModRef(AA.getModRefInfo(Call, CallB))) {
        // If the two calls are the same, return Inst as a Def, so that
        // Call can be found redundant and eliminated.
        if (isReadOnlyCall && !isModSet(MR) &&
            Call->isIdenticalToWhenDefined(CallB))
          return MemDepResult::getDef(Inst);

        // Otherwise, if the two calls don't interact (e.g. CallB is readnone),
        // keep scanning.
        continue;
      } else
        return MemDepResult::getClobber(Inst);
    }

    // If we could not obtain a pointer for the instruction and the instruction
    // touches memory, then assume that this is a dependency.
    if (isModOrRefSet(MR))
      return MemDepResult::getClobber(Inst);
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  MemDepResult InvariantGroupDependency = MemDepResult::getUnknown();
  if (QueryInst != nullptr) {
    if (auto *LI = dyn_cast<LoadInst>(QueryInst)) {
      InvariantGroupDependency = getInvariantGroupPointerDependency(LI, BB);

      if (InvariantGroupDependency.isDef())
        return InvariantGroupDependency;
    }
  }
  MemDepResult SimpleDep = getSimplePointerDependencyFrom(
      MemLoc, isLoad, ScanIt, BB, QueryInst, Limit, BatchAA);
  if (SimpleDep.isDef())
    return SimpleDep;
  // A non-local invariant group dependency indicates a non-local Def (it is
  // only returned as non-local when a non-local Def was found), which is
  // better than a local clobber and everything else.
  if (InvariantGroupDependency.isNonLocal())
    return InvariantGroupDependency;

  assert(InvariantGroupDependency.isUnknown() &&
         "InvariantGroupDependency should be only unknown at this point");
  return SimpleDep;
}

MemDepResult MemoryDependenceResults::getPointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit) {
  BatchAAResults BatchAA(AA);
  return getPointerDependencyFrom(MemLoc, isLoad, ScanIt, BB, QueryInst, Limit,
                                  BatchAA);
}

MemDepResult
MemoryDependenceResults::getInvariantGroupPointerDependency(LoadInst *LI,
                                                            BasicBlock *BB) {

  if (!LI->hasMetadata(LLVMContext::MD_invariant_group))
    return MemDepResult::getUnknown();

  // Take the pointer operand after stripping all casts and zero GEPs, so that
  // we only need to search the cast graph downward.
  Value *LoadOperand = LI->getPointerOperand()->stripPointerCasts();

  // It is not safe to walk the use list of a global value, because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside
  // of the current function.
  if (isa<GlobalValue>(LoadOperand))
    return MemDepResult::getUnknown();

  // Queue to process all pointers that are equivalent to the load operand.
  SmallVector<const Value *, 8> LoadOperandsQueue;
  LoadOperandsQueue.push_back(LoadOperand);

  Instruction *ClosestDependency = nullptr;
  // The order of instructions in a use list is unpredictable. In order to
  // always get the same result, we will look for the closest dominating
  // instruction.
  auto GetClosestDependency = [this](Instruction *Best, Instruction *Other) {
    assert(Other && "Must be called with a non-null instruction");
    if (Best == nullptr || DT.dominates(Best, Other))
      return Other;
    return Best;
  };

  // FIXME: This loop is O(N^2) because dominates can be O(n) and in the worst
  // case we will see all the instructions. This should be fixed in MSSA.
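  // Illustrative sketch (hypothetical IR, not from an actual test case):
  //
  //   store i32 42, i32* %ptr, !invariant.group !0
  //   %bc = bitcast i32* %ptr to i32*
  //   %val = load i32, i32* %bc, !invariant.group !0
  //
  // Starting from the load operand stripped down to %ptr, the BFS below
  // visits bitcasts and all-zero GEPs (here %bc) and records the dominating
  // store tagged with !invariant.group as the closest dependency for %val.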
  while (!LoadOperandsQueue.empty()) {
    const Value *Ptr = LoadOperandsQueue.pop_back_val();
    assert(Ptr && !isa<GlobalValue>(Ptr) &&
           "Null or GlobalValue should not be inserted");

    for (const Use &Us : Ptr->uses()) {
      auto *U = dyn_cast<Instruction>(Us.getUser());
      if (!U || U == LI || !DT.dominates(U, LI))
        continue;

      // A bitcast or gep with zero indices uses Ptr. Add it to the queue to
      // check its users.  U = bitcast Ptr
      if (isa<BitCastInst>(U)) {
        LoadOperandsQueue.push_back(U);
        continue;
      }
      // A gep with all-zero indices is equivalent to a bitcast.
      // FIXME: we are not sure if some bitcasts should be canonicalized to
      // gep 0 or gep 0 to bitcast because of SROA, so there are 2 forms. When
      // typeless pointers are ready, both cases will be gone (and this BFS
      // also won't be needed).
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U))
        if (GEP->hasAllZeroIndices()) {
          LoadOperandsQueue.push_back(U);
          continue;
        }

      // If we hit a load/store with the same invariant.group metadata (and
      // the same pointer operand), we can assume that the value pointed to by
      // the pointer operand didn't change.
      if ((isa<LoadInst>(U) ||
           (isa<StoreInst>(U) &&
            cast<StoreInst>(U)->getPointerOperand() == Ptr)) &&
          U->hasMetadata(LLVMContext::MD_invariant_group))
        ClosestDependency = GetClosestDependency(ClosestDependency, U);
    }
  }

  if (!ClosestDependency)
    return MemDepResult::getUnknown();
  if (ClosestDependency->getParent() == BB)
    return MemDepResult::getDef(ClosestDependency);
  // Def(U) can't be returned here because it is non-local. If no local
  // dependency is found, return non-local, counting on the user calling
  // getNonLocalPointerDependency, which will return the cached result.
  NonLocalDefsCache.try_emplace(
      LI, NonLocalDepResult(ClosestDependency->getParent(),
                            MemDepResult::getDef(ClosestDependency), nullptr));
  ReverseNonLocalDefsCache[ClosestDependency].insert(LI);
  return MemDepResult::getNonLocal();
}

MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
    const MemoryLocation &MemLoc, bool isLoad, BasicBlock::iterator ScanIt,
    BasicBlock *BB, Instruction *QueryInst, unsigned *Limit,
    BatchAAResults &BatchAA) {
  bool isInvariantLoad = false;

  unsigned DefaultLimit = getDefaultBlockScanLimit();
  if (!Limit)
    Limit = &DefaultLimit;

  // We must be careful with atomic accesses, as they may allow another thread
  // to touch this location, clobbering it. We are conservative: if the
  // QueryInst is not a simple (non-atomic) memory access, we automatically
  // return getClobber.
  // If it is simple, we know based on the results of
  // "Compiler testing via a theory of sound optimisations in the C11/C++11
  // memory model" in PLDI 2013, that a non-atomic location can only be
  // clobbered between a pair of a release and an acquire action, with no
  // access to the location in between.
  // Here is an example for giving the general intuition behind this rule.
  // In the following code:
  //   store x 0;
  //   release action; [1]
  //   acquire action; [4]
  //   %val = load x;
  // It is unsafe to replace %val by 0 because another thread may be running:
  //   acquire action; [2]
  //   store x 42;
  //   release action; [3]
  // with synchronization from 1 to 2 and from 3 to 4, resulting in %val
  // being 42.
  // A key property of this program, however, is that if either 1 or 4 were
  // missing, there would be a race between the store of 42 and either the
  // store of 0 or the load (making the whole program racy).
  // The paper mentioned above shows that the same property is respected by
  // every program that can detect any optimization of that kind: either it is
  // racy (undefined) or there is a release followed by an acquire between the
  // pair of accesses under consideration.

  // If the load is invariant, we "know" that it doesn't alias *any* write. We
  // do want to respect mustalias results since defs are useful for value
  // forwarding, but any mayalias write can be assumed to be noalias.
  // Arguably, this logic should be pushed inside AliasAnalysis itself.
  if (isLoad && QueryInst) {
    LoadInst *LI = dyn_cast<LoadInst>(QueryInst);
    if (LI && LI->hasMetadata(LLVMContext::MD_invariant_load))
      isInvariantLoad = true;
  }

  // Returns true for a volatile instruction. For a load or store, returns
  // true if its atomic ordering is stronger than AO; for any other
  // instruction, returns true if it can read or write memory.
  auto isComplexForReordering = [](Instruction *I, AtomicOrdering AO) -> bool {
    if (I->isVolatile())
      return true;
    if (auto *LI = dyn_cast<LoadInst>(I))
      return isStrongerThan(LI->getOrdering(), AO);
    if (auto *SI = dyn_cast<StoreInst>(I))
      return isStrongerThan(SI->getOrdering(), AO);
    return I->mayReadOrWriteMemory();
  };

  // Walk backwards through the basic block, looking for dependencies.
  while (ScanIt != BB->begin()) {
    Instruction *Inst = &*--ScanIt;

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
      // Debug intrinsics don't (and can't) cause dependencies.
      if (isa<DbgInfoIntrinsic>(II))
        continue;

    // Limit the amount of scanning we do so we don't end up with quadratic
    // running time on extreme testcases.
    --*Limit;
    if (!*Limit)
      return MemDepResult::getUnknown();

    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
      // If we reach a lifetime begin or end marker, then the query ends here
      // because the value is undefined.
      Intrinsic::ID ID = II->getIntrinsicID();
      switch (ID) {
      case Intrinsic::lifetime_start: {
        // FIXME: This only considers queries directly on the invariant-tagged
        // pointer, not on query pointers that are indexed off of them. It'd
        // be nice to handle that at some point (the right approach is to use
        // GetPointerBaseWithConstantOffset).
        MemoryLocation ArgLoc = MemoryLocation::getAfter(II->getArgOperand(1));
        if (BatchAA.isMustAlias(ArgLoc, MemLoc))
          return MemDepResult::getDef(II);
        continue;
      }
      case Intrinsic::masked_load:
      case Intrinsic::masked_store: {
        MemoryLocation Loc;
        /*ModRefInfo MR =*/ GetLocation(II, Loc, TLI);
        AliasResult R = BatchAA.alias(Loc, MemLoc);
        if (R == AliasResult::NoAlias)
          continue;
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(II);
        if (ID == Intrinsic::masked_load)
          continue;
        return MemDepResult::getClobber(II);
      }
      }
    }

    // Values depend on loads if the pointers are must-aliased. This means
    // that a load depends on another must-aliased load from the same value.
    // One exception is atomic loads: a value can depend on an atomic load
    // that it does not alias when that atomic load indicates that another
    // thread may be accessing the location.
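    // For example (hypothetical IR), given:
    //
    //   %a = load i32, i32* %p
    //   %b = load i32, i32* %p
    //
    // a query for %b sees %a with a MustAlias result and returns Def(%a), so
    // a client such as GVN may forward the value of %a to %b.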
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses, for example, can
      // be safely reordered with volatile accesses.
      if (LI->isVolatile()) {
        if (!QueryInst)
          // Original QueryInst *may* be volatile.
          return MemDepResult::getClobber(LI);
        if (QueryInst->isVolatile())
          // Ordering is required if QueryInst is itself volatile.
          return MemDepResult::getClobber(LI);
        // Otherwise, volatile doesn't imply any special ordering.
      }

      // Atomic loads have complications involved.
      // A Monotonic (or higher) load is OK if the query inst is itself not
      // atomic.
      // FIXME: This is overly conservative.
      if (LI->isAtomic() && isStrongerThanUnordered(LI->getOrdering())) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::NotAtomic))
          return MemDepResult::getClobber(LI);
        if (LI->getOrdering() != AtomicOrdering::Monotonic)
          return MemDepResult::getClobber(LI);
      }

      MemoryLocation LoadLoc = MemoryLocation::get(LI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(LoadLoc, MemLoc);

      if (isLoad) {
        if (R == AliasResult::NoAlias)
          continue;

        // Must-aliased loads are defs of each other.
        if (R == AliasResult::MustAlias)
          return MemDepResult::getDef(Inst);

        // If we have a partial alias, then return this as a clobber for the
        // client to handle.
        if (R == AliasResult::PartialAlias && R.hasOffset()) {
          ClobberOffsets[LI] = R.getOffset();
          return MemDepResult::getClobber(Inst);
        }

        // Loads that merely may-alias each other do not create a dependence.
        continue;
      }

      // Stores don't depend on other non-aliased accesses.
      if (R == AliasResult::NoAlias)
        continue;

      // Stores don't alias loads from read-only memory.
      if (BatchAA.pointsToConstantMemory(LoadLoc))
        continue;

      // Stores depend on may/must aliased loads.
      return MemDepResult::getDef(Inst);
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      // Atomic stores have complications involved.
      // A Monotonic store is OK if the query inst is itself not atomic.
      // FIXME: This is overly conservative.
      if (!SI->isUnordered() && SI->isAtomic()) {
        if (!QueryInst ||
            isComplexForReordering(QueryInst, AtomicOrdering::Unordered))
          return MemDepResult::getClobber(SI);
        // OK, if we are here, the guard above guarantees that QueryInst is a
        // non-atomic or unordered load/store.
        // SI is atomic with monotonic or release semantics (a seq_cst store
        // is really release semantics plus a total order over other seq_cst
        // instructions; since QueryInst is not seq_cst, we can treat SI as
        // having simple release semantics).
        // Monotonic and release semantics allow reordering before the store,
        // so we are safe to go further and check the aliasing. That check
        // will prohibit reordering if the locations may or must alias.
      }

      // While volatile accesses cannot be eliminated, they do not have to
      // clobber non-aliasing locations, as normal accesses can, for example,
      // be reordered with volatile accesses.
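      // For instance (hypothetical IR), a simple load of %p can be reordered
      // across "store volatile i32 0, i32* %q" when %p and %q do not alias,
      // so the volatile store below is reported as a clobber only when the
      // query instruction is itself volatile (or is unknown).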
      if (SI->isVolatile())
        if (!QueryInst || QueryInst->isVolatile())
          return MemDepResult::getClobber(SI);

      // If alias analysis can tell that this store is guaranteed to not modify
      // the query pointer, ignore it. Use getModRefInfo to handle cases where
      // the query pointer points to constant memory etc.
      if (!isModOrRefSet(BatchAA.getModRefInfo(SI, MemLoc)))
        continue;

      // Ok, this store might clobber the query pointer. Check to see if it is
      // a must alias: in this case, we want to return this as a def.
      // FIXME: Use ModRefInfo::Must bit from getModRefInfo call above.
      MemoryLocation StoreLoc = MemoryLocation::get(SI);

      // If we found a pointer, check if it could be the same as our pointer.
      AliasResult R = BatchAA.alias(StoreLoc, MemLoc);

      if (R == AliasResult::NoAlias)
        continue;
      if (R == AliasResult::MustAlias)
        return MemDepResult::getDef(Inst);
      if (isInvariantLoad)
        continue;
      return MemDepResult::getClobber(Inst);
    }

    // If this is an allocation, and if we know that the accessed pointer is to
    // the allocation, return Def. This means that there is no dependence and
    // the access can be optimized based on that. For example, a load could
    // turn into undef. Note that we can bypass the allocation itself when
    // looking for a clobber in many cases; that's an alias property and is
    // handled by BasicAA.
    if (isa<AllocaInst>(Inst) || isNoAliasCall(Inst)) {
      const Value *AccessPtr = getUnderlyingObject(MemLoc.Ptr);
      if (AccessPtr == Inst || BatchAA.isMustAlias(Inst, AccessPtr))
        return MemDepResult::getDef(Inst);
    }

    if (isInvariantLoad)
      continue;

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads or stores 'before' the
    // fence. As a result, we look past it when finding a dependency for
    // loads. DSE uses this to find preceding stores to delete and thus we
    // can't bypass the fence if the query instruction is a store.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (isLoad && FI->getOrdering() == AtomicOrdering::Release)
        continue;

    // See if this instruction (e.g. a call or vaarg) mod/ref's the pointer.
    ModRefInfo MR = BatchAA.getModRefInfo(Inst, MemLoc);
    // If necessary, perform additional analysis.
    if (isModAndRefSet(MR))
      MR = BatchAA.callCapturesBefore(Inst, MemLoc, &DT);
    switch (clearMust(MR)) {
    case ModRefInfo::NoModRef:
      // If the call has no effect on the queried pointer, just ignore it.
      continue;
    case ModRefInfo::Mod:
      return MemDepResult::getClobber(Inst);
    case ModRefInfo::Ref:
      // If the call is known to never store to the pointer, and if this is a
      // load query, we can safely ignore it (scan past it).
      if (isLoad)
        continue;
      LLVM_FALLTHROUGH;
    default:
      // Otherwise, there is a potential dependence. Return a clobber.
      return MemDepResult::getClobber(Inst);
    }
  }

  // No dependence found. If this is the entry block of the function, it is
  // unknown, otherwise it is non-local.
  if (BB != &BB->getParent()->getEntryBlock())
    return MemDepResult::getNonLocal();
  return MemDepResult::getNonFuncLocal();
}

MemDepResult MemoryDependenceResults::getDependency(Instruction *QueryInst) {
  ClobberOffsets.clear();
  Instruction *ScanPos = QueryInst;

  // Check for a cached result.
  MemDepResult &LocalCache = LocalDeps[QueryInst];

  // If the cached entry is non-dirty, just return it. Note that this depends
  // on MemDepResult's default constructing to 'dirty'.
  if (!LocalCache.isDirty())
    return LocalCache;

  // Otherwise, if we have a dirty entry, we know we can start the scan at that
  // instruction, which may save us some work.
  if (Instruction *Inst = LocalCache.getInst()) {
    ScanPos = Inst;

    RemoveFromReverseMap(ReverseLocalDeps, Inst, QueryInst);
  }

  BasicBlock *QueryParent = QueryInst->getParent();

  // Do the scan.
  if (BasicBlock::iterator(QueryInst) == QueryParent->begin()) {
    // No dependence found. If this is the entry block of the function, it is
    // unknown, otherwise it is non-local.
    if (QueryParent != &QueryParent->getParent()->getEntryBlock())
      LocalCache = MemDepResult::getNonLocal();
    else
      LocalCache = MemDepResult::getNonFuncLocal();
  } else {
    MemoryLocation MemLoc;
    ModRefInfo MR = GetLocation(QueryInst, MemLoc, TLI);
    if (MemLoc.Ptr) {
      // If we can do a pointer scan, make it happen.
      bool isLoad = !isModSet(MR);
      if (auto *II = dyn_cast<IntrinsicInst>(QueryInst))
        isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;

      LocalCache =
          getPointerDependencyFrom(MemLoc, isLoad, ScanPos->getIterator(),
                                   QueryParent, QueryInst, nullptr);
    } else if (auto *QueryCall = dyn_cast<CallBase>(QueryInst)) {
      bool isReadOnly = AA.onlyReadsMemory(QueryCall);
      LocalCache = getCallDependencyFrom(QueryCall, isReadOnly,
                                         ScanPos->getIterator(), QueryParent);
    } else
      // Non-memory instruction.
      LocalCache = MemDepResult::getUnknown();
  }

  // Remember the result!
  if (Instruction *I = LocalCache.getInst())
    ReverseLocalDeps[I].insert(QueryInst);

  return LocalCache;
}

#ifndef NDEBUG
/// This method is used when -debug is specified to verify that cache arrays
/// are properly kept sorted.
static void AssertSorted(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         int Count = -1) {
  if (Count == -1)
    Count = Cache.size();
  assert(std::is_sorted(Cache.begin(), Cache.begin() + Count) &&
         "Cache isn't sorted!");
}
#endif

const MemoryDependenceResults::NonLocalDepInfo &
MemoryDependenceResults::getNonLocalCallDependency(CallBase *QueryCall) {
  assert(getDependency(QueryCall).isNonLocal() &&
         "getNonLocalCallDependency should only be used on calls with "
         "non-local deps!");
  PerInstNLInfo &CacheP = NonLocalDepsMap[QueryCall];
  NonLocalDepInfo &Cache = CacheP.first;

  // This is the set of blocks that need to be recomputed. In the cached case,
  // this can happen due to instructions being deleted etc. In the uncached
  // case, this starts out as the set of predecessors we care about.
  SmallVector<BasicBlock *, 32> DirtyBlocks;

  if (!Cache.empty()) {
    // Okay, we have a cache entry. If we know it is not dirty, just return it
    // with no computation.
    if (!CacheP.second) {
      ++NumCacheNonLocal;
      return Cache;
    }

    // If we already have a partially computed set of results, scan them to
    // determine what is dirty, seeding our initial DirtyBlocks worklist.
    for (auto &Entry : Cache)
      if (Entry.getResult().isDirty())
        DirtyBlocks.push_back(Entry.getBB());

    // Sort the cache so that we can do fast binary search lookups below.
    llvm::sort(Cache);

    ++NumCacheDirtyNonLocal;
    // cerr << "CACHED CASE: " << DirtyBlocks.size() << " dirty: "
    //      << Cache.size() << " cached: " << *QueryInst;
  } else {
    // Seed DirtyBlocks with each of the preds of QueryInst's block.
    BasicBlock *QueryBB = QueryCall->getParent();
    append_range(DirtyBlocks, PredCache.get(QueryBB));
    ++NumUncacheNonLocal;
  }

  // isReadonlyCall - If this is a read-only call, we can be more aggressive.
  bool isReadonlyCall = AA.onlyReadsMemory(QueryCall);

  SmallPtrSet<BasicBlock *, 32> Visited;

  unsigned NumSortedEntries = Cache.size();
  LLVM_DEBUG(AssertSorted(Cache));

  // Iterate while we still have blocks to update.
  while (!DirtyBlocks.empty()) {
    BasicBlock *DirtyBB = DirtyBlocks.pop_back_val();

    // Already processed this block?
    if (!Visited.insert(DirtyBB).second)
      continue;

    // Do a binary search to see if we already have an entry for this block in
    // the cache set. If so, find it.
    LLVM_DEBUG(AssertSorted(Cache, NumSortedEntries));
    NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.begin() + NumSortedEntries,
                         NonLocalDepEntry(DirtyBB));
    if (Entry != Cache.begin() && std::prev(Entry)->getBB() == DirtyBB)
      --Entry;

    NonLocalDepEntry *ExistingResult = nullptr;
    if (Entry != Cache.begin() + NumSortedEntries &&
        Entry->getBB() == DirtyBB) {
      // If we already have an entry, and if it isn't already dirty, the block
      // is done.
      if (!Entry->getResult().isDirty())
        continue;

      // Otherwise, remember this slot so we can update the value.
      ExistingResult = &*Entry;
    }

    // If the dirty entry has a pointer, start scanning from it so we don't
    // have to rescan the entire block.
    BasicBlock::iterator ScanPos = DirtyBB->end();
    if (ExistingResult) {
      if (Instruction *Inst = ExistingResult->getResult().getInst()) {
        ScanPos = Inst->getIterator();
        // We're removing QueryInst's use of Inst.
        RemoveFromReverseMap<Instruction *>(ReverseNonLocalDeps, Inst,
                                            QueryCall);
      }
    }

    // Find out if this block has a local dependency for QueryInst.
    MemDepResult Dep;

    if (ScanPos != DirtyBB->begin()) {
      Dep = getCallDependencyFrom(QueryCall, isReadonlyCall, ScanPos, DirtyBB);
    } else if (DirtyBB != &DirtyBB->getParent()->getEntryBlock()) {
      // No dependence found. If this is the entry block of the function, it
      // is unknown, otherwise it is non-local.
      Dep = MemDepResult::getNonLocal();
    } else {
      Dep = MemDepResult::getNonFuncLocal();
    }

    // If we had a dirty entry for the block, update it. Otherwise, just add
    // a new entry.
    if (ExistingResult)
      ExistingResult->setResult(Dep);
    else
      Cache.push_back(NonLocalDepEntry(DirtyBB, Dep));

    // If the block has a dependency (i.e. it isn't completely transparent to
    // the value), remember the association!
    if (!Dep.isNonLocal()) {
      // Keep the ReverseNonLocalDeps map up to date so we can efficiently
      // update this when we remove instructions.
      if (Instruction *Inst = Dep.getInst())
        ReverseNonLocalDeps[Inst].insert(QueryCall);
    } else {
      // If the block *is* completely transparent to the load, we need to
      // check the predecessors of this block. Add them to our worklist.
      append_range(DirtyBlocks, PredCache.get(DirtyBB));
    }
  }

  return Cache;
}

void MemoryDependenceResults::getNonLocalPointerDependency(
    Instruction *QueryInst, SmallVectorImpl<NonLocalDepResult> &Result) {
  const MemoryLocation Loc = MemoryLocation::get(QueryInst);
  bool isLoad = isa<LoadInst>(QueryInst);
  BasicBlock *FromBB = QueryInst->getParent();
  assert(FromBB);

  assert(Loc.Ptr->getType()->isPointerTy() &&
         "Can't get pointer deps of a non-pointer!");
  Result.clear();
  {
    // Check if there is a cached Def with invariant.group.
    auto NonLocalDefIt = NonLocalDefsCache.find(QueryInst);
    if (NonLocalDefIt != NonLocalDefsCache.end()) {
      Result.push_back(NonLocalDefIt->second);
      ReverseNonLocalDefsCache[NonLocalDefIt->second.getResult().getInst()]
          .erase(QueryInst);
      NonLocalDefsCache.erase(NonLocalDefIt);
      return;
    }
  }
  // This routine does not expect to deal with volatile instructions.
  // Doing so would require piping the QueryInst all the way through.
  // TODO: volatiles can't be elided, but they can be reordered with other
  // non-volatile accesses.

  // We currently give up on any instruction which is ordered, but we do handle
  // atomic instructions which are unordered.
  // TODO: Handle ordered instructions.
  auto isOrdered = [](Instruction *Inst) {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
      return !LI->isUnordered();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      return !SI->isUnordered();
    }
    return false;
  };
  if (QueryInst->isVolatile() || isOrdered(QueryInst)) {
    Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                       const_cast<Value *>(Loc.Ptr)));
    return;
  }
  const DataLayout &DL = FromBB->getModule()->getDataLayout();
  PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL, &AC);

  // This is the set of blocks we've inspected, and the pointer we consider in
  // each block. Because of critical edges, we currently bail out if querying
  // a block with multiple different pointers. This can happen during PHI
  // translation.
  DenseMap<BasicBlock *, Value *> Visited;
  if (getNonLocalPointerDepFromBB(QueryInst, Address, Loc, isLoad, FromBB,
                                  Result, Visited, true))
    return;
  Result.clear();
  Result.push_back(NonLocalDepResult(FromBB, MemDepResult::getUnknown(),
                                     const_cast<Value *>(Loc.Ptr)));
}

/// Compute the memdep value for BB with Pointer/PointeeSize using either
/// cached information in Cache or by doing a lookup (which may use dirty cache
/// info if available).
///
/// If we do a lookup, add the result to the cache.
MemDepResult MemoryDependenceResults::getNonLocalInfoForBlock(
    Instruction *QueryInst, const MemoryLocation &Loc, bool isLoad,
    BasicBlock *BB, NonLocalDepInfo *Cache, unsigned NumSortedEntries,
    BatchAAResults &BatchAA) {

  bool isInvariantLoad = false;

  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Do a binary search to see if we already have an entry for this block in
  // the cache set. If so, find it.
  NonLocalDepInfo::iterator Entry = std::upper_bound(
      Cache->begin(), Cache->begin() + NumSortedEntries, NonLocalDepEntry(BB));
  if (Entry != Cache->begin() && (Entry - 1)->getBB() == BB)
    --Entry;

  NonLocalDepEntry *ExistingResult = nullptr;
  if (Entry != Cache->begin() + NumSortedEntries && Entry->getBB() == BB)
    ExistingResult = &*Entry;

  // Use the cached result for an invariant load only if there is no
  // dependency for the corresponding non-invariant load; in that case the
  // invariant load cannot have any dependency either.
  if (ExistingResult && isInvariantLoad &&
      !ExistingResult->getResult().isNonFuncLocal())
    ExistingResult = nullptr;

  // If we have a cached entry, and it is non-dirty, use it as the value for
  // this dependency.
  if (ExistingResult && !ExistingResult->getResult().isDirty()) {
    ++NumCacheNonLocalPtr;
    return ExistingResult->getResult();
  }

  // Otherwise, we have to scan for the value. If we have a dirty cache
  // entry, start scanning from its position, otherwise we scan from the end
  // of the block.
  BasicBlock::iterator ScanPos = BB->end();
  if (ExistingResult && ExistingResult->getResult().getInst()) {
    assert(ExistingResult->getResult().getInst()->getParent() == BB &&
           "Instruction invalidated?");
    ++NumCacheDirtyNonLocalPtr;
    ScanPos = ExistingResult->getResult().getInst()->getIterator();

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
  } else {
    ++NumUncacheNonLocalPtr;
  }

  // Scan the block for the dependency.
  MemDepResult Dep = getPointerDependencyFrom(Loc, isLoad, ScanPos, BB,
                                              QueryInst, nullptr, BatchAA);

  // Don't cache results for invariant loads.
  if (isInvariantLoad)
    return Dep;

  // If we had a dirty entry for the block, update it. Otherwise, just add
  // a new entry.
  if (ExistingResult)
    ExistingResult->setResult(Dep);
  else
    Cache->push_back(NonLocalDepEntry(BB, Dep));

  // If the block has a dependency (i.e. it isn't completely transparent to
  // the value), remember the reverse association because we just added it
  // to Cache!
  if (!Dep.isDef() && !Dep.isClobber())
    return Dep;

  // Keep the ReverseNonLocalPtrDeps map up to date so we can efficiently
  // update MemDep when we remove instructions.
  Instruction *Inst = Dep.getInst();
  assert(Inst && "Didn't depend on anything?");
  ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
  ReverseNonLocalPtrDeps[Inst].insert(CacheKey);
  return Dep;
}

/// Sort the NonLocalDepInfo cache, given a certain number of elements in the
/// array that are already properly ordered.
///
/// This is optimized for the case when only a few entries are added.
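///
/// For example (hypothetical contents): if the cache held the sorted entries
/// [BB1, BB2, BB4] and a single entry for BB3 was appended, one binary-search
/// insertion restores sorted order instead of a full llvm::sort.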
static void
SortNonLocalDepInfoCache(MemoryDependenceResults::NonLocalDepInfo &Cache,
                         unsigned NumSortedEntries) {
  switch (Cache.size() - NumSortedEntries) {
  case 0:
    // Done, no new entries.
    break;
  case 2: {
    // Two new entries, insert the last one into place.
    NonLocalDepEntry Val = Cache.back();
    Cache.pop_back();
    MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
        std::upper_bound(Cache.begin(), Cache.end() - 1, Val);
    Cache.insert(Entry, Val);
    LLVM_FALLTHROUGH;
  }
  case 1:
    // One new entry, just insert the new value at the appropriate position.
    if (Cache.size() != 1) {
      NonLocalDepEntry Val = Cache.back();
      Cache.pop_back();
      MemoryDependenceResults::NonLocalDepInfo::iterator Entry =
          llvm::upper_bound(Cache, Val);
      Cache.insert(Entry, Val);
    }
    break;
  default:
    // Added many values, do a full scale sort.
    llvm::sort(Cache);
    break;
  }
}

/// Perform a dependency query based on pointer/pointeesize starting at the end
/// of StartBB.
///
/// Add any clobber/def results to the results vector and keep track of which
/// blocks are visited in 'Visited'.
///
/// This has special behavior for the first block queries (when SkipFirstBlock
/// is true). In this special case, it ignores the contents of the specified
/// block and starts returning dependence info for its predecessors.
///
/// This function returns true on success, or false to indicate that it could
/// not compute dependence information for some reason. This should be treated
/// as a clobber dependence on the first instruction in the predecessor block.
bool MemoryDependenceResults::getNonLocalPointerDepFromBB(
    Instruction *QueryInst, const PHITransAddr &Pointer,
    const MemoryLocation &Loc, bool isLoad, BasicBlock *StartBB,
    SmallVectorImpl<NonLocalDepResult> &Result,
    DenseMap<BasicBlock *, Value *> &Visited, bool SkipFirstBlock,
    bool IsIncomplete) {
  // Look up the cached info for Pointer.
  ValueIsLoadPair CacheKey(Pointer.getAddr(), isLoad);

  // Set up a temporary NLPI value. If the map doesn't yet have an entry for
  // CacheKey, this value will be inserted as the associated value. Otherwise,
  // it'll be ignored, and we'll have to check to see if the cached size and
  // AA tags are consistent with the current query.
  NonLocalPointerInfo InitialNLPI;
  InitialNLPI.Size = Loc.Size;
  InitialNLPI.AATags = Loc.AATags;

  bool isInvariantLoad = false;
  if (LoadInst *LI = dyn_cast_or_null<LoadInst>(QueryInst))
    isInvariantLoad = LI->getMetadata(LLVMContext::MD_invariant_load);

  // Get the NLPI for CacheKey, inserting one into the map if it doesn't
  // already have one.
  std::pair<CachedNonLocalPointerInfo::iterator, bool> Pair =
      NonLocalPointerDeps.insert(std::make_pair(CacheKey, InitialNLPI));
  NonLocalPointerInfo *CacheInfo = &Pair.first->second;

  // If we already have a cache entry for this CacheKey, we may need to do some
  // work to reconcile the cache entry and the current query.
  // Invariant loads don't participate in caching. Thus no need to reconcile.
  if (!isInvariantLoad && !Pair.second) {
    if (CacheInfo->Size != Loc.Size) {
      bool ThrowOutEverything;
      if (CacheInfo->Size.hasValue() && Loc.Size.hasValue()) {
        // FIXME: We may be able to do better in the face of results with mixed
        // precision. We don't appear to get them in practice, though, so just
        // be conservative.
        ThrowOutEverything =
            CacheInfo->Size.isPrecise() != Loc.Size.isPrecise() ||
            CacheInfo->Size.getValue() < Loc.Size.getValue();
      } else {
        // For our purposes, unknown size > all others.
        ThrowOutEverything = !Loc.Size.hasValue();
      }

      if (ThrowOutEverything) {
        // The query's Size is greater than the cached one. Throw out the
        // cached data and proceed with the query at the greater size.
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->Size = Loc.Size;
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      } else {
        // This query's Size is less than the cached one. Conservatively
        // restart the query using the greater size.
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithNewSize(CacheInfo->Size), isLoad,
            StartBB, Result, Visited, SkipFirstBlock, IsIncomplete);
      }
    }

    // If the query's AATags are inconsistent with the cached one,
    // conservatively throw out the cached data and restart the query with
    // no tag if needed.
    if (CacheInfo->AATags != Loc.AATags) {
      if (CacheInfo->AATags) {
        CacheInfo->Pair = BBSkipFirstBlockPair();
        CacheInfo->AATags = AAMDNodes();
        for (auto &Entry : CacheInfo->NonLocalDeps)
          if (Instruction *Inst = Entry.getResult().getInst())
            RemoveFromReverseMap(ReverseNonLocalPtrDeps, Inst, CacheKey);
        CacheInfo->NonLocalDeps.clear();
        // The cache is cleared (in the above line) so we will have lost
        // information about blocks we have already visited. We therefore must
        // assume that the cache information is incomplete.
        IsIncomplete = true;
      }
      if (Loc.AATags)
        return getNonLocalPointerDepFromBB(
            QueryInst, Pointer, Loc.getWithoutAATags(), isLoad, StartBB,
            Result, Visited, SkipFirstBlock, IsIncomplete);
    }
  }

  NonLocalDepInfo *Cache = &CacheInfo->NonLocalDeps;

  // If we have valid cached information for exactly the block we are
  // investigating, just return it with no recomputation.
  // Don't use cached information for invariant loads since it is valid for
  // non-invariant loads only.
  if (!IsIncomplete && !isInvariantLoad &&
      CacheInfo->Pair == BBSkipFirstBlockPair(StartBB, SkipFirstBlock)) {
    // If we have a fully cached result for this query, then we can just
    // return the cached results and populate the visited set. However, we
    // have to verify that we don't already have conflicting results for
    // these blocks. Check to ensure that if a block in the results set is
    // in the visited set, it was for the same pointer query.
    if (!Visited.empty()) {
      for (auto &Entry : *Cache) {
        DenseMap<BasicBlock *, Value *>::iterator VI =
            Visited.find(Entry.getBB());
        if (VI == Visited.end() || VI->second == Pointer.getAddr())
          continue;

        // We have a pointer mismatch in a block. Just return false, saying
        // that something was clobbered in this result. We could also do a
        // non-fully cached query, but there is little point in doing this.
        return false;
      }
    }

    Value *Addr = Pointer.getAddr();
    for (auto &Entry : *Cache) {
      Visited.insert(std::make_pair(Entry.getBB(), Addr));
      if (Entry.getResult().isNonLocal()) {
        continue;
      }

      if (DT.isReachableFromEntry(Entry.getBB())) {
        Result.push_back(
            NonLocalDepResult(Entry.getBB(), Entry.getResult(), Addr));
      }
    }
    ++NumCacheCompleteNonLocalPtr;
    return true;
  }

  // Otherwise, this is either a new block, a block with an invalid cache
  // pointer, or one that we're about to invalidate by putting more info into
  // it than its valid cache info. If empty and not explicitly indicated as
  // incomplete, the result will be valid cache info, otherwise it isn't.
  //
  // Invariant loads don't affect the cache in any way, thus there is no need
  // to update CacheInfo either.
  if (!isInvariantLoad) {
    if (!IsIncomplete && Cache->empty())
      CacheInfo->Pair = BBSkipFirstBlockPair(StartBB, SkipFirstBlock);
    else
      CacheInfo->Pair = BBSkipFirstBlockPair();
  }

  SmallVector<BasicBlock *, 32> Worklist;
  Worklist.push_back(StartBB);

  // PredList used inside loop.
  SmallVector<std::pair<BasicBlock *, PHITransAddr>, 16> PredList;

  // Keep track of the entries that we know are sorted. Previously cached
  // entries will all be sorted. The entries we add we only sort on demand (we
  // don't insert every element into its sorted position). We know that we
  // won't get any reuse from currently inserted values, because we don't
  // revisit blocks after we insert info for them.
  unsigned NumSortedEntries = Cache->size();
  unsigned WorklistEntries = BlockNumberLimit;
  bool GotWorklistLimit = false;
  LLVM_DEBUG(AssertSorted(*Cache));

  BatchAAResults BatchAA(AA);
  while (!Worklist.empty()) {
    BasicBlock *BB = Worklist.pop_back_val();

    // If we do process a large number of blocks, it becomes very expensive
    // and likely it isn't worth worrying about.
    if (Result.size() > NumResultsLimit) {
      Worklist.clear();
      // Sort it now (if needed) so that recursive invocations of
      // getNonLocalPointerDepFromBB and other routines that could reuse the
      // cache value will only see properly sorted cache arrays.
      if (Cache && NumSortedEntries != Cache->size()) {
        SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      }
      // Since we bail out, the "Cache" set won't contain all of the
      // results for the query. This is ok (we can still use it to accelerate
      // specific block queries) but we can't do the fastpath "return all
      // results from the set". Clear out the indicator for this.
      CacheInfo->Pair = BBSkipFirstBlockPair();
      return false;
    }

    // Skip the first block if we have it.
    if (!SkipFirstBlock) {
      // Analyze the dependency of *Pointer in FromBB. See if we have already
      // been here.
      assert(Visited.count(BB) && "Should check 'visited' before adding to WL");

      // Get the dependency info for Pointer in BB. If we have cached
      // information, we will use it, otherwise we compute it.
      LLVM_DEBUG(AssertSorted(*Cache, NumSortedEntries));
      MemDepResult Dep = getNonLocalInfoForBlock(
          QueryInst, Loc, isLoad, BB, Cache, NumSortedEntries, BatchAA);

      // If we got a Def or Clobber, add this to the list of results.
      if (!Dep.isNonLocal()) {
        if (DT.isReachableFromEntry(BB)) {
          Result.push_back(NonLocalDepResult(BB, Dep, Pointer.getAddr()));
          continue;
        }
      }
    }

    // If 'Pointer' is an instruction defined in this block, then we need to do
    // phi translation to change it into a value live in the predecessor block.
    // If not, we just add the predecessors to the worklist and scan them with
    // the same Pointer.
    if (!Pointer.NeedsPHITranslationFromBlock(BB)) {
      SkipFirstBlock = false;
      SmallVector<BasicBlock *, 16> NewBlocks;
      for (BasicBlock *Pred : PredCache.get(BB)) {
        // Verify that we haven't looked at this block yet.
        std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
            Visited.insert(std::make_pair(Pred, Pointer.getAddr()));
        if (InsertRes.second) {
          // First time we've looked at Pred.
          NewBlocks.push_back(Pred);
          continue;
        }

        // If we have seen this block before, but it was with a different
        // pointer, then we have a phi translation failure and we have to
        // treat this as a clobber.
        if (InsertRes.first->second != Pointer.getAddr()) {
          // Make sure to clean up the Visited map before continuing on to
          // PredTranslationFailure.
          for (unsigned i = 0; i < NewBlocks.size(); i++)
            Visited.erase(NewBlocks[i]);
          goto PredTranslationFailure;
        }
      }
      if (NewBlocks.size() > WorklistEntries) {
        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0; i < NewBlocks.size(); i++)
          Visited.erase(NewBlocks[i]);
        GotWorklistLimit = true;
        goto PredTranslationFailure;
      }
      WorklistEntries -= NewBlocks.size();
      Worklist.append(NewBlocks.begin(), NewBlocks.end());
      continue;
    }

    // We do need to do phi translation; if we know ahead of time that we
    // can't phi translate this value, don't even try.
    if (!Pointer.IsPotentiallyPHITranslatable())
      goto PredTranslationFailure;

    // We may have added values to the cache list before this PHI translation.
    // If so, we haven't done anything to ensure that the cache remains sorted.
    // Sort it now (if needed) so that recursive invocations of
    // getNonLocalPointerDepFromBB and other routines that could reuse the
    // cache value will only see properly sorted cache arrays.
    if (Cache && NumSortedEntries != Cache->size()) {
      SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
      NumSortedEntries = Cache->size();
    }
    Cache = nullptr;

    PredList.clear();
    for (BasicBlock *Pred : PredCache.get(BB)) {
      PredList.push_back(std::make_pair(Pred, Pointer));

      // Get the PHI translated pointer in this predecessor. This can fail if
      // it is not translatable, in which case getAddr() returns null.
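      // Illustrative example (hypothetical IR): if the pointer being tracked
      // in BB is
      //   %p = phi i8* [ %a, %pred1 ], [ %b, %pred2 ]
      // then translating %p into %pred1 yields %a, while a pointer with no
      // available translation in a predecessor yields a null address.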
      PHITransAddr &PredPointer = PredList.back().second;
      PredPointer.PHITranslateValue(BB, Pred, &DT, /*MustDominate=*/false);
      Value *PredPtrVal = PredPointer.getAddr();

      // Check to see if we have already visited this pred block with another
      // pointer. If so, we can't do this lookup. This failure can occur
      // with PHI translation when a critical edge exists and the PHI node in
      // the successor translates to a pointer value different than the
      // pointer the block was first analyzed with.
      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> InsertRes =
          Visited.insert(std::make_pair(Pred, PredPtrVal));

      if (!InsertRes.second) {
        // We found the pred; take it off the list of preds to visit.
        PredList.pop_back();

        // If the predecessor was visited with PredPtr, then we already did
        // the analysis and can ignore it.
        if (InsertRes.first->second == PredPtrVal)
          continue;

        // Otherwise, the block was previously analyzed with a different
        // pointer. We can't represent the result of this case, so we just
        // treat this as a phi translation failure.

        // Make sure to clean up the Visited map before continuing on to
        // PredTranslationFailure.
        for (unsigned i = 0, n = PredList.size(); i < n; ++i)
          Visited.erase(PredList[i].first);

        goto PredTranslationFailure;
      }
    }

    // Actually process results here; this needs to be a separate loop to
    // avoid calling getNonLocalPointerDepFromBB for blocks we don't want to
    // return any results for. (getNonLocalPointerDepFromBB will modify our
    // data structures in ways the code after the PredTranslationFailure label
    // doesn't expect.)
    for (unsigned i = 0, n = PredList.size(); i < n; ++i) {
      BasicBlock *Pred = PredList[i].first;
      PHITransAddr &PredPointer = PredList[i].second;
      Value *PredPtrVal = PredPointer.getAddr();

      bool CanTranslate = true;
      // If PHI translation was unable to find an available pointer in this
      // predecessor, then we have to assume that the pointer is clobbered in
      // that predecessor. We can still do PRE of the load, which would insert
      // a computation of the pointer in this predecessor.
      if (!PredPtrVal)
        CanTranslate = false;

      // FIXME: it is entirely possible that PHI translating will end up with
      // the same value. Consider PHI translating something like:
      //   X = phi [x, bb1], [y, bb2].
      // PHI translating for bb1 doesn't *need* to recurse here, pedantically
      // speaking.

      // If getNonLocalPointerDepFromBB fails here, that means the cached
      // result conflicted with the Visited list; we have to conservatively
      // assume it is unknown, but this also does not block PRE of the load.
      if (!CanTranslate ||
          !getNonLocalPointerDepFromBB(QueryInst, PredPointer,
                                       Loc.getWithNewPtr(PredPtrVal), isLoad,
                                       Pred, Result, Visited)) {
        // Add the entry to the Result list.
        NonLocalDepResult Entry(Pred, MemDepResult::getUnknown(), PredPtrVal);
        Result.push_back(Entry);

        // Since we had a phi translation failure, the cache for CacheKey
        // won't include all of the entries that we need to immediately
        // satisfy future queries. Mark this in NonLocalPointerDeps by setting
        // the BBSkipFirstBlockPair pointer to null. This causes reuse of the
        // cached value to do more work, but doesn't miss the phi translation
        // failure.
        NonLocalPointerInfo &NLPI = NonLocalPointerDeps[CacheKey];
        NLPI.Pair = BBSkipFirstBlockPair();
        continue;
      }
    }

    // Refresh the CacheInfo/Cache pointer so that it isn't invalidated.
    CacheInfo = &NonLocalPointerDeps[CacheKey];
    Cache = &CacheInfo->NonLocalDeps;
    NumSortedEntries = Cache->size();

    // Since we did phi translation, the "Cache" set won't contain all of the
    // results for the query. This is ok (we can still use it to accelerate
    // specific block queries) but we can't do the fastpath "return all
    // results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();
    SkipFirstBlock = false;
    continue;

  PredTranslationFailure:
    // The following code is "failure"; we can't produce a sane translation
    // for the given block. It assumes that we haven't modified any of
    // our data structures while processing the current block.

    if (!Cache) {
      // Refresh the CacheInfo/Cache pointer if it got invalidated.
      CacheInfo = &NonLocalPointerDeps[CacheKey];
      Cache = &CacheInfo->NonLocalDeps;
      NumSortedEntries = Cache->size();
    }

    // Since we failed phi translation, the "Cache" set won't contain all of
    // the results for the query. This is ok (we can still use it to
    // accelerate specific block queries) but we can't do the fastpath "return
    // all results from the set". Clear out the indicator for this.
    CacheInfo->Pair = BBSkipFirstBlockPair();

    // If *nothing* works, mark the pointer as unknown.
    //
    // If this is the magic first block, return this as a clobber of the whole
    // incoming value. Since we can't phi translate to one of the predecessors,
    // we have to bail out.
    if (SkipFirstBlock)
      return false;

    // Results of invariant loads are not cached, thus there is no need to
    // update cached information.
    if (!isInvariantLoad) {
      for (NonLocalDepEntry &I : llvm::reverse(*Cache)) {
        if (I.getBB() != BB)
          continue;

        assert((GotWorklistLimit || I.getResult().isNonLocal() ||
                !DT.isReachableFromEntry(BB)) &&
               "Should only be here with transparent block");

        I.setResult(MemDepResult::getUnknown());
        break;
      }
    }
    (void)GotWorklistLimit;
    // Go ahead and report unknown dependence.
    Result.push_back(
        NonLocalDepResult(BB, MemDepResult::getUnknown(), Pointer.getAddr()));
  }

  // Okay, we're done now. If we added new values to the cache, re-sort it.
  SortNonLocalDepInfoCache(*Cache, NumSortedEntries);
  LLVM_DEBUG(AssertSorted(*Cache));
  return true;
}

/// If P exists in CachedNonLocalPointerInfo or NonLocalDefsCache, remove it.
void MemoryDependenceResults::removeCachedNonLocalPointerDependencies(
    ValueIsLoadPair P) {

  // Most of the time this cache is empty.
  if (!NonLocalDefsCache.empty()) {
    auto it = NonLocalDefsCache.find(P.getPointer());
    if (it != NonLocalDefsCache.end()) {
      RemoveFromReverseMap(ReverseNonLocalDefsCache,
                           it->second.getResult().getInst(), P.getPointer());
      NonLocalDefsCache.erase(it);
    }

    if (auto *I = dyn_cast<Instruction>(P.getPointer())) {
      auto toRemoveIt = ReverseNonLocalDefsCache.find(I);
      if (toRemoveIt != ReverseNonLocalDefsCache.end()) {
        for (const auto *entry : toRemoveIt->second)
          NonLocalDefsCache.erase(entry);
        ReverseNonLocalDefsCache.erase(toRemoveIt);
      }
    }
  }

  CachedNonLocalPointerInfo::iterator It = NonLocalPointerDeps.find(P);
  if (It == NonLocalPointerDeps.end())
    return;

  // Remove all of the entries in the BB->val map. This involves removing
  // instructions from the reverse map.
  NonLocalDepInfo &PInfo = It->second.NonLocalDeps;

  for (const NonLocalDepEntry &DE : PInfo) {
    Instruction *Target = DE.getResult().getInst();
    if (!Target)
      continue; // Ignore non-local dep results.
    assert(Target->getParent() == DE.getBB());

    // Eliminating the dirty entry from 'Cache', so update the reverse info.
    RemoveFromReverseMap(ReverseNonLocalPtrDeps, Target, P);
  }

  // Remove P from NonLocalPointerDeps (which deletes NonLocalDepInfo).
  NonLocalPointerDeps.erase(It);
}

void MemoryDependenceResults::invalidateCachedPointerInfo(Value *Ptr) {
  // If Ptr isn't really a pointer, just ignore it.
  if (!Ptr->getType()->isPointerTy())
    return;
  // Flush store info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, false));
  // Flush load info for the pointer.
  removeCachedNonLocalPointerDependencies(ValueIsLoadPair(Ptr, true));
  // Invalidate phis that use the pointer.
  PV.invalidateValue(Ptr);
}

void MemoryDependenceResults::invalidateCachedPredecessors() {
  PredCache.clear();
}

void MemoryDependenceResults::removeInstruction(Instruction *RemInst) {
  // Walk through the Non-local dependencies, removing this one as the value
  // for any cached queries.
  NonLocalDepMapType::iterator NLDI = NonLocalDepsMap.find(RemInst);
  if (NLDI != NonLocalDepsMap.end()) {
    NonLocalDepInfo &BlockMap = NLDI->second.first;
    for (auto &Entry : BlockMap)
      if (Instruction *Inst = Entry.getResult().getInst())
        RemoveFromReverseMap(ReverseNonLocalDeps, Inst, RemInst);
    NonLocalDepsMap.erase(NLDI);
  }

  // If we have a cached local dependence query for this instruction, remove
  // it.
  LocalDepMapType::iterator LocalDepEntry = LocalDeps.find(RemInst);
  if (LocalDepEntry != LocalDeps.end()) {
    // Remove us from DepInst's reverse set now that the local dep info is
    // gone.
    if (Instruction *Inst = LocalDepEntry->second.getInst())
      RemoveFromReverseMap(ReverseLocalDeps, Inst, RemInst);

    // Remove this local dependency info.
    LocalDeps.erase(LocalDepEntry);
  }

  // If we have any cached dependencies on this instruction, remove them.

  // If the instruction is a pointer, remove it from both the load info and
  // the store info.
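  // (RemInst may itself have served as the query pointer of other cached
  // lookups, independently of any dependencies recorded on it above, so both
  // ValueIsLoadPair flavors are flushed.)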
  if (RemInst->getType()->isPointerTy()) {
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, false));
    removeCachedNonLocalPointerDependencies(ValueIsLoadPair(RemInst, true));
  } else {
    // Otherwise, if the instruction is in the map directly, it must be a
    // load. Remove it.
    auto toRemoveIt = NonLocalDefsCache.find(RemInst);
    if (toRemoveIt != NonLocalDefsCache.end()) {
      assert(isa<LoadInst>(RemInst) &&
             "only load instructions should be added directly");
      const Instruction *DepV = toRemoveIt->second.getResult().getInst();
      ReverseNonLocalDefsCache.find(DepV)->second.erase(RemInst);
      NonLocalDefsCache.erase(toRemoveIt);
    }
  }

  // Loop over all of the things that depend on the instruction we're removing.
  SmallVector<std::pair<Instruction *, Instruction *>, 8> ReverseDepsToAdd;

  // If we find RemInst as a clobber or Def in any of the maps for other
  // values, we need to replace its entry with a dirty version of the
  // instruction after it. If RemInst is a terminator, we use a null dirty
  // value.
  //
  // Using a dirty version of the instruction after RemInst saves having to
  // scan the entire block to get to this point.
  MemDepResult NewDirtyVal;
  if (!RemInst->isTerminator())
    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());

  ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseLocalDeps.end()) {
    // RemInst can't be the terminator if it has local stuff depending on it.
    assert(!ReverseDepIt->second.empty() && !RemInst->isTerminator() &&
           "Nothing can locally depend on a terminator");

    for (Instruction *InstDependingOnRemInst : ReverseDepIt->second) {
      assert(InstDependingOnRemInst != RemInst &&
             "Already removed our local dep info");

      LocalDeps[InstDependingOnRemInst] = NewDirtyVal;

      // Make sure to remember that new things depend on the dirty
      // instruction.
      assert(NewDirtyVal.getInst() &&
             "There is no way something else can have "
             "a local dep on this if it is a terminator!");
      ReverseDepsToAdd.push_back(
          std::make_pair(NewDirtyVal.getInst(), InstDependingOnRemInst));
    }

    ReverseLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating the
    // 'ReverseDeps' reference.
    while (!ReverseDepsToAdd.empty()) {
      ReverseLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  ReverseDepIt = ReverseNonLocalDeps.find(RemInst);
  if (ReverseDepIt != ReverseNonLocalDeps.end()) {
    for (Instruction *I : ReverseDepIt->second) {
      assert(I != RemInst && "Already removed NonLocalDep info for RemInst");

      PerInstNLInfo &INLD = NonLocalDepsMap[I];
      // The information is now dirty!
      INLD.second = true;

      for (auto &Entry : INLD.first) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
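        // (Dirty entries are recomputed lazily the next time the cached
        // query is consulted, rather than eagerly here.)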
        Entry.setResult(NewDirtyVal);

        if (Instruction *NextI = NewDirtyVal.getInst())
          ReverseDepsToAdd.push_back(std::make_pair(NextI, I));
      }
    }

    ReverseNonLocalDeps.erase(ReverseDepIt);

    // Add new reverse deps after scanning the set, to avoid invalidating
    // 'Set'.
    while (!ReverseDepsToAdd.empty()) {
      ReverseNonLocalDeps[ReverseDepsToAdd.back().first].insert(
          ReverseDepsToAdd.back().second);
      ReverseDepsToAdd.pop_back();
    }
  }

  // If the instruction is in ReverseNonLocalPtrDeps then it appears as a
  // value in the NonLocalPointerDeps info.
  ReverseNonLocalPtrDepTy::iterator ReversePtrDepIt =
      ReverseNonLocalPtrDeps.find(RemInst);
  if (ReversePtrDepIt != ReverseNonLocalPtrDeps.end()) {
    SmallVector<std::pair<Instruction *, ValueIsLoadPair>, 8>
        ReversePtrDepsToAdd;

    for (ValueIsLoadPair P : ReversePtrDepIt->second) {
      assert(P.getPointer() != RemInst &&
             "Already removed NonLocalPointerDeps info for RemInst");

      NonLocalDepInfo &NLPDI = NonLocalPointerDeps[P].NonLocalDeps;

      // The cache is not valid for any specific block anymore.
      NonLocalPointerDeps[P].Pair = BBSkipFirstBlockPair();

      // Update any entries for RemInst to use the instruction after it.
      for (auto &Entry : NLPDI) {
        if (Entry.getResult().getInst() != RemInst)
          continue;

        // Convert to a dirty entry for the subsequent instruction.
        Entry.setResult(NewDirtyVal);

        if (Instruction *NewDirtyInst = NewDirtyVal.getInst())
          ReversePtrDepsToAdd.push_back(std::make_pair(NewDirtyInst, P));
      }

      // Re-sort the NonLocalDepInfo. Changing the dirty entry to its
      // subsequent value may invalidate the sortedness.
      llvm::sort(NLPDI);
    }

    ReverseNonLocalPtrDeps.erase(ReversePtrDepIt);

    while (!ReversePtrDepsToAdd.empty()) {
      ReverseNonLocalPtrDeps[ReversePtrDepsToAdd.back().first].insert(
          ReversePtrDepsToAdd.back().second);
      ReversePtrDepsToAdd.pop_back();
    }
  }

  // Invalidate phis that use the removed instruction.
  PV.invalidateValue(RemInst);

  assert(!NonLocalDepsMap.count(RemInst) && "RemInst got reinserted?");
  LLVM_DEBUG(verifyRemoved(RemInst));
}

/// Verify that the specified instruction does not occur in our internal data
/// structures.
///
/// This function verifies by asserting in debug builds.
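/// In release (NDEBUG) builds the checks below compile away entirely.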
void MemoryDependenceResults::verifyRemoved(Instruction *D) const {
#ifndef NDEBUG
  for (const auto &DepKV : LocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    assert(DepKV.second.getInst() != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : NonLocalPointerDeps) {
    assert(DepKV.first.getPointer() != D && "Inst occurs in NLPD map key");
    for (const auto &Entry : DepKV.second.NonLocalDeps)
      assert(Entry.getResult().getInst() != D && "Inst occurs as NLPD value");
  }

  for (const auto &DepKV : NonLocalDepsMap) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    const PerInstNLInfo &INLD = DepKV.second;
    for (const auto &Entry : INLD.first)
      assert(Entry.getResult().getInst() != D &&
             "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalDeps) {
    assert(DepKV.first != D && "Inst occurs in data structures");
    for (Instruction *Inst : DepKV.second)
      assert(Inst != D && "Inst occurs in data structures");
  }

  for (const auto &DepKV : ReverseNonLocalPtrDeps) {
    assert(DepKV.first != D && "Inst occurs in rev NLPD map");

    for (ValueIsLoadPair P : DepKV.second)
      assert(P != ValueIsLoadPair(D, false) && P != ValueIsLoadPair(D, true) &&
             "Inst occurs in ReverseNonLocalPtrDeps map");
  }
#endif
}

AnalysisKey MemoryDependenceAnalysis::Key;

MemoryDependenceAnalysis::MemoryDependenceAnalysis()
    : DefaultBlockScanLimit(BlockScanLimit) {}

MemoryDependenceResults
MemoryDependenceAnalysis::run(Function &F, FunctionAnalysisManager &AM) {
  auto &AA = AM.getResult<AAManager>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &PV = AM.getResult<PhiValuesAnalysis>(F);
  return MemoryDependenceResults(AA, AC, TLI, DT, PV, DefaultBlockScanLimit);
}

char MemoryDependenceWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemoryDependenceWrapperPass, "memdep",
                      "Memory Dependence Analysis", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PhiValuesWrapperPass)
INITIALIZE_PASS_END(MemoryDependenceWrapperPass, "memdep",
                    "Memory Dependence Analysis", false, true)

MemoryDependenceWrapperPass::MemoryDependenceWrapperPass() : FunctionPass(ID) {
  initializeMemoryDependenceWrapperPassPass(*PassRegistry::getPassRegistry());
}

MemoryDependenceWrapperPass::~MemoryDependenceWrapperPass() = default;

void MemoryDependenceWrapperPass::releaseMemory() {
  MemDep.reset();
}

void MemoryDependenceWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<PhiValuesWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

bool MemoryDependenceResults::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  // Check whether our analysis is preserved.
  auto PAC = PA.getChecker<MemoryDependenceAnalysis>();
  if (!PAC.preserved() && !PAC.preservedSet<AllAnalysesOn<Function>>())
    // If not, give up now.
    return true;

  // Check whether the analyses we depend on became invalid for any reason.
  if (Inv.invalidate<AAManager>(F, PA) ||
      Inv.invalidate<AssumptionAnalysis>(F, PA) ||
      Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
      Inv.invalidate<PhiValuesAnalysis>(F, PA))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

unsigned MemoryDependenceResults::getDefaultBlockScanLimit() const {
  return DefaultBlockScanLimit;
}

bool MemoryDependenceWrapperPass::runOnFunction(Function &F) {
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &PV = getAnalysis<PhiValuesWrapperPass>().getResult();
  MemDep.emplace(AA, AC, TLI, DT, PV, BlockScanLimit);
  return false;
}
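
// Example usage (an illustrative sketch, not part of this file's API): a
// new-PM transform would typically obtain the analysis result and query a
// single memory instruction, e.g.
//
//   auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
//   MemDepResult Dep = MD.getDependency(&SomeLoad); // 'SomeLoad' is a
//   if (Dep.isDef()) {                              // hypothetical LoadInst
//     // ... forward the defining store's value ...
//   }
//
// A legacy pass instead requires MemoryDependenceWrapperPass in its
// getAnalysisUsage and calls getMemDep(). Clients that delete instructions
// should call removeInstruction() first so the caches above stay consistent.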