1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This file implements the MemorySSA class. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "llvm/Analysis/MemorySSA.h" 15 #include "llvm/ADT/DenseMap.h" 16 #include "llvm/ADT/DenseMapInfo.h" 17 #include "llvm/ADT/DenseSet.h" 18 #include "llvm/ADT/DepthFirstIterator.h" 19 #include "llvm/ADT/Hashing.h" 20 #include "llvm/ADT/None.h" 21 #include "llvm/ADT/Optional.h" 22 #include "llvm/ADT/STLExtras.h" 23 #include "llvm/ADT/SmallPtrSet.h" 24 #include "llvm/ADT/SmallVector.h" 25 #include "llvm/ADT/iterator.h" 26 #include "llvm/ADT/iterator_range.h" 27 #include "llvm/Analysis/AliasAnalysis.h" 28 #include "llvm/Analysis/IteratedDominanceFrontier.h" 29 #include "llvm/Analysis/MemoryLocation.h" 30 #include "llvm/Config/llvm-config.h" 31 #include "llvm/IR/AssemblyAnnotationWriter.h" 32 #include "llvm/IR/BasicBlock.h" 33 #include "llvm/IR/CallSite.h" 34 #include "llvm/IR/Dominators.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/Instruction.h" 37 #include "llvm/IR/Instructions.h" 38 #include "llvm/IR/IntrinsicInst.h" 39 #include "llvm/IR/Intrinsics.h" 40 #include "llvm/IR/LLVMContext.h" 41 #include "llvm/IR/PassManager.h" 42 #include "llvm/IR/Use.h" 43 #include "llvm/Pass.h" 44 #include "llvm/Support/AtomicOrdering.h" 45 #include "llvm/Support/Casting.h" 46 #include "llvm/Support/CommandLine.h" 47 #include "llvm/Support/Compiler.h" 48 #include "llvm/Support/Debug.h" 49 #include "llvm/Support/ErrorHandling.h" 50 #include "llvm/Support/FormattedStream.h" 51 #include "llvm/Support/raw_ostream.h" 52 #include <algorithm> 53 #include <cassert> 54 #include <iterator> 55 #include <memory> 56 #include <utility> 57 58 using namespace llvm; 59 60 #define DEBUG_TYPE "memoryssa" 61 62 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false, 63 true) 64 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 65 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass) 66 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false, 67 true) 68 69 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa", 70 "Memory SSA Printer", false, false) 71 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 72 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa", 73 "Memory SSA Printer", false, false) 74 75 static cl::opt<unsigned> MaxCheckLimit( 76 "memssa-check-limit", cl::Hidden, cl::init(100), 77 cl::desc("The maximum number of stores/phis MemorySSA" 78 "will consider trying to walk past (default = 100)")); 79 80 // Always verify MemorySSA if expensive checking is enabled. 81 #ifdef EXPENSIVE_CHECKS 82 bool llvm::VerifyMemorySSA = true; 83 #else 84 bool llvm::VerifyMemorySSA = false; 85 #endif 86 static cl::opt<bool, true> 87 VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA), 88 cl::Hidden, cl::desc("Enable verification of MemorySSA.")); 89 90 namespace llvm { 91 92 /// An assembly annotator class to print Memory SSA information in 93 /// comments. 
94 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter { 95 friend class MemorySSA; 96 97 const MemorySSA *MSSA; 98 99 public: 100 MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {} 101 102 void emitBasicBlockStartAnnot(const BasicBlock *BB, 103 formatted_raw_ostream &OS) override { 104 if (MemoryAccess *MA = MSSA->getMemoryAccess(BB)) 105 OS << "; " << *MA << "\n"; 106 } 107 108 void emitInstructionAnnot(const Instruction *I, 109 formatted_raw_ostream &OS) override { 110 if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) 111 OS << "; " << *MA << "\n"; 112 } 113 }; 114 115 } // end namespace llvm 116 117 namespace { 118 119 /// Our current alias analysis API differentiates heavily between calls and 120 /// non-calls, and functions called on one usually assert on the other. 121 /// This class encapsulates the distinction to simplify other code that wants 122 /// "Memory affecting instructions and related data" to use as a key. 123 /// For example, this class is used as a densemap key in the use optimizer. 124 class MemoryLocOrCall { 125 public: 126 bool IsCall = false; 127 128 MemoryLocOrCall() = default; 129 MemoryLocOrCall(MemoryUseOrDef *MUD) 130 : MemoryLocOrCall(MUD->getMemoryInst()) {} 131 MemoryLocOrCall(const MemoryUseOrDef *MUD) 132 : MemoryLocOrCall(MUD->getMemoryInst()) {} 133 134 MemoryLocOrCall(Instruction *Inst) { 135 if (ImmutableCallSite(Inst)) { 136 IsCall = true; 137 CS = ImmutableCallSite(Inst); 138 } else { 139 IsCall = false; 140 // There is no such thing as a memorylocation for a fence inst, and it is 141 // unique in that regard. 142 if (!isa<FenceInst>(Inst)) 143 Loc = MemoryLocation::get(Inst); 144 } 145 } 146 147 explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {} 148 149 ImmutableCallSite getCS() const { 150 assert(IsCall); 151 return CS; 152 } 153 154 MemoryLocation getLoc() const { 155 assert(!IsCall); 156 return Loc; 157 } 158 159 bool operator==(const MemoryLocOrCall &Other) const { 160 if (IsCall != Other.IsCall) 161 return false; 162 163 if (!IsCall) 164 return Loc == Other.Loc; 165 166 if (CS.getCalledValue() != Other.CS.getCalledValue()) 167 return false; 168 169 return CS.arg_size() == Other.CS.arg_size() && 170 std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin()); 171 } 172 173 private: 174 union { 175 ImmutableCallSite CS; 176 MemoryLocation Loc; 177 }; 178 }; 179 180 } // end anonymous namespace 181 182 namespace llvm { 183 184 template <> struct DenseMapInfo<MemoryLocOrCall> { 185 static inline MemoryLocOrCall getEmptyKey() { 186 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey()); 187 } 188 189 static inline MemoryLocOrCall getTombstoneKey() { 190 return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey()); 191 } 192 193 static unsigned getHashValue(const MemoryLocOrCall &MLOC) { 194 if (!MLOC.IsCall) 195 return hash_combine( 196 MLOC.IsCall, 197 DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc())); 198 199 hash_code hash = 200 hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue( 201 MLOC.getCS().getCalledValue())); 202 203 for (const Value *Arg : MLOC.getCS().args()) 204 hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg)); 205 return hash; 206 } 207 208 static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) { 209 return LHS == RHS; 210 } 211 }; 212 213 } // end namespace llvm 214 215 /// This does one-way checks to see if Use could theoretically be hoisted above 216 /// MayClobber. 
This will not check the other way around. 217 /// 218 /// This assumes that, for the purposes of MemorySSA, Use comes directly after 219 /// MayClobber, with no potentially clobbering operations in between them. 220 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.) 221 static bool areLoadsReorderable(const LoadInst *Use, 222 const LoadInst *MayClobber) { 223 bool VolatileUse = Use->isVolatile(); 224 bool VolatileClobber = MayClobber->isVolatile(); 225 // Volatile operations may never be reordered with other volatile operations. 226 if (VolatileUse && VolatileClobber) 227 return false; 228 // Otherwise, volatile doesn't matter here. From the language reference: 229 // 'optimizers may change the order of volatile operations relative to 230 // non-volatile operations.'" 231 232 // If a load is seq_cst, it cannot be moved above other loads. If its ordering 233 // is weaker, it can be moved above other loads. We just need to be sure that 234 // MayClobber isn't an acquire load, because loads can't be moved above 235 // acquire loads. 236 // 237 // Note that this explicitly *does* allow the free reordering of monotonic (or 238 // weaker) loads of the same address. 239 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent; 240 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(), 241 AtomicOrdering::Acquire); 242 return !(SeqCstUse || MayClobberIsAcquire); 243 } 244 245 namespace { 246 247 struct ClobberAlias { 248 bool IsClobber; 249 Optional<AliasResult> AR; 250 }; 251 252 } // end anonymous namespace 253 254 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being 255 // ignored if IsClobber = false. 256 static ClobberAlias instructionClobbersQuery(const MemoryDef *MD, 257 const MemoryLocation &UseLoc, 258 const Instruction *UseInst, 259 AliasAnalysis &AA) { 260 Instruction *DefInst = MD->getMemoryInst(); 261 assert(DefInst && "Defining instruction not actually an instruction"); 262 ImmutableCallSite UseCS(UseInst); 263 Optional<AliasResult> AR; 264 265 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) { 266 // These intrinsics will show up as affecting memory, but they are just 267 // markers, mostly. 268 // 269 // FIXME: We probably don't actually want MemorySSA to model these at all 270 // (including creating MemoryAccesses for them): we just end up inventing 271 // clobbers where they don't really exist at all. Please see D43269 for 272 // context. 273 switch (II->getIntrinsicID()) { 274 case Intrinsic::lifetime_start: 275 if (UseCS) 276 return {false, NoAlias}; 277 AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc); 278 return {AR != NoAlias, AR}; 279 case Intrinsic::lifetime_end: 280 case Intrinsic::invariant_start: 281 case Intrinsic::invariant_end: 282 case Intrinsic::assume: 283 return {false, NoAlias}; 284 default: 285 break; 286 } 287 } 288 289 if (UseCS) { 290 ModRefInfo I = AA.getModRefInfo(DefInst, UseCS); 291 AR = isMustSet(I) ? MustAlias : MayAlias; 292 return {isModOrRefSet(I), AR}; 293 } 294 295 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst)) 296 if (auto *UseLoad = dyn_cast<LoadInst>(UseInst)) 297 return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias}; 298 299 ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc); 300 AR = isMustSet(I) ? 
MustAlias : MayAlias; 301 return {isModSet(I), AR}; 302 } 303 304 static ClobberAlias instructionClobbersQuery(MemoryDef *MD, 305 const MemoryUseOrDef *MU, 306 const MemoryLocOrCall &UseMLOC, 307 AliasAnalysis &AA) { 308 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery 309 // to exist while MemoryLocOrCall is pushed through places. 310 if (UseMLOC.IsCall) 311 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(), 312 AA); 313 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(), 314 AA); 315 } 316 317 // Return true when MD may alias MU, return false otherwise. 318 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU, 319 AliasAnalysis &AA) { 320 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber; 321 } 322 323 namespace { 324 325 struct UpwardsMemoryQuery { 326 // True if our original query started off as a call 327 bool IsCall = false; 328 // The pointer location we started the query with. This will be empty if 329 // IsCall is true. 330 MemoryLocation StartingLoc; 331 // This is the instruction we were querying about. 332 const Instruction *Inst = nullptr; 333 // The MemoryAccess we actually got called with, used to test local domination 334 const MemoryAccess *OriginalAccess = nullptr; 335 Optional<AliasResult> AR = MayAlias; 336 337 UpwardsMemoryQuery() = default; 338 339 UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access) 340 : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) { 341 if (!IsCall) 342 StartingLoc = MemoryLocation::get(Inst); 343 } 344 }; 345 346 } // end anonymous namespace 347 348 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc, 349 AliasAnalysis &AA) { 350 Instruction *Inst = MD->getMemoryInst(); 351 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) { 352 switch (II->getIntrinsicID()) { 353 case Intrinsic::lifetime_end: 354 return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc); 355 default: 356 return false; 357 } 358 } 359 return false; 360 } 361 362 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA, 363 const Instruction *I) { 364 // If the memory can't be changed, then loads of the memory can't be 365 // clobbered. 366 return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) || 367 AA.pointsToConstantMemory(cast<LoadInst>(I)-> 368 getPointerOperand())); 369 } 370 371 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing 372 /// inbetween `Start` and `ClobberAt` can clobbers `Start`. 373 /// 374 /// This is meant to be as simple and self-contained as possible. Because it 375 /// uses no cache, etc., it can be relatively expensive. 376 /// 377 /// \param Start The MemoryAccess that we want to walk from. 378 /// \param ClobberAt A clobber for Start. 379 /// \param StartLoc The MemoryLocation for Start. 380 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to. 381 /// \param Query The UpwardsMemoryQuery we used for our search. 382 /// \param AA The AliasAnalysis we used for our search. 
383 static void 384 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, 385 const MemoryLocation &StartLoc, const MemorySSA &MSSA, 386 const UpwardsMemoryQuery &Query, AliasAnalysis &AA) { 387 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?"); 388 389 if (MSSA.isLiveOnEntryDef(Start)) { 390 assert(MSSA.isLiveOnEntryDef(ClobberAt) && 391 "liveOnEntry must clobber itself"); 392 return; 393 } 394 395 bool FoundClobber = false; 396 DenseSet<ConstMemoryAccessPair> VisitedPhis; 397 SmallVector<ConstMemoryAccessPair, 8> Worklist; 398 Worklist.emplace_back(Start, StartLoc); 399 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one 400 // is found, complain. 401 while (!Worklist.empty()) { 402 auto MAP = Worklist.pop_back_val(); 403 // All we care about is that nothing from Start to ClobberAt clobbers Start. 404 // We learn nothing from revisiting nodes. 405 if (!VisitedPhis.insert(MAP).second) 406 continue; 407 408 for (const auto *MA : def_chain(MAP.first)) { 409 if (MA == ClobberAt) { 410 if (const auto *MD = dyn_cast<MemoryDef>(MA)) { 411 // instructionClobbersQuery isn't essentially free, so don't use `|=`, 412 // since it won't let us short-circuit. 413 // 414 // Also, note that this can't be hoisted out of the `Worklist` loop, 415 // since MD may only act as a clobber for 1 of N MemoryLocations. 416 FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD); 417 if (!FoundClobber) { 418 ClobberAlias CA = 419 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA); 420 if (CA.IsClobber) { 421 FoundClobber = true; 422 // Not used: CA.AR; 423 } 424 } 425 } 426 break; 427 } 428 429 // We should never hit liveOnEntry, unless it's the clobber. 430 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?"); 431 432 if (const auto *MD = dyn_cast<MemoryDef>(MA)) { 433 // If Start is a Def, skip self. 434 if (MD == Start) 435 continue; 436 437 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) 438 .IsClobber && 439 "Found clobber before reaching ClobberAt!"); 440 continue; 441 } 442 443 if (const auto *MU = dyn_cast<MemoryUse>(MA)) { 444 (void)MU; 445 assert (MU == Start && 446 "Can only find use in def chain if Start is a use"); 447 continue; 448 } 449 450 assert(isa<MemoryPhi>(MA)); 451 Worklist.append( 452 upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}), 453 upward_defs_end()); 454 } 455 } 456 457 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a 458 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point. 459 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) && 460 "ClobberAt never acted as a clobber"); 461 } 462 463 namespace { 464 465 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up 466 /// in one class. 467 class ClobberWalker { 468 /// Save a few bytes by using unsigned instead of size_t. 469 using ListIndex = unsigned; 470 471 /// Represents a span of contiguous MemoryDefs, potentially ending in a 472 /// MemoryPhi. 473 struct DefPath { 474 MemoryLocation Loc; 475 // Note that, because we always walk in reverse, Last will always dominate 476 // First. Also note that First and Last are inclusive. 
477 MemoryAccess *First; 478 MemoryAccess *Last; 479 Optional<ListIndex> Previous; 480 481 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last, 482 Optional<ListIndex> Previous) 483 : Loc(Loc), First(First), Last(Last), Previous(Previous) {} 484 485 DefPath(const MemoryLocation &Loc, MemoryAccess *Init, 486 Optional<ListIndex> Previous) 487 : DefPath(Loc, Init, Init, Previous) {} 488 }; 489 490 const MemorySSA &MSSA; 491 AliasAnalysis &AA; 492 DominatorTree &DT; 493 UpwardsMemoryQuery *Query; 494 495 // Phi optimization bookkeeping 496 SmallVector<DefPath, 32> Paths; 497 DenseSet<ConstMemoryAccessPair> VisitedPhis; 498 499 /// Find the nearest def or phi that `From` can legally be optimized to. 500 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const { 501 assert(From->getNumOperands() && "Phi with no operands?"); 502 503 BasicBlock *BB = From->getBlock(); 504 MemoryAccess *Result = MSSA.getLiveOnEntryDef(); 505 DomTreeNode *Node = DT.getNode(BB); 506 while ((Node = Node->getIDom())) { 507 auto *Defs = MSSA.getBlockDefs(Node->getBlock()); 508 if (Defs) 509 return &*Defs->rbegin(); 510 } 511 return Result; 512 } 513 514 /// Result of calling walkToPhiOrClobber. 515 struct UpwardsWalkResult { 516 /// The "Result" of the walk. Either a clobber, the last thing we walked, or 517 /// both. Include alias info when clobber found. 518 MemoryAccess *Result; 519 bool IsKnownClobber; 520 Optional<AliasResult> AR; 521 }; 522 523 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last. 524 /// This will update Desc.Last as it walks. It will (optionally) also stop at 525 /// StopAt. 526 /// 527 /// This does not test for whether StopAt is a clobber 528 UpwardsWalkResult 529 walkToPhiOrClobber(DefPath &Desc, 530 const MemoryAccess *StopAt = nullptr) const { 531 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world"); 532 533 for (MemoryAccess *Current : def_chain(Desc.Last)) { 534 Desc.Last = Current; 535 if (Current == StopAt) 536 return {Current, false, MayAlias}; 537 538 if (auto *MD = dyn_cast<MemoryDef>(Current)) { 539 if (MSSA.isLiveOnEntryDef(MD)) 540 return {MD, true, MustAlias}; 541 ClobberAlias CA = 542 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA); 543 if (CA.IsClobber) 544 return {MD, true, CA.AR}; 545 } 546 } 547 548 assert(isa<MemoryPhi>(Desc.Last) && 549 "Ended at a non-clobber that's not a phi?"); 550 return {Desc.Last, false, MayAlias}; 551 } 552 553 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, 554 ListIndex PriorNode) { 555 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), 556 upward_defs_end()); 557 for (const MemoryAccessPair &P : UpwardDefs) { 558 PausedSearches.push_back(Paths.size()); 559 Paths.emplace_back(P.second, P.first, PriorNode); 560 } 561 } 562 563 /// Represents a search that terminated after finding a clobber. This clobber 564 /// may or may not be present in the path of defs from LastNode..SearchStart, 565 /// since it may have been retrieved from cache. 566 struct TerminatedPath { 567 MemoryAccess *Clobber; 568 ListIndex LastNode; 569 }; 570 571 /// Get an access that keeps us from optimizing to the given phi. 572 /// 573 /// PausedSearches is an array of indices into the Paths array. Its incoming 574 /// value is the indices of searches that stopped at the last phi optimization 575 /// target. It's left in an unspecified state. 576 /// 577 /// If this returns None, NewPaused is a vector of searches that terminated 578 /// at StopWhere. 
Otherwise, NewPaused is left in an unspecified state. 579 Optional<TerminatedPath> 580 getBlockingAccess(const MemoryAccess *StopWhere, 581 SmallVectorImpl<ListIndex> &PausedSearches, 582 SmallVectorImpl<ListIndex> &NewPaused, 583 SmallVectorImpl<TerminatedPath> &Terminated) { 584 assert(!PausedSearches.empty() && "No searches to continue?"); 585 586 // BFS vs DFS really doesn't make a difference here, so just do a DFS with 587 // PausedSearches as our stack. 588 while (!PausedSearches.empty()) { 589 ListIndex PathIndex = PausedSearches.pop_back_val(); 590 DefPath &Node = Paths[PathIndex]; 591 592 // If we've already visited this path with this MemoryLocation, we don't 593 // need to do so again. 594 // 595 // NOTE: That we just drop these paths on the ground makes caching 596 // behavior sporadic. e.g. given a diamond: 597 // A 598 // B C 599 // D 600 // 601 // ...If we walk D, B, A, C, we'll only cache the result of phi 602 // optimization for A, B, and D; C will be skipped because it dies here. 603 // This arguably isn't the worst thing ever, since: 604 // - We generally query things in a top-down order, so if we got below D 605 // without needing cache entries for {C, MemLoc}, then chances are 606 // that those cache entries would end up ultimately unused. 607 // - We still cache things for A, so C only needs to walk up a bit. 608 // If this behavior becomes problematic, we can fix without a ton of extra 609 // work. 610 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) 611 continue; 612 613 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere); 614 if (Res.IsKnownClobber) { 615 assert(Res.Result != StopWhere); 616 // If this wasn't a cache hit, we hit a clobber when walking. That's a 617 // failure. 618 TerminatedPath Term{Res.Result, PathIndex}; 619 if (!MSSA.dominates(Res.Result, StopWhere)) 620 return Term; 621 622 // Otherwise, it's a valid thing to potentially optimize to. 623 Terminated.push_back(Term); 624 continue; 625 } 626 627 if (Res.Result == StopWhere) { 628 // We've hit our target. Save this path off for if we want to continue 629 // walking. 
630 NewPaused.push_back(PathIndex); 631 continue; 632 } 633 634 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber"); 635 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex); 636 } 637 638 return None; 639 } 640 641 template <typename T, typename Walker> 642 struct generic_def_path_iterator 643 : public iterator_facade_base<generic_def_path_iterator<T, Walker>, 644 std::forward_iterator_tag, T *> { 645 generic_def_path_iterator() = default; 646 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {} 647 648 T &operator*() const { return curNode(); } 649 650 generic_def_path_iterator &operator++() { 651 N = curNode().Previous; 652 return *this; 653 } 654 655 bool operator==(const generic_def_path_iterator &O) const { 656 if (N.hasValue() != O.N.hasValue()) 657 return false; 658 return !N.hasValue() || *N == *O.N; 659 } 660 661 private: 662 T &curNode() const { return W->Paths[*N]; } 663 664 Walker *W = nullptr; 665 Optional<ListIndex> N = None; 666 }; 667 668 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>; 669 using const_def_path_iterator = 670 generic_def_path_iterator<const DefPath, const ClobberWalker>; 671 672 iterator_range<def_path_iterator> def_path(ListIndex From) { 673 return make_range(def_path_iterator(this, From), def_path_iterator()); 674 } 675 676 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const { 677 return make_range(const_def_path_iterator(this, From), 678 const_def_path_iterator()); 679 } 680 681 struct OptznResult { 682 /// The path that contains our result. 683 TerminatedPath PrimaryClobber; 684 /// The paths that we can legally cache back from, but that aren't 685 /// necessarily the result of the Phi optimization. 686 SmallVector<TerminatedPath, 4> OtherClobbers; 687 }; 688 689 ListIndex defPathIndex(const DefPath &N) const { 690 // The assert looks nicer if we don't need to do &N 691 const DefPath *NP = &N; 692 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && 693 "Out of bounds DefPath!"); 694 return NP - &Paths.front(); 695 } 696 697 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths 698 /// that act as legal clobbers. Note that this won't return *all* clobbers. 699 /// 700 /// Phi optimization algorithm tl;dr: 701 /// - Find the earliest def/phi, A, we can optimize to 702 /// - Find if all paths from the starting memory access ultimately reach A 703 /// - If not, optimization isn't possible. 704 /// - Otherwise, walk from A to another clobber or phi, A'. 705 /// - If A' is a def, we're done. 706 /// - If A' is a phi, try to optimize it. 707 /// 708 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path 709 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found. 710 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start, 711 const MemoryLocation &Loc) { 712 assert(Paths.empty() && VisitedPhis.empty() && 713 "Reset the optimization state."); 714 715 Paths.emplace_back(Loc, Start, Phi, None); 716 // Stores how many "valid" optimization nodes we had prior to calling 717 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker. 718 auto PriorPathsSize = Paths.size(); 719 720 SmallVector<ListIndex, 16> PausedSearches; 721 SmallVector<ListIndex, 8> NewPaused; 722 SmallVector<TerminatedPath, 4> TerminatedPaths; 723 724 addSearches(Phi, PausedSearches, 0); 725 726 // Moves the TerminatedPath with the "most dominated" Clobber to the end of 727 // Paths. 
728 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) { 729 assert(!Paths.empty() && "Need a path to move"); 730 auto Dom = Paths.begin(); 731 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I) 732 if (!MSSA.dominates(I->Clobber, Dom->Clobber)) 733 Dom = I; 734 auto Last = Paths.end() - 1; 735 if (Last != Dom) 736 std::iter_swap(Last, Dom); 737 }; 738 739 MemoryPhi *Current = Phi; 740 while (true) { 741 assert(!MSSA.isLiveOnEntryDef(Current) && 742 "liveOnEntry wasn't treated as a clobber?"); 743 744 const auto *Target = getWalkTarget(Current); 745 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal 746 // optimization for the prior phi. 747 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) { 748 return MSSA.dominates(P.Clobber, Target); 749 })); 750 751 // FIXME: This is broken, because the Blocker may be reported to be 752 // liveOnEntry, and we'll happily wait for that to disappear (read: never) 753 // For the moment, this is fine, since we do nothing with blocker info. 754 if (Optional<TerminatedPath> Blocker = getBlockingAccess( 755 Target, PausedSearches, NewPaused, TerminatedPaths)) { 756 757 // Find the node we started at. We can't search based on N->Last, since 758 // we may have gone around a loop with a different MemoryLocation. 759 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) { 760 return defPathIndex(N) < PriorPathsSize; 761 }); 762 assert(Iter != def_path_iterator()); 763 764 DefPath &CurNode = *Iter; 765 assert(CurNode.Last == Current); 766 767 // Two things: 768 // A. We can't reliably cache all of NewPaused back. Consider a case 769 // where we have two paths in NewPaused; one of which can't optimize 770 // above this phi, whereas the other can. If we cache the second path 771 // back, we'll end up with suboptimal cache entries. We can handle 772 // cases like this a bit better when we either try to find all 773 // clobbers that block phi optimization, or when our cache starts 774 // supporting unfinished searches. 775 // B. We can't reliably cache TerminatedPaths back here without doing 776 // extra checks; consider a case like: 777 // T 778 // / \ 779 // D C 780 // \ / 781 // S 782 // Where T is our target, C is a node with a clobber on it, D is a 783 // diamond (with a clobber *only* on the left or right node, N), and 784 // S is our start. Say we walk to D, through the node opposite N 785 // (read: ignoring the clobber), and see a cache entry in the top 786 // node of D. That cache entry gets put into TerminatedPaths. We then 787 // walk up to C (N is later in our worklist), find the clobber, and 788 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache 789 // the bottom part of D to the cached clobber, ignoring the clobber 790 // in N. Again, this problem goes away if we start tracking all 791 // blockers for a given phi optimization. 792 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)}; 793 return {Result, {}}; 794 } 795 796 // If there's nothing left to search, then all paths led to valid clobbers 797 // that we got from our cache; pick the nearest to the start, and allow 798 // the rest to be cached back. 
799 if (NewPaused.empty()) { 800 MoveDominatedPathToEnd(TerminatedPaths); 801 TerminatedPath Result = TerminatedPaths.pop_back_val(); 802 return {Result, std::move(TerminatedPaths)}; 803 } 804 805 MemoryAccess *DefChainEnd = nullptr; 806 SmallVector<TerminatedPath, 4> Clobbers; 807 for (ListIndex Paused : NewPaused) { 808 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]); 809 if (WR.IsKnownClobber) 810 Clobbers.push_back({WR.Result, Paused}); 811 else 812 // Micro-opt: If we hit the end of the chain, save it. 813 DefChainEnd = WR.Result; 814 } 815 816 if (!TerminatedPaths.empty()) { 817 // If we couldn't find the dominating phi/liveOnEntry in the above loop, 818 // do it now. 819 if (!DefChainEnd) 820 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target))) 821 DefChainEnd = MA; 822 823 // If any of the terminated paths don't dominate the phi we'll try to 824 // optimize, we need to figure out what they are and quit. 825 const BasicBlock *ChainBB = DefChainEnd->getBlock(); 826 for (const TerminatedPath &TP : TerminatedPaths) { 827 // Because we know that DefChainEnd is as "high" as we can go, we 828 // don't need local dominance checks; BB dominance is sufficient. 829 if (DT.dominates(ChainBB, TP.Clobber->getBlock())) 830 Clobbers.push_back(TP); 831 } 832 } 833 834 // If we have clobbers in the def chain, find the one closest to Current 835 // and quit. 836 if (!Clobbers.empty()) { 837 MoveDominatedPathToEnd(Clobbers); 838 TerminatedPath Result = Clobbers.pop_back_val(); 839 return {Result, std::move(Clobbers)}; 840 } 841 842 assert(all_of(NewPaused, 843 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })); 844 845 // Because liveOnEntry is a clobber, this must be a phi. 846 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd); 847 848 PriorPathsSize = Paths.size(); 849 PausedSearches.clear(); 850 for (ListIndex I : NewPaused) 851 addSearches(DefChainPhi, PausedSearches, I); 852 NewPaused.clear(); 853 854 Current = DefChainPhi; 855 } 856 } 857 858 void verifyOptResult(const OptznResult &R) const { 859 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) { 860 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); 861 })); 862 } 863 864 void resetPhiOptznState() { 865 Paths.clear(); 866 VisitedPhis.clear(); 867 } 868 869 public: 870 ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT) 871 : MSSA(MSSA), AA(AA), DT(DT) {} 872 873 /// Finds the nearest clobber for the given query, optimizing phis if 874 /// possible. 875 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) { 876 Query = &Q; 877 878 MemoryAccess *Current = Start; 879 // This walker pretends uses don't exist. If we're handed one, silently grab 880 // its def. 
(This has the nice side-effect of ensuring we never cache uses) 881 if (auto *MU = dyn_cast<MemoryUse>(Start)) 882 Current = MU->getDefiningAccess(); 883 884 DefPath FirstDesc(Q.StartingLoc, Current, Current, None); 885 // Fast path for the overly-common case (no crazy phi optimization 886 // necessary) 887 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc); 888 MemoryAccess *Result; 889 if (WalkResult.IsKnownClobber) { 890 Result = WalkResult.Result; 891 Q.AR = WalkResult.AR; 892 } else { 893 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), 894 Current, Q.StartingLoc); 895 verifyOptResult(OptRes); 896 resetPhiOptznState(); 897 Result = OptRes.PrimaryClobber.Clobber; 898 } 899 900 #ifdef EXPENSIVE_CHECKS 901 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA); 902 #endif 903 return Result; 904 } 905 906 void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); } 907 }; 908 909 struct RenamePassData { 910 DomTreeNode *DTN; 911 DomTreeNode::const_iterator ChildIt; 912 MemoryAccess *IncomingVal; 913 914 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, 915 MemoryAccess *M) 916 : DTN(D), ChildIt(It), IncomingVal(M) {} 917 918 void swap(RenamePassData &RHS) { 919 std::swap(DTN, RHS.DTN); 920 std::swap(ChildIt, RHS.ChildIt); 921 std::swap(IncomingVal, RHS.IncomingVal); 922 } 923 }; 924 925 } // end anonymous namespace 926 927 namespace llvm { 928 929 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no 930 /// longer does caching on its own, but the name has been retained for the 931 /// moment. 932 class MemorySSA::CachingWalker final : public MemorySSAWalker { 933 ClobberWalker Walker; 934 935 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &); 936 937 public: 938 CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *); 939 ~CachingWalker() override = default; 940 941 using MemorySSAWalker::getClobberingMemoryAccess; 942 943 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override; 944 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, 945 const MemoryLocation &) override; 946 void invalidateInfo(MemoryAccess *) override; 947 948 void verify(const MemorySSA *MSSA) override { 949 MemorySSAWalker::verify(MSSA); 950 Walker.verify(MSSA); 951 } 952 }; 953 954 } // end namespace llvm 955 956 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, 957 bool RenameAllUses) { 958 // Pass through values to our successors 959 for (const BasicBlock *S : successors(BB)) { 960 auto It = PerBlockAccesses.find(S); 961 // Rename the phi nodes in our successor block 962 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) 963 continue; 964 AccessList *Accesses = It->second.get(); 965 auto *Phi = cast<MemoryPhi>(&Accesses->front()); 966 if (RenameAllUses) { 967 int PhiIndex = Phi->getBasicBlockIndex(BB); 968 assert(PhiIndex != -1 && "Incomplete phi during partial rename"); 969 Phi->setIncomingValue(PhiIndex, IncomingVal); 970 } else 971 Phi->addIncoming(IncomingVal, BB); 972 } 973 } 974 975 /// Rename a single basic block into MemorySSA form. 976 /// Uses the standard SSA renaming algorithm. 977 /// \returns The new incoming value. 978 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, 979 bool RenameAllUses) { 980 auto It = PerBlockAccesses.find(BB); 981 // Skip most processing if the list is empty. 
982 if (It != PerBlockAccesses.end()) { 983 AccessList *Accesses = It->second.get(); 984 for (MemoryAccess &L : *Accesses) { 985 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { 986 if (MUD->getDefiningAccess() == nullptr || RenameAllUses) 987 MUD->setDefiningAccess(IncomingVal); 988 if (isa<MemoryDef>(&L)) 989 IncomingVal = &L; 990 } else { 991 IncomingVal = &L; 992 } 993 } 994 } 995 return IncomingVal; 996 } 997 998 /// This is the standard SSA renaming algorithm. 999 /// 1000 /// We walk the dominator tree in preorder, renaming accesses, and then filling 1001 /// in phi nodes in our successors. 1002 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, 1003 SmallPtrSetImpl<BasicBlock *> &Visited, 1004 bool SkipVisited, bool RenameAllUses) { 1005 SmallVector<RenamePassData, 32> WorkStack; 1006 // Skip everything if we already renamed this block and we are skipping. 1007 // Note: You can't sink this into the if, because we need it to occur 1008 // regardless of whether we skip blocks or not. 1009 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; 1010 if (SkipVisited && AlreadyVisited) 1011 return; 1012 1013 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); 1014 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); 1015 WorkStack.push_back({Root, Root->begin(), IncomingVal}); 1016 1017 while (!WorkStack.empty()) { 1018 DomTreeNode *Node = WorkStack.back().DTN; 1019 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; 1020 IncomingVal = WorkStack.back().IncomingVal; 1021 1022 if (ChildIt == Node->end()) { 1023 WorkStack.pop_back(); 1024 } else { 1025 DomTreeNode *Child = *ChildIt; 1026 ++WorkStack.back().ChildIt; 1027 BasicBlock *BB = Child->getBlock(); 1028 // Note: You can't sink this into the if, because we need it to occur 1029 // regardless of whether we skip blocks or not. 1030 AlreadyVisited = !Visited.insert(BB).second; 1031 if (SkipVisited && AlreadyVisited) { 1032 // We already visited this during our renaming, which can happen when 1033 // being asked to rename multiple blocks. Figure out the incoming val, 1034 // which is the last def. 1035 // Incoming value can only change if there is a block def, and in that 1036 // case, it's the last block def in the list. 1037 if (auto *BlockDefs = getWritableBlockDefs(BB)) 1038 IncomingVal = &*BlockDefs->rbegin(); 1039 } else 1040 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); 1041 renameSuccessorPhis(BB, IncomingVal, RenameAllUses); 1042 WorkStack.push_back({Child, Child->begin(), IncomingVal}); 1043 } 1044 } 1045 } 1046 1047 /// This handles unreachable block accesses by deleting phi nodes in 1048 /// unreachable blocks, and marking all other unreachable MemoryAccess's as 1049 /// being uses of the live on entry definition. 1050 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { 1051 assert(!DT->isReachableFromEntry(BB) && 1052 "Reachable block found while handling unreachable blocks"); 1053 1054 // Make sure phi nodes in our reachable successors end up with a 1055 // LiveOnEntryDef for our incoming edge, even though our block is forward 1056 // unreachable. We could just disconnect these blocks from the CFG fully, 1057 // but we do not right now. 
1058 for (const BasicBlock *S : successors(BB)) { 1059 if (!DT->isReachableFromEntry(S)) 1060 continue; 1061 auto It = PerBlockAccesses.find(S); 1062 // Rename the phi nodes in our successor block 1063 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) 1064 continue; 1065 AccessList *Accesses = It->second.get(); 1066 auto *Phi = cast<MemoryPhi>(&Accesses->front()); 1067 Phi->addIncoming(LiveOnEntryDef.get(), BB); 1068 } 1069 1070 auto It = PerBlockAccesses.find(BB); 1071 if (It == PerBlockAccesses.end()) 1072 return; 1073 1074 auto &Accesses = It->second; 1075 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { 1076 auto Next = std::next(AI); 1077 // If we have a phi, just remove it. We are going to replace all 1078 // users with live on entry. 1079 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI)) 1080 UseOrDef->setDefiningAccess(LiveOnEntryDef.get()); 1081 else 1082 Accesses->erase(AI); 1083 AI = Next; 1084 } 1085 } 1086 1087 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT) 1088 : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr), 1089 NextID(0) { 1090 buildMemorySSA(); 1091 } 1092 1093 MemorySSA::~MemorySSA() { 1094 // Drop all our references 1095 for (const auto &Pair : PerBlockAccesses) 1096 for (MemoryAccess &MA : *Pair.second) 1097 MA.dropAllReferences(); 1098 } 1099 1100 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) { 1101 auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr)); 1102 1103 if (Res.second) 1104 Res.first->second = llvm::make_unique<AccessList>(); 1105 return Res.first->second.get(); 1106 } 1107 1108 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) { 1109 auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr)); 1110 1111 if (Res.second) 1112 Res.first->second = llvm::make_unique<DefsList>(); 1113 return Res.first->second.get(); 1114 } 1115 1116 namespace llvm { 1117 1118 /// This class is a batch walker of all MemoryUse's in the program, and points 1119 /// their defining access at the thing that actually clobbers them. Because it 1120 /// is a batch walker that touches everything, it does not operate like the 1121 /// other walkers. This walker is basically performing a top-down SSA renaming 1122 /// pass, where the version stack is used as the cache. This enables it to be 1123 /// significantly more time and memory efficient than using the regular walker, 1124 /// which is walking bottom-up. 1125 class MemorySSA::OptimizeUses { 1126 public: 1127 OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA, 1128 DominatorTree *DT) 1129 : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) { 1130 Walker = MSSA->getWalker(); 1131 } 1132 1133 void optimizeUses(); 1134 1135 private: 1136 /// This represents where a given memorylocation is in the stack. 1137 struct MemlocStackInfo { 1138 // This essentially is keeping track of versions of the stack. Whenever 1139 // the stack changes due to pushes or pops, these versions increase. 1140 unsigned long StackEpoch; 1141 unsigned long PopEpoch; 1142 // This is the lower bound of places on the stack to check. It is equal to 1143 // the place the last stack walk ended. 1144 // Note: Correctness depends on this being initialized to 0, which densemap 1145 // does 1146 unsigned long LowerBound; 1147 const BasicBlock *LowerBoundBlock; 1148 // This is where the last walk for this memory location ended. 
1149 unsigned long LastKill; 1150 bool LastKillValid; 1151 Optional<AliasResult> AR; 1152 }; 1153 1154 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, 1155 SmallVectorImpl<MemoryAccess *> &, 1156 DenseMap<MemoryLocOrCall, MemlocStackInfo> &); 1157 1158 MemorySSA *MSSA; 1159 MemorySSAWalker *Walker; 1160 AliasAnalysis *AA; 1161 DominatorTree *DT; 1162 }; 1163 1164 } // end namespace llvm 1165 1166 /// Optimize the uses in a given block This is basically the SSA renaming 1167 /// algorithm, with one caveat: We are able to use a single stack for all 1168 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is 1169 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just 1170 /// going to be some position in that stack of possible ones. 1171 /// 1172 /// We track the stack positions that each MemoryLocation needs 1173 /// to check, and last ended at. This is because we only want to check the 1174 /// things that changed since last time. The same MemoryLocation should 1175 /// get clobbered by the same store (getModRefInfo does not use invariantness or 1176 /// things like this, and if they start, we can modify MemoryLocOrCall to 1177 /// include relevant data) 1178 void MemorySSA::OptimizeUses::optimizeUsesInBlock( 1179 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, 1180 SmallVectorImpl<MemoryAccess *> &VersionStack, 1181 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { 1182 1183 /// If no accesses, nothing to do. 1184 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); 1185 if (Accesses == nullptr) 1186 return; 1187 1188 // Pop everything that doesn't dominate the current block off the stack, 1189 // increment the PopEpoch to account for this. 1190 while (true) { 1191 assert( 1192 !VersionStack.empty() && 1193 "Version stack should have liveOnEntry sentinel dominating everything"); 1194 BasicBlock *BackBlock = VersionStack.back()->getBlock(); 1195 if (DT->dominates(BackBlock, BB)) 1196 break; 1197 while (VersionStack.back()->getBlock() == BackBlock) 1198 VersionStack.pop_back(); 1199 ++PopEpoch; 1200 } 1201 1202 for (MemoryAccess &MA : *Accesses) { 1203 auto *MU = dyn_cast<MemoryUse>(&MA); 1204 if (!MU) { 1205 VersionStack.push_back(&MA); 1206 ++StackEpoch; 1207 continue; 1208 } 1209 1210 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { 1211 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None); 1212 continue; 1213 } 1214 1215 MemoryLocOrCall UseMLOC(MU); 1216 auto &LocInfo = LocStackInfo[UseMLOC]; 1217 // If the pop epoch changed, it means we've removed stuff from top of 1218 // stack due to changing blocks. We may have to reset the lower bound or 1219 // last kill info. 1220 if (LocInfo.PopEpoch != PopEpoch) { 1221 LocInfo.PopEpoch = PopEpoch; 1222 LocInfo.StackEpoch = StackEpoch; 1223 // If the lower bound was in something that no longer dominates us, we 1224 // have to reset it. 1225 // We can't simply track stack size, because the stack may have had 1226 // pushes/pops in the meantime. 1227 // XXX: This is non-optimal, but only is slower cases with heavily 1228 // branching dominator trees. To get the optimal number of queries would 1229 // be to make lowerbound and lastkill a per-loc stack, and pop it until 1230 // the top of that stack dominates us. This does not seem worth it ATM. 1231 // A much cheaper optimization would be to always explore the deepest 1232 // branch of the dominator tree first. 
This will guarantee this resets on 1233 // the smallest set of blocks. 1234 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && 1235 !DT->dominates(LocInfo.LowerBoundBlock, BB)) { 1236 // Reset the lower bound of things to check. 1237 // TODO: Some day we should be able to reset to last kill, rather than 1238 // 0. 1239 LocInfo.LowerBound = 0; 1240 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); 1241 LocInfo.LastKillValid = false; 1242 } 1243 } else if (LocInfo.StackEpoch != StackEpoch) { 1244 // If all that has changed is the StackEpoch, we only have to check the 1245 // new things on the stack, because we've checked everything before. In 1246 // this case, the lower bound of things to check remains the same. 1247 LocInfo.PopEpoch = PopEpoch; 1248 LocInfo.StackEpoch = StackEpoch; 1249 } 1250 if (!LocInfo.LastKillValid) { 1251 LocInfo.LastKill = VersionStack.size() - 1; 1252 LocInfo.LastKillValid = true; 1253 LocInfo.AR = MayAlias; 1254 } 1255 1256 // At this point, we should have corrected last kill and LowerBound to be 1257 // in bounds. 1258 assert(LocInfo.LowerBound < VersionStack.size() && 1259 "Lower bound out of range"); 1260 assert(LocInfo.LastKill < VersionStack.size() && 1261 "Last kill info out of range"); 1262 // In any case, the new upper bound is the top of the stack. 1263 unsigned long UpperBound = VersionStack.size() - 1; 1264 1265 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { 1266 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" 1267 << *(MU->getMemoryInst()) << ")" 1268 << " because there are " 1269 << UpperBound - LocInfo.LowerBound 1270 << " stores to disambiguate\n"); 1271 // Because we did not walk, LastKill is no longer valid, as this may 1272 // have been a kill. 1273 LocInfo.LastKillValid = false; 1274 continue; 1275 } 1276 bool FoundClobberResult = false; 1277 while (UpperBound > LocInfo.LowerBound) { 1278 if (isa<MemoryPhi>(VersionStack[UpperBound])) { 1279 // For phis, use the walker, see where we ended up, go there 1280 Instruction *UseInst = MU->getMemoryInst(); 1281 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst); 1282 // We are guaranteed to find it or something is wrong 1283 while (VersionStack[UpperBound] != Result) { 1284 assert(UpperBound != 0); 1285 --UpperBound; 1286 } 1287 FoundClobberResult = true; 1288 break; 1289 } 1290 1291 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); 1292 // If the lifetime of the pointer ends at this instruction, it's live on 1293 // entry. 1294 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) { 1295 // Reset UpperBound to liveOnEntryDef's place in the stack 1296 UpperBound = 0; 1297 FoundClobberResult = true; 1298 LocInfo.AR = MustAlias; 1299 break; 1300 } 1301 ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA); 1302 if (CA.IsClobber) { 1303 FoundClobberResult = true; 1304 LocInfo.AR = CA.AR; 1305 break; 1306 } 1307 --UpperBound; 1308 } 1309 1310 // Note: Phis always have AliasResult AR set to MayAlias ATM. 1311 1312 // At the end of this loop, UpperBound is either a clobber, or lower bound 1313 // PHI walking may cause it to be < LowerBound, and in fact, < LastKill. 
1314 if (FoundClobberResult || UpperBound < LocInfo.LastKill) { 1315 // We were last killed now by where we got to 1316 if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound])) 1317 LocInfo.AR = None; 1318 MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR); 1319 LocInfo.LastKill = UpperBound; 1320 } else { 1321 // Otherwise, we checked all the new ones, and now we know we can get to 1322 // LastKill. 1323 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR); 1324 } 1325 LocInfo.LowerBound = VersionStack.size() - 1; 1326 LocInfo.LowerBoundBlock = BB; 1327 } 1328 } 1329 1330 /// Optimize uses to point to their actual clobbering definitions. 1331 void MemorySSA::OptimizeUses::optimizeUses() { 1332 SmallVector<MemoryAccess *, 16> VersionStack; 1333 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; 1334 VersionStack.push_back(MSSA->getLiveOnEntryDef()); 1335 1336 unsigned long StackEpoch = 1; 1337 unsigned long PopEpoch = 1; 1338 // We perform a non-recursive top-down dominator tree walk. 1339 for (const auto *DomNode : depth_first(DT->getRootNode())) 1340 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, 1341 LocStackInfo); 1342 } 1343 1344 void MemorySSA::placePHINodes( 1345 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { 1346 // Determine where our MemoryPhi's should go 1347 ForwardIDFCalculator IDFs(*DT); 1348 IDFs.setDefiningBlocks(DefiningBlocks); 1349 SmallVector<BasicBlock *, 32> IDFBlocks; 1350 IDFs.calculate(IDFBlocks); 1351 1352 // Now place MemoryPhi nodes. 1353 for (auto &BB : IDFBlocks) 1354 createMemoryPhi(BB); 1355 } 1356 1357 void MemorySSA::buildMemorySSA() { 1358 // We create an access to represent "live on entry", for things like 1359 // arguments or users of globals, where the memory they use is defined before 1360 // the beginning of the function. We do not actually insert it into the IR. 1361 // We do not define a live on exit for the immediate uses, and thus our 1362 // semantics do *not* imply that something with no immediate uses can simply 1363 // be removed. 1364 BasicBlock &StartingPoint = F.getEntryBlock(); 1365 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr, 1366 &StartingPoint, NextID++)); 1367 1368 // We maintain lists of memory accesses per-block, trading memory for time. We 1369 // could just look up the memory access for every possible instruction in the 1370 // stream. 1371 SmallPtrSet<BasicBlock *, 32> DefiningBlocks; 1372 // Go through each block, figure out where defs occur, and chain together all 1373 // the accesses. 1374 for (BasicBlock &B : F) { 1375 bool InsertIntoDef = false; 1376 AccessList *Accesses = nullptr; 1377 DefsList *Defs = nullptr; 1378 for (Instruction &I : B) { 1379 MemoryUseOrDef *MUD = createNewAccess(&I); 1380 if (!MUD) 1381 continue; 1382 1383 if (!Accesses) 1384 Accesses = getOrCreateAccessList(&B); 1385 Accesses->push_back(MUD); 1386 if (isa<MemoryDef>(MUD)) { 1387 InsertIntoDef = true; 1388 if (!Defs) 1389 Defs = getOrCreateDefsList(&B); 1390 Defs->push_back(*MUD); 1391 } 1392 } 1393 if (InsertIntoDef) 1394 DefiningBlocks.insert(&B); 1395 } 1396 placePHINodes(DefiningBlocks); 1397 1398 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get 1399 // filled in with all blocks. 
1400 SmallPtrSet<BasicBlock *, 16> Visited; 1401 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); 1402 1403 CachingWalker *Walker = getWalkerImpl(); 1404 1405 OptimizeUses(this, Walker, AA, DT).optimizeUses(); 1406 1407 // Mark the uses in unreachable blocks as live on entry, so that they go 1408 // somewhere. 1409 for (auto &BB : F) 1410 if (!Visited.count(&BB)) 1411 markUnreachableAsLiveOnEntry(&BB); 1412 } 1413 1414 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } 1415 1416 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() { 1417 if (Walker) 1418 return Walker.get(); 1419 1420 Walker = llvm::make_unique<CachingWalker>(this, AA, DT); 1421 return Walker.get(); 1422 } 1423 1424 // This is a helper function used by the creation routines. It places NewAccess 1425 // into the access and defs lists for a given basic block, at the given 1426 // insertion point. 1427 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, 1428 const BasicBlock *BB, 1429 InsertionPlace Point) { 1430 auto *Accesses = getOrCreateAccessList(BB); 1431 if (Point == Beginning) { 1432 // If it's a phi node, it goes first, otherwise, it goes after any phi 1433 // nodes. 1434 if (isa<MemoryPhi>(NewAccess)) { 1435 Accesses->push_front(NewAccess); 1436 auto *Defs = getOrCreateDefsList(BB); 1437 Defs->push_front(*NewAccess); 1438 } else { 1439 auto AI = find_if_not( 1440 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1441 Accesses->insert(AI, NewAccess); 1442 if (!isa<MemoryUse>(NewAccess)) { 1443 auto *Defs = getOrCreateDefsList(BB); 1444 auto DI = find_if_not( 1445 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1446 Defs->insert(DI, *NewAccess); 1447 } 1448 } 1449 } else { 1450 Accesses->push_back(NewAccess); 1451 if (!isa<MemoryUse>(NewAccess)) { 1452 auto *Defs = getOrCreateDefsList(BB); 1453 Defs->push_back(*NewAccess); 1454 } 1455 } 1456 BlockNumberingValid.erase(BB); 1457 } 1458 1459 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, 1460 AccessList::iterator InsertPt) { 1461 auto *Accesses = getWritableBlockAccesses(BB); 1462 bool WasEnd = InsertPt == Accesses->end(); 1463 Accesses->insert(AccessList::iterator(InsertPt), What); 1464 if (!isa<MemoryUse>(What)) { 1465 auto *Defs = getOrCreateDefsList(BB); 1466 // If we got asked to insert at the end, we have an easy job, just shove it 1467 // at the end. If we got asked to insert before an existing def, we also get 1468 // an iterator. If we got asked to insert before a use, we have to hunt for 1469 // the next def. 1470 if (WasEnd) { 1471 Defs->push_back(*What); 1472 } else if (isa<MemoryDef>(InsertPt)) { 1473 Defs->insert(InsertPt->getDefsIterator(), *What); 1474 } else { 1475 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) 1476 ++InsertPt; 1477 // Either we found a def, or we are inserting at the end 1478 if (InsertPt == Accesses->end()) 1479 Defs->push_back(*What); 1480 else 1481 Defs->insert(InsertPt->getDefsIterator(), *What); 1482 } 1483 } 1484 BlockNumberingValid.erase(BB); 1485 } 1486 1487 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { 1488 // Keep it in the lookup tables, remove from the lists 1489 removeFromLists(What, false); 1490 1491 // Note that moving should implicitly invalidate the optimized state of a 1492 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a 1493 // MemoryDef. 
1494 if (auto *MD = dyn_cast<MemoryDef>(What)) 1495 MD->resetOptimized(); 1496 What->setBlock(BB); 1497 } 1498 1499 // Move What before Where in the IR. The end result is that What will belong to 1500 // the right lists and have the right Block set, but will not otherwise be 1501 // correct. It will not have the right defining access, and if it is a def, 1502 // things below it will not properly be updated. 1503 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, 1504 AccessList::iterator Where) { 1505 prepareForMoveTo(What, BB); 1506 insertIntoListsBefore(What, BB, Where); 1507 } 1508 1509 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, 1510 InsertionPlace Point) { 1511 if (isa<MemoryPhi>(What)) { 1512 assert(Point == Beginning && 1513 "Can only move a Phi at the beginning of the block"); 1514 // Update lookup table entry 1515 ValueToMemoryAccess.erase(What->getBlock()); 1516 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; 1517 (void)Inserted; 1518 assert(Inserted && "Cannot move a Phi to a block that already has one"); 1519 } 1520 1521 prepareForMoveTo(What, BB); 1522 insertIntoListsForBlock(What, BB, Point); 1523 } 1524 1525 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { 1526 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); 1527 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); 1528 // Phi's always are placed at the front of the block. 1529 insertIntoListsForBlock(Phi, BB, Beginning); 1530 ValueToMemoryAccess[BB] = Phi; 1531 return Phi; 1532 } 1533 1534 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, 1535 MemoryAccess *Definition) { 1536 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); 1537 MemoryUseOrDef *NewAccess = createNewAccess(I); 1538 assert( 1539 NewAccess != nullptr && 1540 "Tried to create a memory access for a non-memory touching instruction"); 1541 NewAccess->setDefiningAccess(Definition); 1542 return NewAccess; 1543 } 1544 1545 // Return true if the instruction has ordering constraints. 1546 // Note specifically that this only considers stores and loads 1547 // because others are still considered ModRef by getModRefInfo. 1548 static inline bool isOrdered(const Instruction *I) { 1549 if (auto *SI = dyn_cast<StoreInst>(I)) { 1550 if (!SI->isUnordered()) 1551 return true; 1552 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 1553 if (!LI->isUnordered()) 1554 return true; 1555 } 1556 return false; 1557 } 1558 1559 /// Helper function to create new memory accesses 1560 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) { 1561 // The assume intrinsic has a control dependency which we model by claiming 1562 // that it writes arbitrarily. Ignore that fake memory dependency here. 1563 // FIXME: Replace this special casing with a more accurate modelling of 1564 // assume's control dependency. 1565 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 1566 if (II->getIntrinsicID() == Intrinsic::assume) 1567 return nullptr; 1568 1569 // Find out what affect this instruction has on memory. 1570 ModRefInfo ModRef = AA->getModRefInfo(I, None); 1571 // The isOrdered check is used to ensure that volatiles end up as defs 1572 // (atomics end up as ModRef right now anyway). Until we separate the 1573 // ordering chain from the memory chain, this enables people to see at least 1574 // some relative ordering to volatiles. Note that getClobberingMemoryAccess 1575 // will still give an answer that bypasses other volatile loads. 
/// Helper function to create new memory accesses
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I, None);
  // The isOrdered check is used to ensure that volatiles end up as defs
  // (atomics end up as ModRef right now anyway). Until we separate the
  // ordering chain from the memory chain, this enables people to see at least
  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
  // will still give an answer that bypasses other volatile loads. TODO:
  // Separate memory aliasing and ordering into two different chains so that
  // we can precisely represent both "what memory will this read/write/is
  // clobbered by" and "what instructions can I move this past".
  bool Def = isModSet(ModRef) || isOrdered(I);
  bool Use = isRefSet(ModRef);

  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore them.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}
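// A sketch of how a caller (such as the updater) is expected to use the two
// removal routines together when deleting a dead access MA (hypothetical
// caller code): first redirect every use of MA to some other access, then
//   MSSA.removeFromLookups(MA); // asserts MA->use_empty()
//   MSSA.removeFromLists(MA);   // unlinks MA and, by default, deletes it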
/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning
  // list first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  Walker->verify(this);
  verifyClobberSanity(F);
}

/// Check sanity of the clobbering instruction for access \p MA.
void MemorySSA::checkClobberSanityAccess(const MemoryAccess *MA) const {
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    if (!MUD->isOptimized())
      return;
    auto *I = MUD->getMemoryInst();
    auto Loc = MemoryLocation::getOrNone(I);
    if (Loc == None)
      return;
    auto *Clobber = MUD->getOptimized();
    UpwardsMemoryQuery Q(I, MUD);
    checkClobberSanity(MUD, Clobber, *Loc, *this, Q, *AA);
  }
}

void MemorySSA::verifyClobberSanity(const Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (const BasicBlock &BB : F) {
    const AccessList *Accesses = getBlockAccesses(&BB);
    if (!Accesses)
      continue;
    for (const MemoryAccess &MA : *Accesses)
      checkClobberSanityAccess(&MA);
  }
#endif
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}
/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
#ifndef NDEBUG
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list.
    // Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}

/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
    }
  }
#endif
}
/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
#endif
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
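// For example, if a block's access list is [MemoryPhi, MemoryDef, MemoryUse],
// renumberBlock assigns them the numbers 1, 2, and 3 respectively, and
// locallyDominates (below) reduces to comparing those numbers: the MemoryDef
// locally dominates the MemoryUse because 2 < 3.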
/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by
  // another memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other
  // memory access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
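// Illustrative output (the IR, block names, and IDs are made up): when
// MemorySSA::print annotates a function, the routines above produce comments
// of roughly this shape:
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//   ; MemoryUse(1) MustAlias
//     %v = load i32, i32* %p
//   ; 2 = MemoryPhi({if.then,1},{if.else,liveOnEntry})
// where the trailing alias result only appears if an optimized access type
// has been recorded for the access.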
void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}
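// Illustrative note: the "cache" this walker maintains is simply each
// access's stored optimized pointer. For example (hypothetical client code),
// after transforming code around a MemoryDef MD, a caller can force the next
// query to recompute MD's clobber with
//   MSSA.getWalker()->invalidateInfo(MD);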
/// Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  return Walker.findClobber(StartingAccess, Q);
}

MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized())
    return StartingAccess->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();
  UpwardsMemoryQuery Q(I, StartingAccess);
  // We can't sanely do anything with fences, since they conservatively
  // clobber all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!Q.IsCall && I->isFenceLike())
    return StartingAccess;

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  // Start with the thing we already think clobbers this location.
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
    StartingAccess->setOptimized(DefiningAccess);
    StartingAccess->setOptimizedAccessType(None);
    return DefiningAccess;
  }

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Result << "\n");

  StartingAccess->setOptimized(Result);
  if (MSSA->isLiveOnEntryDef(Result))
    StartingAccess->setOptimizedAccessType(None);
  else if (Q.AR == MustAlias)
    StartingAccess->setOptimizedAccessType(MustAlias);

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}
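// Illustrative example (hypothetical client code, not part of this file): a
// new-pass-manager transformation that holds a MemorySSA result can query the
// clobbering access for a load roughly as follows; Load is assumed to be an
// Instruction that reads memory.
//
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   if (MemoryUseOrDef *Acc = MSSA.getMemoryAccess(&Load)) {
//     MemoryAccess *Clobber =
//         MSSA.getWalker()->getClobberingMemoryAccess(Acc);
//     if (MSSA.isLiveOnEntryDef(Clobber)) {
//       // No store in this function clobbers the loaded location.
//     }
//   }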