//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
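/// The annotations are emitted just above the block or instruction they
/// describe. For illustration only (exact numbering depends on the function),
/// the printed IR looks roughly like:
///
///   ; 1 = MemoryDef(liveOnEntry)
///   store i32 0, i32* %p
///   ; MemoryUse(1)
///   %v = load i32, i32* %p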
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           BatchAAResults &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
    default:
      return false;
    }
  }
  return false;
}

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(MemoryLocation(
                                  cast<LoadInst>(I)->getPointerOperand())));
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
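///
/// Note: the definition is marked LLVM_ATTRIBUTE_UNUSED because its call
/// sites (e.g. the EXPENSIVE_CHECKS-guarded check in
/// ClobberWalker::findClobber()) can be compiled out, which would otherwise
/// leave this function unreferenced and trigger a warning.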
385 386 template <typename AliasAnalysisType> 387 LLVM_ATTRIBUTE_UNUSED static void 388 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt, 389 const MemoryLocation &StartLoc, const MemorySSA &MSSA, 390 const UpwardsMemoryQuery &Query, AliasAnalysisType &AA, 391 bool AllowImpreciseClobber = false) { 392 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?"); 393 394 if (MSSA.isLiveOnEntryDef(Start)) { 395 assert(MSSA.isLiveOnEntryDef(ClobberAt) && 396 "liveOnEntry must clobber itself"); 397 return; 398 } 399 400 bool FoundClobber = false; 401 DenseSet<ConstMemoryAccessPair> VisitedPhis; 402 SmallVector<ConstMemoryAccessPair, 8> Worklist; 403 Worklist.emplace_back(Start, StartLoc); 404 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one 405 // is found, complain. 406 while (!Worklist.empty()) { 407 auto MAP = Worklist.pop_back_val(); 408 // All we care about is that nothing from Start to ClobberAt clobbers Start. 409 // We learn nothing from revisiting nodes. 410 if (!VisitedPhis.insert(MAP).second) 411 continue; 412 413 for (const auto *MA : def_chain(MAP.first)) { 414 if (MA == ClobberAt) { 415 if (const auto *MD = dyn_cast<MemoryDef>(MA)) { 416 // instructionClobbersQuery isn't essentially free, so don't use `|=`, 417 // since it won't let us short-circuit. 418 // 419 // Also, note that this can't be hoisted out of the `Worklist` loop, 420 // since MD may only act as a clobber for 1 of N MemoryLocations. 421 FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD); 422 if (!FoundClobber) { 423 ClobberAlias CA = 424 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA); 425 if (CA.IsClobber) { 426 FoundClobber = true; 427 // Not used: CA.AR; 428 } 429 } 430 } 431 break; 432 } 433 434 // We should never hit liveOnEntry, unless it's the clobber. 435 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?"); 436 437 if (const auto *MD = dyn_cast<MemoryDef>(MA)) { 438 // If Start is a Def, skip self. 439 if (MD == Start) 440 continue; 441 442 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA) 443 .IsClobber && 444 "Found clobber before reaching ClobberAt!"); 445 continue; 446 } 447 448 if (const auto *MU = dyn_cast<MemoryUse>(MA)) { 449 (void)MU; 450 assert (MU == Start && 451 "Can only find use in def chain if Start is a use"); 452 continue; 453 } 454 455 assert(isa<MemoryPhi>(MA)); 456 Worklist.append( 457 upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}), 458 upward_defs_end()); 459 } 460 } 461 462 // If the verify is done following an optimization, it's possible that 463 // ClobberAt was a conservative clobbering, that we can now infer is not a 464 // true clobbering access. Don't fail the verify if that's the case. 465 // We do have accesses that claim they're optimized, but could be optimized 466 // further. Updating all these can be expensive, so allow it for now (FIXME). 467 if (AllowImpreciseClobber) 468 return; 469 470 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a 471 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point. 472 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) && 473 "ClobberAt never acted as a clobber"); 474 } 475 476 namespace { 477 478 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up 479 /// in one class. 480 template <class AliasAnalysisType> class ClobberWalker { 481 /// Save a few bytes by using unsigned instead of size_t. 
482 using ListIndex = unsigned; 483 484 /// Represents a span of contiguous MemoryDefs, potentially ending in a 485 /// MemoryPhi. 486 struct DefPath { 487 MemoryLocation Loc; 488 // Note that, because we always walk in reverse, Last will always dominate 489 // First. Also note that First and Last are inclusive. 490 MemoryAccess *First; 491 MemoryAccess *Last; 492 Optional<ListIndex> Previous; 493 494 DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last, 495 Optional<ListIndex> Previous) 496 : Loc(Loc), First(First), Last(Last), Previous(Previous) {} 497 498 DefPath(const MemoryLocation &Loc, MemoryAccess *Init, 499 Optional<ListIndex> Previous) 500 : DefPath(Loc, Init, Init, Previous) {} 501 }; 502 503 const MemorySSA &MSSA; 504 AliasAnalysisType &AA; 505 DominatorTree &DT; 506 UpwardsMemoryQuery *Query; 507 508 // Phi optimization bookkeeping 509 SmallVector<DefPath, 32> Paths; 510 DenseSet<ConstMemoryAccessPair> VisitedPhis; 511 512 /// Find the nearest def or phi that `From` can legally be optimized to. 513 const MemoryAccess *getWalkTarget(const MemoryPhi *From) const { 514 assert(From->getNumOperands() && "Phi with no operands?"); 515 516 BasicBlock *BB = From->getBlock(); 517 MemoryAccess *Result = MSSA.getLiveOnEntryDef(); 518 DomTreeNode *Node = DT.getNode(BB); 519 while ((Node = Node->getIDom())) { 520 auto *Defs = MSSA.getBlockDefs(Node->getBlock()); 521 if (Defs) 522 return &*Defs->rbegin(); 523 } 524 return Result; 525 } 526 527 /// Result of calling walkToPhiOrClobber. 528 struct UpwardsWalkResult { 529 /// The "Result" of the walk. Either a clobber, the last thing we walked, or 530 /// both. Include alias info when clobber found. 531 MemoryAccess *Result; 532 bool IsKnownClobber; 533 Optional<AliasResult> AR; 534 }; 535 536 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last. 537 /// This will update Desc.Last as it walks. It will (optionally) also stop at 538 /// StopAt. 539 /// 540 /// This does not test for whether StopAt is a clobber 541 UpwardsWalkResult 542 walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr, 543 const MemoryAccess *SkipStopAt = nullptr) const { 544 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world"); 545 546 for (MemoryAccess *Current : def_chain(Desc.Last)) { 547 Desc.Last = Current; 548 if (Current == StopAt || Current == SkipStopAt) 549 return {Current, false, MayAlias}; 550 551 if (auto *MD = dyn_cast<MemoryDef>(Current)) { 552 if (MSSA.isLiveOnEntryDef(MD)) 553 return {MD, true, MustAlias}; 554 ClobberAlias CA = 555 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA); 556 if (CA.IsClobber) 557 return {MD, true, CA.AR}; 558 } 559 } 560 561 assert(isa<MemoryPhi>(Desc.Last) && 562 "Ended at a non-clobber that's not a phi?"); 563 return {Desc.Last, false, MayAlias}; 564 } 565 566 void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches, 567 ListIndex PriorNode) { 568 auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}), 569 upward_defs_end()); 570 for (const MemoryAccessPair &P : UpwardDefs) { 571 PausedSearches.push_back(Paths.size()); 572 Paths.emplace_back(P.second, P.first, PriorNode); 573 } 574 } 575 576 /// Represents a search that terminated after finding a clobber. This clobber 577 /// may or may not be present in the path of defs from LastNode..SearchStart, 578 /// since it may have been retrieved from cache. 
579 struct TerminatedPath { 580 MemoryAccess *Clobber; 581 ListIndex LastNode; 582 }; 583 584 /// Get an access that keeps us from optimizing to the given phi. 585 /// 586 /// PausedSearches is an array of indices into the Paths array. Its incoming 587 /// value is the indices of searches that stopped at the last phi optimization 588 /// target. It's left in an unspecified state. 589 /// 590 /// If this returns None, NewPaused is a vector of searches that terminated 591 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state. 592 Optional<TerminatedPath> 593 getBlockingAccess(const MemoryAccess *StopWhere, 594 SmallVectorImpl<ListIndex> &PausedSearches, 595 SmallVectorImpl<ListIndex> &NewPaused, 596 SmallVectorImpl<TerminatedPath> &Terminated) { 597 assert(!PausedSearches.empty() && "No searches to continue?"); 598 599 // BFS vs DFS really doesn't make a difference here, so just do a DFS with 600 // PausedSearches as our stack. 601 while (!PausedSearches.empty()) { 602 ListIndex PathIndex = PausedSearches.pop_back_val(); 603 DefPath &Node = Paths[PathIndex]; 604 605 // If we've already visited this path with this MemoryLocation, we don't 606 // need to do so again. 607 // 608 // NOTE: That we just drop these paths on the ground makes caching 609 // behavior sporadic. e.g. given a diamond: 610 // A 611 // B C 612 // D 613 // 614 // ...If we walk D, B, A, C, we'll only cache the result of phi 615 // optimization for A, B, and D; C will be skipped because it dies here. 616 // This arguably isn't the worst thing ever, since: 617 // - We generally query things in a top-down order, so if we got below D 618 // without needing cache entries for {C, MemLoc}, then chances are 619 // that those cache entries would end up ultimately unused. 620 // - We still cache things for A, so C only needs to walk up a bit. 621 // If this behavior becomes problematic, we can fix without a ton of extra 622 // work. 623 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) 624 continue; 625 626 const MemoryAccess *SkipStopWhere = nullptr; 627 if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) { 628 assert(isa<MemoryDef>(Query->OriginalAccess)); 629 SkipStopWhere = Query->OriginalAccess; 630 } 631 632 UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere, 633 /*SkipStopAt=*/SkipStopWhere); 634 if (Res.IsKnownClobber) { 635 assert(Res.Result != StopWhere && Res.Result != SkipStopWhere); 636 // If this wasn't a cache hit, we hit a clobber when walking. That's a 637 // failure. 638 TerminatedPath Term{Res.Result, PathIndex}; 639 if (!MSSA.dominates(Res.Result, StopWhere)) 640 return Term; 641 642 // Otherwise, it's a valid thing to potentially optimize to. 643 Terminated.push_back(Term); 644 continue; 645 } 646 647 if (Res.Result == StopWhere || Res.Result == SkipStopWhere) { 648 // We've hit our target. Save this path off for if we want to continue 649 // walking. If we are in the mode of skipping the OriginalAccess, and 650 // we've reached back to the OriginalAccess, do not save path, we've 651 // just looped back to self. 
652 if (Res.Result != SkipStopWhere) 653 NewPaused.push_back(PathIndex); 654 continue; 655 } 656 657 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber"); 658 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex); 659 } 660 661 return None; 662 } 663 664 template <typename T, typename Walker> 665 struct generic_def_path_iterator 666 : public iterator_facade_base<generic_def_path_iterator<T, Walker>, 667 std::forward_iterator_tag, T *> { 668 generic_def_path_iterator() {} 669 generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {} 670 671 T &operator*() const { return curNode(); } 672 673 generic_def_path_iterator &operator++() { 674 N = curNode().Previous; 675 return *this; 676 } 677 678 bool operator==(const generic_def_path_iterator &O) const { 679 if (N.hasValue() != O.N.hasValue()) 680 return false; 681 return !N.hasValue() || *N == *O.N; 682 } 683 684 private: 685 T &curNode() const { return W->Paths[*N]; } 686 687 Walker *W = nullptr; 688 Optional<ListIndex> N = None; 689 }; 690 691 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>; 692 using const_def_path_iterator = 693 generic_def_path_iterator<const DefPath, const ClobberWalker>; 694 695 iterator_range<def_path_iterator> def_path(ListIndex From) { 696 return make_range(def_path_iterator(this, From), def_path_iterator()); 697 } 698 699 iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const { 700 return make_range(const_def_path_iterator(this, From), 701 const_def_path_iterator()); 702 } 703 704 struct OptznResult { 705 /// The path that contains our result. 706 TerminatedPath PrimaryClobber; 707 /// The paths that we can legally cache back from, but that aren't 708 /// necessarily the result of the Phi optimization. 709 SmallVector<TerminatedPath, 4> OtherClobbers; 710 }; 711 712 ListIndex defPathIndex(const DefPath &N) const { 713 // The assert looks nicer if we don't need to do &N 714 const DefPath *NP = &N; 715 assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() && 716 "Out of bounds DefPath!"); 717 return NP - &Paths.front(); 718 } 719 720 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths 721 /// that act as legal clobbers. Note that this won't return *all* clobbers. 722 /// 723 /// Phi optimization algorithm tl;dr: 724 /// - Find the earliest def/phi, A, we can optimize to 725 /// - Find if all paths from the starting memory access ultimately reach A 726 /// - If not, optimization isn't possible. 727 /// - Otherwise, walk from A to another clobber or phi, A'. 728 /// - If A' is a def, we're done. 729 /// - If A' is a phi, try to optimize it. 730 /// 731 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path 732 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found. 733 OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start, 734 const MemoryLocation &Loc) { 735 assert(Paths.empty() && VisitedPhis.empty() && 736 "Reset the optimization state."); 737 738 Paths.emplace_back(Loc, Start, Phi, None); 739 // Stores how many "valid" optimization nodes we had prior to calling 740 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker. 
741 auto PriorPathsSize = Paths.size(); 742 743 SmallVector<ListIndex, 16> PausedSearches; 744 SmallVector<ListIndex, 8> NewPaused; 745 SmallVector<TerminatedPath, 4> TerminatedPaths; 746 747 addSearches(Phi, PausedSearches, 0); 748 749 // Moves the TerminatedPath with the "most dominated" Clobber to the end of 750 // Paths. 751 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) { 752 assert(!Paths.empty() && "Need a path to move"); 753 auto Dom = Paths.begin(); 754 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I) 755 if (!MSSA.dominates(I->Clobber, Dom->Clobber)) 756 Dom = I; 757 auto Last = Paths.end() - 1; 758 if (Last != Dom) 759 std::iter_swap(Last, Dom); 760 }; 761 762 MemoryPhi *Current = Phi; 763 while (true) { 764 assert(!MSSA.isLiveOnEntryDef(Current) && 765 "liveOnEntry wasn't treated as a clobber?"); 766 767 const auto *Target = getWalkTarget(Current); 768 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal 769 // optimization for the prior phi. 770 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) { 771 return MSSA.dominates(P.Clobber, Target); 772 })); 773 774 // FIXME: This is broken, because the Blocker may be reported to be 775 // liveOnEntry, and we'll happily wait for that to disappear (read: never) 776 // For the moment, this is fine, since we do nothing with blocker info. 777 if (Optional<TerminatedPath> Blocker = getBlockingAccess( 778 Target, PausedSearches, NewPaused, TerminatedPaths)) { 779 780 // Find the node we started at. We can't search based on N->Last, since 781 // we may have gone around a loop with a different MemoryLocation. 782 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) { 783 return defPathIndex(N) < PriorPathsSize; 784 }); 785 assert(Iter != def_path_iterator()); 786 787 DefPath &CurNode = *Iter; 788 assert(CurNode.Last == Current); 789 790 // Two things: 791 // A. We can't reliably cache all of NewPaused back. Consider a case 792 // where we have two paths in NewPaused; one of which can't optimize 793 // above this phi, whereas the other can. If we cache the second path 794 // back, we'll end up with suboptimal cache entries. We can handle 795 // cases like this a bit better when we either try to find all 796 // clobbers that block phi optimization, or when our cache starts 797 // supporting unfinished searches. 798 // B. We can't reliably cache TerminatedPaths back here without doing 799 // extra checks; consider a case like: 800 // T 801 // / \ 802 // D C 803 // \ / 804 // S 805 // Where T is our target, C is a node with a clobber on it, D is a 806 // diamond (with a clobber *only* on the left or right node, N), and 807 // S is our start. Say we walk to D, through the node opposite N 808 // (read: ignoring the clobber), and see a cache entry in the top 809 // node of D. That cache entry gets put into TerminatedPaths. We then 810 // walk up to C (N is later in our worklist), find the clobber, and 811 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache 812 // the bottom part of D to the cached clobber, ignoring the clobber 813 // in N. Again, this problem goes away if we start tracking all 814 // blockers for a given phi optimization. 815 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)}; 816 return {Result, {}}; 817 } 818 819 // If there's nothing left to search, then all paths led to valid clobbers 820 // that we got from our cache; pick the nearest to the start, and allow 821 // the rest to be cached back. 
822 if (NewPaused.empty()) { 823 MoveDominatedPathToEnd(TerminatedPaths); 824 TerminatedPath Result = TerminatedPaths.pop_back_val(); 825 return {Result, std::move(TerminatedPaths)}; 826 } 827 828 MemoryAccess *DefChainEnd = nullptr; 829 SmallVector<TerminatedPath, 4> Clobbers; 830 for (ListIndex Paused : NewPaused) { 831 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]); 832 if (WR.IsKnownClobber) 833 Clobbers.push_back({WR.Result, Paused}); 834 else 835 // Micro-opt: If we hit the end of the chain, save it. 836 DefChainEnd = WR.Result; 837 } 838 839 if (!TerminatedPaths.empty()) { 840 // If we couldn't find the dominating phi/liveOnEntry in the above loop, 841 // do it now. 842 if (!DefChainEnd) 843 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target))) 844 DefChainEnd = MA; 845 846 // If any of the terminated paths don't dominate the phi we'll try to 847 // optimize, we need to figure out what they are and quit. 848 const BasicBlock *ChainBB = DefChainEnd->getBlock(); 849 for (const TerminatedPath &TP : TerminatedPaths) { 850 // Because we know that DefChainEnd is as "high" as we can go, we 851 // don't need local dominance checks; BB dominance is sufficient. 852 if (DT.dominates(ChainBB, TP.Clobber->getBlock())) 853 Clobbers.push_back(TP); 854 } 855 } 856 857 // If we have clobbers in the def chain, find the one closest to Current 858 // and quit. 859 if (!Clobbers.empty()) { 860 MoveDominatedPathToEnd(Clobbers); 861 TerminatedPath Result = Clobbers.pop_back_val(); 862 return {Result, std::move(Clobbers)}; 863 } 864 865 assert(all_of(NewPaused, 866 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; })); 867 868 // Because liveOnEntry is a clobber, this must be a phi. 869 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd); 870 871 PriorPathsSize = Paths.size(); 872 PausedSearches.clear(); 873 for (ListIndex I : NewPaused) 874 addSearches(DefChainPhi, PausedSearches, I); 875 NewPaused.clear(); 876 877 Current = DefChainPhi; 878 } 879 } 880 881 void verifyOptResult(const OptznResult &R) const { 882 assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) { 883 return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber); 884 })); 885 } 886 887 void resetPhiOptznState() { 888 Paths.clear(); 889 VisitedPhis.clear(); 890 } 891 892 public: 893 ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT) 894 : MSSA(MSSA), AA(AA), DT(DT) {} 895 896 AliasAnalysisType *getAA() { return &AA; } 897 /// Finds the nearest clobber for the given query, optimizing phis if 898 /// possible. 899 MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) { 900 Query = &Q; 901 902 MemoryAccess *Current = Start; 903 // This walker pretends uses don't exist. If we're handed one, silently grab 904 // its def. 
(This has the nice side-effect of ensuring we never cache uses) 905 if (auto *MU = dyn_cast<MemoryUse>(Start)) 906 Current = MU->getDefiningAccess(); 907 908 DefPath FirstDesc(Q.StartingLoc, Current, Current, None); 909 // Fast path for the overly-common case (no crazy phi optimization 910 // necessary) 911 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc); 912 MemoryAccess *Result; 913 if (WalkResult.IsKnownClobber) { 914 Result = WalkResult.Result; 915 Q.AR = WalkResult.AR; 916 } else { 917 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last), 918 Current, Q.StartingLoc); 919 verifyOptResult(OptRes); 920 resetPhiOptznState(); 921 Result = OptRes.PrimaryClobber.Clobber; 922 } 923 924 #ifdef EXPENSIVE_CHECKS 925 if (!Q.SkipSelfAccess) 926 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA); 927 #endif 928 return Result; 929 } 930 }; 931 932 struct RenamePassData { 933 DomTreeNode *DTN; 934 DomTreeNode::const_iterator ChildIt; 935 MemoryAccess *IncomingVal; 936 937 RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It, 938 MemoryAccess *M) 939 : DTN(D), ChildIt(It), IncomingVal(M) {} 940 941 void swap(RenamePassData &RHS) { 942 std::swap(DTN, RHS.DTN); 943 std::swap(ChildIt, RHS.ChildIt); 944 std::swap(IncomingVal, RHS.IncomingVal); 945 } 946 }; 947 948 } // end anonymous namespace 949 950 namespace llvm { 951 952 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase { 953 ClobberWalker<AliasAnalysisType> Walker; 954 MemorySSA *MSSA; 955 956 public: 957 ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D) 958 : Walker(*M, *A, *D), MSSA(M) {} 959 960 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, 961 const MemoryLocation &); 962 // Second argument (bool), defines whether the clobber search should skip the 963 // original queried access. If true, there will be a follow-up query searching 964 // for a clobber access past "self". Note that the Optimized access is not 965 // updated if a new clobber is found by this SkipSelf search. If this 966 // additional query becomes heavily used we may decide to cache the result. 967 // Walker instantiations will decide how to set the SkipSelf bool. 968 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool); 969 }; 970 971 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no 972 /// longer does caching on its own, but the name has been retained for the 973 /// moment. 
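/// A minimal usage sketch (illustrative only; assumes an existing MemorySSA
/// analysis `MSSA` and a memory-touching instruction `I`):
///
///   MemorySSAWalker *W = MSSA.getWalker();
///   MemoryAccess *Clobber = W->getClobberingMemoryAccess(I);
///
/// getWalker() hands back this caching walker, and the query returns the
/// nearest dominating access that MemorySSA believes clobbers `I`.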
974 template <class AliasAnalysisType> 975 class MemorySSA::CachingWalker final : public MemorySSAWalker { 976 ClobberWalkerBase<AliasAnalysisType> *Walker; 977 978 public: 979 CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) 980 : MemorySSAWalker(M), Walker(W) {} 981 ~CachingWalker() override = default; 982 983 using MemorySSAWalker::getClobberingMemoryAccess; 984 985 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { 986 return Walker->getClobberingMemoryAccessBase(MA, false); 987 } 988 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, 989 const MemoryLocation &Loc) override { 990 return Walker->getClobberingMemoryAccessBase(MA, Loc); 991 } 992 993 void invalidateInfo(MemoryAccess *MA) override { 994 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 995 MUD->resetOptimized(); 996 } 997 }; 998 999 template <class AliasAnalysisType> 1000 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker { 1001 ClobberWalkerBase<AliasAnalysisType> *Walker; 1002 1003 public: 1004 SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W) 1005 : MemorySSAWalker(M), Walker(W) {} 1006 ~SkipSelfWalker() override = default; 1007 1008 using MemorySSAWalker::getClobberingMemoryAccess; 1009 1010 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override { 1011 return Walker->getClobberingMemoryAccessBase(MA, true); 1012 } 1013 MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, 1014 const MemoryLocation &Loc) override { 1015 return Walker->getClobberingMemoryAccessBase(MA, Loc); 1016 } 1017 1018 void invalidateInfo(MemoryAccess *MA) override { 1019 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 1020 MUD->resetOptimized(); 1021 } 1022 }; 1023 1024 } // end namespace llvm 1025 1026 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal, 1027 bool RenameAllUses) { 1028 // Pass through values to our successors 1029 for (const BasicBlock *S : successors(BB)) { 1030 auto It = PerBlockAccesses.find(S); 1031 // Rename the phi nodes in our successor block 1032 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) 1033 continue; 1034 AccessList *Accesses = It->second.get(); 1035 auto *Phi = cast<MemoryPhi>(&Accesses->front()); 1036 if (RenameAllUses) { 1037 int PhiIndex = Phi->getBasicBlockIndex(BB); 1038 assert(PhiIndex != -1 && "Incomplete phi during partial rename"); 1039 Phi->setIncomingValue(PhiIndex, IncomingVal); 1040 } else 1041 Phi->addIncoming(IncomingVal, BB); 1042 } 1043 } 1044 1045 /// Rename a single basic block into MemorySSA form. 1046 /// Uses the standard SSA renaming algorithm. 1047 /// \returns The new incoming value. 1048 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal, 1049 bool RenameAllUses) { 1050 auto It = PerBlockAccesses.find(BB); 1051 // Skip most processing if the list is empty. 1052 if (It != PerBlockAccesses.end()) { 1053 AccessList *Accesses = It->second.get(); 1054 for (MemoryAccess &L : *Accesses) { 1055 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) { 1056 if (MUD->getDefiningAccess() == nullptr || RenameAllUses) 1057 MUD->setDefiningAccess(IncomingVal); 1058 if (isa<MemoryDef>(&L)) 1059 IncomingVal = &L; 1060 } else { 1061 IncomingVal = &L; 1062 } 1063 } 1064 } 1065 return IncomingVal; 1066 } 1067 1068 /// This is the standard SSA renaming algorithm. 1069 /// 1070 /// We walk the dominator tree in preorder, renaming accesses, and then filling 1071 /// in phi nodes in our successors. 
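/// The walk is iterative: an explicit stack of RenamePassData entries is used
/// instead of recursion, so deep dominator trees cannot overflow the call
/// stack.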
1072 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal, 1073 SmallPtrSetImpl<BasicBlock *> &Visited, 1074 bool SkipVisited, bool RenameAllUses) { 1075 SmallVector<RenamePassData, 32> WorkStack; 1076 // Skip everything if we already renamed this block and we are skipping. 1077 // Note: You can't sink this into the if, because we need it to occur 1078 // regardless of whether we skip blocks or not. 1079 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second; 1080 if (SkipVisited && AlreadyVisited) 1081 return; 1082 1083 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses); 1084 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses); 1085 WorkStack.push_back({Root, Root->begin(), IncomingVal}); 1086 1087 while (!WorkStack.empty()) { 1088 DomTreeNode *Node = WorkStack.back().DTN; 1089 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt; 1090 IncomingVal = WorkStack.back().IncomingVal; 1091 1092 if (ChildIt == Node->end()) { 1093 WorkStack.pop_back(); 1094 } else { 1095 DomTreeNode *Child = *ChildIt; 1096 ++WorkStack.back().ChildIt; 1097 BasicBlock *BB = Child->getBlock(); 1098 // Note: You can't sink this into the if, because we need it to occur 1099 // regardless of whether we skip blocks or not. 1100 AlreadyVisited = !Visited.insert(BB).second; 1101 if (SkipVisited && AlreadyVisited) { 1102 // We already visited this during our renaming, which can happen when 1103 // being asked to rename multiple blocks. Figure out the incoming val, 1104 // which is the last def. 1105 // Incoming value can only change if there is a block def, and in that 1106 // case, it's the last block def in the list. 1107 if (auto *BlockDefs = getWritableBlockDefs(BB)) 1108 IncomingVal = &*BlockDefs->rbegin(); 1109 } else 1110 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses); 1111 renameSuccessorPhis(BB, IncomingVal, RenameAllUses); 1112 WorkStack.push_back({Child, Child->begin(), IncomingVal}); 1113 } 1114 } 1115 } 1116 1117 /// This handles unreachable block accesses by deleting phi nodes in 1118 /// unreachable blocks, and marking all other unreachable MemoryAccess's as 1119 /// being uses of the live on entry definition. 1120 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) { 1121 assert(!DT->isReachableFromEntry(BB) && 1122 "Reachable block found while handling unreachable blocks"); 1123 1124 // Make sure phi nodes in our reachable successors end up with a 1125 // LiveOnEntryDef for our incoming edge, even though our block is forward 1126 // unreachable. We could just disconnect these blocks from the CFG fully, 1127 // but we do not right now. 1128 for (const BasicBlock *S : successors(BB)) { 1129 if (!DT->isReachableFromEntry(S)) 1130 continue; 1131 auto It = PerBlockAccesses.find(S); 1132 // Rename the phi nodes in our successor block 1133 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front())) 1134 continue; 1135 AccessList *Accesses = It->second.get(); 1136 auto *Phi = cast<MemoryPhi>(&Accesses->front()); 1137 Phi->addIncoming(LiveOnEntryDef.get(), BB); 1138 } 1139 1140 auto It = PerBlockAccesses.find(BB); 1141 if (It == PerBlockAccesses.end()) 1142 return; 1143 1144 auto &Accesses = It->second; 1145 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) { 1146 auto Next = std::next(AI); 1147 // If we have a phi, just remove it. We are going to replace all 1148 // users with live on entry. 
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA and can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, BatchAAResults *BAA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
1228 unsigned long LastKill; 1229 bool LastKillValid; 1230 Optional<AliasResult> AR; 1231 }; 1232 1233 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &, 1234 SmallVectorImpl<MemoryAccess *> &, 1235 DenseMap<MemoryLocOrCall, MemlocStackInfo> &); 1236 1237 MemorySSA *MSSA; 1238 MemorySSAWalker *Walker; 1239 BatchAAResults *AA; 1240 DominatorTree *DT; 1241 }; 1242 1243 } // end namespace llvm 1244 1245 /// Optimize the uses in a given block This is basically the SSA renaming 1246 /// algorithm, with one caveat: We are able to use a single stack for all 1247 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is 1248 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just 1249 /// going to be some position in that stack of possible ones. 1250 /// 1251 /// We track the stack positions that each MemoryLocation needs 1252 /// to check, and last ended at. This is because we only want to check the 1253 /// things that changed since last time. The same MemoryLocation should 1254 /// get clobbered by the same store (getModRefInfo does not use invariantness or 1255 /// things like this, and if they start, we can modify MemoryLocOrCall to 1256 /// include relevant data) 1257 void MemorySSA::OptimizeUses::optimizeUsesInBlock( 1258 const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch, 1259 SmallVectorImpl<MemoryAccess *> &VersionStack, 1260 DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) { 1261 1262 /// If no accesses, nothing to do. 1263 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB); 1264 if (Accesses == nullptr) 1265 return; 1266 1267 // Pop everything that doesn't dominate the current block off the stack, 1268 // increment the PopEpoch to account for this. 1269 while (true) { 1270 assert( 1271 !VersionStack.empty() && 1272 "Version stack should have liveOnEntry sentinel dominating everything"); 1273 BasicBlock *BackBlock = VersionStack.back()->getBlock(); 1274 if (DT->dominates(BackBlock, BB)) 1275 break; 1276 while (VersionStack.back()->getBlock() == BackBlock) 1277 VersionStack.pop_back(); 1278 ++PopEpoch; 1279 } 1280 1281 for (MemoryAccess &MA : *Accesses) { 1282 auto *MU = dyn_cast<MemoryUse>(&MA); 1283 if (!MU) { 1284 VersionStack.push_back(&MA); 1285 ++StackEpoch; 1286 continue; 1287 } 1288 1289 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) { 1290 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None); 1291 continue; 1292 } 1293 1294 MemoryLocOrCall UseMLOC(MU); 1295 auto &LocInfo = LocStackInfo[UseMLOC]; 1296 // If the pop epoch changed, it means we've removed stuff from top of 1297 // stack due to changing blocks. We may have to reset the lower bound or 1298 // last kill info. 1299 if (LocInfo.PopEpoch != PopEpoch) { 1300 LocInfo.PopEpoch = PopEpoch; 1301 LocInfo.StackEpoch = StackEpoch; 1302 // If the lower bound was in something that no longer dominates us, we 1303 // have to reset it. 1304 // We can't simply track stack size, because the stack may have had 1305 // pushes/pops in the meantime. 1306 // XXX: This is non-optimal, but only is slower cases with heavily 1307 // branching dominator trees. To get the optimal number of queries would 1308 // be to make lowerbound and lastkill a per-loc stack, and pop it until 1309 // the top of that stack dominates us. This does not seem worth it ATM. 1310 // A much cheaper optimization would be to always explore the deepest 1311 // branch of the dominator tree first. 
This will guarantee this resets on 1312 // the smallest set of blocks. 1313 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB && 1314 !DT->dominates(LocInfo.LowerBoundBlock, BB)) { 1315 // Reset the lower bound of things to check. 1316 // TODO: Some day we should be able to reset to last kill, rather than 1317 // 0. 1318 LocInfo.LowerBound = 0; 1319 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock(); 1320 LocInfo.LastKillValid = false; 1321 } 1322 } else if (LocInfo.StackEpoch != StackEpoch) { 1323 // If all that has changed is the StackEpoch, we only have to check the 1324 // new things on the stack, because we've checked everything before. In 1325 // this case, the lower bound of things to check remains the same. 1326 LocInfo.PopEpoch = PopEpoch; 1327 LocInfo.StackEpoch = StackEpoch; 1328 } 1329 if (!LocInfo.LastKillValid) { 1330 LocInfo.LastKill = VersionStack.size() - 1; 1331 LocInfo.LastKillValid = true; 1332 LocInfo.AR = MayAlias; 1333 } 1334 1335 // At this point, we should have corrected last kill and LowerBound to be 1336 // in bounds. 1337 assert(LocInfo.LowerBound < VersionStack.size() && 1338 "Lower bound out of range"); 1339 assert(LocInfo.LastKill < VersionStack.size() && 1340 "Last kill info out of range"); 1341 // In any case, the new upper bound is the top of the stack. 1342 unsigned long UpperBound = VersionStack.size() - 1; 1343 1344 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) { 1345 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " (" 1346 << *(MU->getMemoryInst()) << ")" 1347 << " because there are " 1348 << UpperBound - LocInfo.LowerBound 1349 << " stores to disambiguate\n"); 1350 // Because we did not walk, LastKill is no longer valid, as this may 1351 // have been a kill. 1352 LocInfo.LastKillValid = false; 1353 continue; 1354 } 1355 bool FoundClobberResult = false; 1356 while (UpperBound > LocInfo.LowerBound) { 1357 if (isa<MemoryPhi>(VersionStack[UpperBound])) { 1358 // For phis, use the walker, see where we ended up, go there 1359 Instruction *UseInst = MU->getMemoryInst(); 1360 MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst); 1361 // We are guaranteed to find it or something is wrong 1362 while (VersionStack[UpperBound] != Result) { 1363 assert(UpperBound != 0); 1364 --UpperBound; 1365 } 1366 FoundClobberResult = true; 1367 break; 1368 } 1369 1370 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]); 1371 // If the lifetime of the pointer ends at this instruction, it's live on 1372 // entry. 1373 if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) { 1374 // Reset UpperBound to liveOnEntryDef's place in the stack 1375 UpperBound = 0; 1376 FoundClobberResult = true; 1377 LocInfo.AR = MustAlias; 1378 break; 1379 } 1380 ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA); 1381 if (CA.IsClobber) { 1382 FoundClobberResult = true; 1383 LocInfo.AR = CA.AR; 1384 break; 1385 } 1386 --UpperBound; 1387 } 1388 1389 // Note: Phis always have AliasResult AR set to MayAlias ATM. 1390 1391 // At the end of this loop, UpperBound is either a clobber, or lower bound 1392 // PHI walking may cause it to be < LowerBound, and in fact, < LastKill. 
1393 if (FoundClobberResult || UpperBound < LocInfo.LastKill) { 1394 // We were last killed now by where we got to 1395 if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound])) 1396 LocInfo.AR = None; 1397 MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR); 1398 LocInfo.LastKill = UpperBound; 1399 } else { 1400 // Otherwise, we checked all the new ones, and now we know we can get to 1401 // LastKill. 1402 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR); 1403 } 1404 LocInfo.LowerBound = VersionStack.size() - 1; 1405 LocInfo.LowerBoundBlock = BB; 1406 } 1407 } 1408 1409 /// Optimize uses to point to their actual clobbering definitions. 1410 void MemorySSA::OptimizeUses::optimizeUses() { 1411 SmallVector<MemoryAccess *, 16> VersionStack; 1412 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo; 1413 VersionStack.push_back(MSSA->getLiveOnEntryDef()); 1414 1415 unsigned long StackEpoch = 1; 1416 unsigned long PopEpoch = 1; 1417 // We perform a non-recursive top-down dominator tree walk. 1418 for (const auto *DomNode : depth_first(DT->getRootNode())) 1419 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack, 1420 LocStackInfo); 1421 } 1422 1423 void MemorySSA::placePHINodes( 1424 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) { 1425 // Determine where our MemoryPhi's should go 1426 ForwardIDFCalculator IDFs(*DT); 1427 IDFs.setDefiningBlocks(DefiningBlocks); 1428 SmallVector<BasicBlock *, 32> IDFBlocks; 1429 IDFs.calculate(IDFBlocks); 1430 1431 // Now place MemoryPhi nodes. 1432 for (auto &BB : IDFBlocks) 1433 createMemoryPhi(BB); 1434 } 1435 1436 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) { 1437 // We create an access to represent "live on entry", for things like 1438 // arguments or users of globals, where the memory they use is defined before 1439 // the beginning of the function. We do not actually insert it into the IR. 1440 // We do not define a live on exit for the immediate uses, and thus our 1441 // semantics do *not* imply that something with no immediate uses can simply 1442 // be removed. 1443 BasicBlock &StartingPoint = F.getEntryBlock(); 1444 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr, 1445 &StartingPoint, NextID++)); 1446 1447 // We maintain lists of memory accesses per-block, trading memory for time. We 1448 // could just look up the memory access for every possible instruction in the 1449 // stream. 1450 SmallPtrSet<BasicBlock *, 32> DefiningBlocks; 1451 // Go through each block, figure out where defs occur, and chain together all 1452 // the accesses. 1453 for (BasicBlock &B : F) { 1454 bool InsertIntoDef = false; 1455 AccessList *Accesses = nullptr; 1456 DefsList *Defs = nullptr; 1457 for (Instruction &I : B) { 1458 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA); 1459 if (!MUD) 1460 continue; 1461 1462 if (!Accesses) 1463 Accesses = getOrCreateAccessList(&B); 1464 Accesses->push_back(MUD); 1465 if (isa<MemoryDef>(MUD)) { 1466 InsertIntoDef = true; 1467 if (!Defs) 1468 Defs = getOrCreateDefsList(&B); 1469 Defs->push_back(*MUD); 1470 } 1471 } 1472 if (InsertIntoDef) 1473 DefiningBlocks.insert(&B); 1474 } 1475 placePHINodes(DefiningBlocks); 1476 1477 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get 1478 // filled in with all blocks. 
1479 SmallPtrSet<BasicBlock *, 16> Visited; 1480 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited); 1481 1482 ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT); 1483 CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase); 1484 OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses(); 1485 1486 // Mark the uses in unreachable blocks as live on entry, so that they go 1487 // somewhere. 1488 for (auto &BB : F) 1489 if (!Visited.count(&BB)) 1490 markUnreachableAsLiveOnEntry(&BB); 1491 } 1492 1493 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); } 1494 1495 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() { 1496 if (Walker) 1497 return Walker.get(); 1498 1499 if (!WalkerBase) 1500 WalkerBase = 1501 llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); 1502 1503 Walker = 1504 llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get()); 1505 return Walker.get(); 1506 } 1507 1508 MemorySSAWalker *MemorySSA::getSkipSelfWalker() { 1509 if (SkipWalker) 1510 return SkipWalker.get(); 1511 1512 if (!WalkerBase) 1513 WalkerBase = 1514 llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT); 1515 1516 SkipWalker = 1517 llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get()); 1518 return SkipWalker.get(); 1519 } 1520 1521 1522 // This is a helper function used by the creation routines. It places NewAccess 1523 // into the access and defs lists for a given basic block, at the given 1524 // insertion point. 1525 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess, 1526 const BasicBlock *BB, 1527 InsertionPlace Point) { 1528 auto *Accesses = getOrCreateAccessList(BB); 1529 if (Point == Beginning) { 1530 // If it's a phi node, it goes first, otherwise, it goes after any phi 1531 // nodes. 1532 if (isa<MemoryPhi>(NewAccess)) { 1533 Accesses->push_front(NewAccess); 1534 auto *Defs = getOrCreateDefsList(BB); 1535 Defs->push_front(*NewAccess); 1536 } else { 1537 auto AI = find_if_not( 1538 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1539 Accesses->insert(AI, NewAccess); 1540 if (!isa<MemoryUse>(NewAccess)) { 1541 auto *Defs = getOrCreateDefsList(BB); 1542 auto DI = find_if_not( 1543 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); }); 1544 Defs->insert(DI, *NewAccess); 1545 } 1546 } 1547 } else { 1548 Accesses->push_back(NewAccess); 1549 if (!isa<MemoryUse>(NewAccess)) { 1550 auto *Defs = getOrCreateDefsList(BB); 1551 Defs->push_back(*NewAccess); 1552 } 1553 } 1554 BlockNumberingValid.erase(BB); 1555 } 1556 1557 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB, 1558 AccessList::iterator InsertPt) { 1559 auto *Accesses = getWritableBlockAccesses(BB); 1560 bool WasEnd = InsertPt == Accesses->end(); 1561 Accesses->insert(AccessList::iterator(InsertPt), What); 1562 if (!isa<MemoryUse>(What)) { 1563 auto *Defs = getOrCreateDefsList(BB); 1564 // If we got asked to insert at the end, we have an easy job, just shove it 1565 // at the end. If we got asked to insert before an existing def, we also get 1566 // an iterator. If we got asked to insert before a use, we have to hunt for 1567 // the next def. 
1568 if (WasEnd) { 1569 Defs->push_back(*What); 1570 } else if (isa<MemoryDef>(InsertPt)) { 1571 Defs->insert(InsertPt->getDefsIterator(), *What); 1572 } else { 1573 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt)) 1574 ++InsertPt; 1575 // Either we found a def, or we are inserting at the end 1576 if (InsertPt == Accesses->end()) 1577 Defs->push_back(*What); 1578 else 1579 Defs->insert(InsertPt->getDefsIterator(), *What); 1580 } 1581 } 1582 BlockNumberingValid.erase(BB); 1583 } 1584 1585 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) { 1586 // Keep it in the lookup tables, remove from the lists 1587 removeFromLists(What, false); 1588 1589 // Note that moving should implicitly invalidate the optimized state of a 1590 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a 1591 // MemoryDef. 1592 if (auto *MD = dyn_cast<MemoryDef>(What)) 1593 MD->resetOptimized(); 1594 What->setBlock(BB); 1595 } 1596 1597 // Move What before Where in the IR. The end result is that What will belong to 1598 // the right lists and have the right Block set, but will not otherwise be 1599 // correct. It will not have the right defining access, and if it is a def, 1600 // things below it will not properly be updated. 1601 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB, 1602 AccessList::iterator Where) { 1603 prepareForMoveTo(What, BB); 1604 insertIntoListsBefore(What, BB, Where); 1605 } 1606 1607 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB, 1608 InsertionPlace Point) { 1609 if (isa<MemoryPhi>(What)) { 1610 assert(Point == Beginning && 1611 "Can only move a Phi at the beginning of the block"); 1612 // Update lookup table entry 1613 ValueToMemoryAccess.erase(What->getBlock()); 1614 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second; 1615 (void)Inserted; 1616 assert(Inserted && "Cannot move a Phi to a block that already has one"); 1617 } 1618 1619 prepareForMoveTo(What, BB); 1620 insertIntoListsForBlock(What, BB, Point); 1621 } 1622 1623 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) { 1624 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB"); 1625 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++); 1626 // Phi's always are placed at the front of the block. 1627 insertIntoListsForBlock(Phi, BB, Beginning); 1628 ValueToMemoryAccess[BB] = Phi; 1629 return Phi; 1630 } 1631 1632 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I, 1633 MemoryAccess *Definition, 1634 const MemoryUseOrDef *Template) { 1635 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI"); 1636 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template); 1637 assert( 1638 NewAccess != nullptr && 1639 "Tried to create a memory access for a non-memory touching instruction"); 1640 NewAccess->setDefiningAccess(Definition); 1641 return NewAccess; 1642 } 1643 1644 // Return true if the instruction has ordering constraints. 1645 // Note specifically that this only considers stores and loads 1646 // because others are still considered ModRef by getModRefInfo. 
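// For illustration: a plain non-atomic, non-volatile load or store is
// "unordered"; a volatile access, or an atomic one at monotonic or stronger
// ordering, is not, so isOrdered() returns true for it and createNewAccess()
// below conservatively models it as a MemoryDef even when it only reads.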
1647 static inline bool isOrdered(const Instruction *I) { 1648 if (auto *SI = dyn_cast<StoreInst>(I)) { 1649 if (!SI->isUnordered()) 1650 return true; 1651 } else if (auto *LI = dyn_cast<LoadInst>(I)) { 1652 if (!LI->isUnordered()) 1653 return true; 1654 } 1655 return false; 1656 } 1657 1658 /// Helper function to create new memory accesses 1659 template <typename AliasAnalysisType> 1660 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I, 1661 AliasAnalysisType *AAP, 1662 const MemoryUseOrDef *Template) { 1663 // The assume intrinsic has a control dependency which we model by claiming 1664 // that it writes arbitrarily. Ignore that fake memory dependency here. 1665 // FIXME: Replace this special casing with a more accurate modelling of 1666 // assume's control dependency. 1667 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) 1668 if (II->getIntrinsicID() == Intrinsic::assume) 1669 return nullptr; 1670 1671 bool Def, Use; 1672 if (Template) { 1673 Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr; 1674 Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr; 1675 #if !defined(NDEBUG) 1676 ModRefInfo ModRef = AAP->getModRefInfo(I, None); 1677 bool DefCheck, UseCheck; 1678 DefCheck = isModSet(ModRef) || isOrdered(I); 1679 UseCheck = isRefSet(ModRef); 1680 assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template"); 1681 #endif 1682 } else { 1683 // Find out what affect this instruction has on memory. 1684 ModRefInfo ModRef = AAP->getModRefInfo(I, None); 1685 // The isOrdered check is used to ensure that volatiles end up as defs 1686 // (atomics end up as ModRef right now anyway). Until we separate the 1687 // ordering chain from the memory chain, this enables people to see at least 1688 // some relative ordering to volatiles. Note that getClobberingMemoryAccess 1689 // will still give an answer that bypasses other volatile loads. TODO: 1690 // Separate memory aliasing and ordering into two different chains so that 1691 // we can precisely represent both "what memory will this read/write/is 1692 // clobbered by" and "what instructions can I move this past". 1693 Def = isModSet(ModRef) || isOrdered(I); 1694 Use = isRefSet(ModRef); 1695 } 1696 1697 // It's possible for an instruction to not modify memory at all. During 1698 // construction, we ignore them. 1699 if (!Def && !Use) 1700 return nullptr; 1701 1702 MemoryUseOrDef *MUD; 1703 if (Def) 1704 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++); 1705 else 1706 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent()); 1707 ValueToMemoryAccess[I] = MUD; 1708 return MUD; 1709 } 1710 1711 /// Returns true if \p Replacer dominates \p Replacee . 1712 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer, 1713 const MemoryAccess *Replacee) const { 1714 if (isa<MemoryUseOrDef>(Replacee)) 1715 return DT->dominates(Replacer->getBlock(), Replacee->getBlock()); 1716 const auto *MP = cast<MemoryPhi>(Replacee); 1717 // For a phi node, the use occurs in the predecessor block of the phi node. 1718 // Since we may occur multiple times in the phi node, we have to check each 1719 // operand to ensure Replacer dominates each operand where Replacee occurs. 1720 for (const Use &Arg : MP->operands()) { 1721 if (Arg.get() != Replacee && 1722 !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg))) 1723 return false; 1724 } 1725 return true; 1726 } 1727 1728 /// Properly remove \p MA from all of MemorySSA's lookup tables. 
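/// Concretely: the access's block-numbering entry is erased, the walker's
/// cached information is invalidated for defs and phis, and the entry in
/// ValueToMemoryAccess is removed; that map is keyed on the memory
/// instruction for uses and defs, and on the basic block for MemoryPhis.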
1729 void MemorySSA::removeFromLookups(MemoryAccess *MA) { 1730 assert(MA->use_empty() && 1731 "Trying to remove memory access that still has uses"); 1732 BlockNumbering.erase(MA); 1733 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 1734 MUD->setDefiningAccess(nullptr); 1735 // Invalidate our walker's cache if necessary 1736 if (!isa<MemoryUse>(MA)) 1737 getWalker()->invalidateInfo(MA); 1738 1739 Value *MemoryInst; 1740 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) 1741 MemoryInst = MUD->getMemoryInst(); 1742 else 1743 MemoryInst = MA->getBlock(); 1744 1745 auto VMA = ValueToMemoryAccess.find(MemoryInst); 1746 if (VMA->second == MA) 1747 ValueToMemoryAccess.erase(VMA); 1748 } 1749 1750 /// Properly remove \p MA from all of MemorySSA's lists. 1751 /// 1752 /// Because of the way the intrusive list and use lists work, it is important to 1753 /// do removal in the right order. 1754 /// ShouldDelete defaults to true, and will cause the memory access to also be 1755 /// deleted, not just removed. 1756 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) { 1757 BasicBlock *BB = MA->getBlock(); 1758 // The access list owns the reference, so we erase it from the non-owning list 1759 // first. 1760 if (!isa<MemoryUse>(MA)) { 1761 auto DefsIt = PerBlockDefs.find(BB); 1762 std::unique_ptr<DefsList> &Defs = DefsIt->second; 1763 Defs->remove(*MA); 1764 if (Defs->empty()) 1765 PerBlockDefs.erase(DefsIt); 1766 } 1767 1768 // The erase call here will delete it. If we don't want it deleted, we call 1769 // remove instead. 1770 auto AccessIt = PerBlockAccesses.find(BB); 1771 std::unique_ptr<AccessList> &Accesses = AccessIt->second; 1772 if (ShouldDelete) 1773 Accesses->erase(MA); 1774 else 1775 Accesses->remove(MA); 1776 1777 if (Accesses->empty()) { 1778 PerBlockAccesses.erase(AccessIt); 1779 BlockNumberingValid.erase(BB); 1780 } 1781 } 1782 1783 void MemorySSA::print(raw_ostream &OS) const { 1784 MemorySSAAnnotatedWriter Writer(this); 1785 F.print(OS, &Writer); 1786 } 1787 1788 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1789 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); } 1790 #endif 1791 1792 void MemorySSA::verifyMemorySSA() const { 1793 verifyDefUses(F); 1794 verifyDomination(F); 1795 verifyOrdering(F); 1796 verifyDominationNumbers(F); 1797 // Previously, the verification used to also verify that the clobberingAccess 1798 // cached by MemorySSA is the same as the clobberingAccess found at a later 1799 // query to AA. This does not hold true in general due to the current fragility 1800 // of BasicAA which has arbitrary caps on the things it analyzes before giving 1801 // up. As a result, transformations that are correct, will lead to BasicAA 1802 // returning different Alias answers before and after that transformation. 1803 // Invalidating MemorySSA is not an option, as the results in BasicAA can be so 1804 // random, in the worst case we'd need to rebuild MemorySSA from scratch after 1805 // every transformation, which defeats the purpose of using it. For such an 1806 // example, see test4 added in D51960. 1807 } 1808 1809 /// Verify that all of the blocks we believe to have valid domination numbers 1810 /// actually have valid domination numbers. 
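/// That is, for every block still present in BlockNumberingValid, each
/// MemoryAccess on its access list must have an entry in BlockNumbering, and
/// those entries must be strictly increasing in list order.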
1811 void MemorySSA::verifyDominationNumbers(const Function &F) const { 1812 #ifndef NDEBUG 1813 if (BlockNumberingValid.empty()) 1814 return; 1815 1816 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid; 1817 for (const BasicBlock &BB : F) { 1818 if (!ValidBlocks.count(&BB)) 1819 continue; 1820 1821 ValidBlocks.erase(&BB); 1822 1823 const AccessList *Accesses = getBlockAccesses(&BB); 1824 // It's correct to say an empty block has valid numbering. 1825 if (!Accesses) 1826 continue; 1827 1828 // Block numbering starts at 1. 1829 unsigned long LastNumber = 0; 1830 for (const MemoryAccess &MA : *Accesses) { 1831 auto ThisNumberIter = BlockNumbering.find(&MA); 1832 assert(ThisNumberIter != BlockNumbering.end() && 1833 "MemoryAccess has no domination number in a valid block!"); 1834 1835 unsigned long ThisNumber = ThisNumberIter->second; 1836 assert(ThisNumber > LastNumber && 1837 "Domination numbers should be strictly increasing!"); 1838 LastNumber = ThisNumber; 1839 } 1840 } 1841 1842 assert(ValidBlocks.empty() && 1843 "All valid BasicBlocks should exist in F -- dangling pointers?"); 1844 #endif 1845 } 1846 1847 /// Verify that the order and existence of MemoryAccesses matches the 1848 /// order and existence of memory affecting instructions. 1849 void MemorySSA::verifyOrdering(Function &F) const { 1850 #ifndef NDEBUG 1851 // Walk all the blocks, comparing what the lookups think and what the access 1852 // lists think, as well as the order in the blocks vs the order in the access 1853 // lists. 1854 SmallVector<MemoryAccess *, 32> ActualAccesses; 1855 SmallVector<MemoryAccess *, 32> ActualDefs; 1856 for (BasicBlock &B : F) { 1857 const AccessList *AL = getBlockAccesses(&B); 1858 const auto *DL = getBlockDefs(&B); 1859 MemoryAccess *Phi = getMemoryAccess(&B); 1860 if (Phi) { 1861 ActualAccesses.push_back(Phi); 1862 ActualDefs.push_back(Phi); 1863 } 1864 1865 for (Instruction &I : B) { 1866 MemoryAccess *MA = getMemoryAccess(&I); 1867 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) && 1868 "We have memory affecting instructions " 1869 "in this block but they are not in the " 1870 "access list or defs list"); 1871 if (MA) { 1872 ActualAccesses.push_back(MA); 1873 if (isa<MemoryDef>(MA)) 1874 ActualDefs.push_back(MA); 1875 } 1876 } 1877 // Either we hit the assert, really have no accesses, or we have both 1878 // accesses and an access list. 1879 // Same with defs. 1880 if (!AL && !DL) 1881 continue; 1882 assert(AL->size() == ActualAccesses.size() && 1883 "We don't have the same number of accesses in the block as on the " 1884 "access list"); 1885 assert((DL || ActualDefs.size() == 0) && 1886 "Either we should have a defs list, or we should have no defs"); 1887 assert((!DL || DL->size() == ActualDefs.size()) && 1888 "We don't have the same number of defs in the block as on the " 1889 "def list"); 1890 auto ALI = AL->begin(); 1891 auto AAI = ActualAccesses.begin(); 1892 while (ALI != AL->end() && AAI != ActualAccesses.end()) { 1893 assert(&*ALI == *AAI && "Not the same accesses in the same order"); 1894 ++ALI; 1895 ++AAI; 1896 } 1897 ActualAccesses.clear(); 1898 if (DL) { 1899 auto DLI = DL->begin(); 1900 auto ADI = ActualDefs.begin(); 1901 while (DLI != DL->end() && ADI != ActualDefs.end()) { 1902 assert(&*DLI == *ADI && "Not the same defs in the same order"); 1903 ++DLI; 1904 ++ADI; 1905 } 1906 } 1907 ActualDefs.clear(); 1908 } 1909 #endif 1910 } 1911 1912 /// Verify the domination properties of MemorySSA by checking that each 1913 /// definition dominates all of its uses. 
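/// Both kinds of definitions are covered: MemoryPhis, which hang off basic
/// blocks, and MemoryDefs, which hang off instructions. Uses appearing as
/// incoming values of a MemoryPhi are checked against the corresponding
/// incoming block via the Use-based overload of MemorySSA::dominates.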
1914 void MemorySSA::verifyDomination(Function &F) const { 1915 #ifndef NDEBUG 1916 for (BasicBlock &B : F) { 1917 // Phi nodes are attached to basic blocks 1918 if (MemoryPhi *MP = getMemoryAccess(&B)) 1919 for (const Use &U : MP->uses()) 1920 assert(dominates(MP, U) && "Memory PHI does not dominate it's uses"); 1921 1922 for (Instruction &I : B) { 1923 MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I)); 1924 if (!MD) 1925 continue; 1926 1927 for (const Use &U : MD->uses()) 1928 assert(dominates(MD, U) && "Memory Def does not dominate it's uses"); 1929 } 1930 } 1931 #endif 1932 } 1933 1934 /// Verify the def-use lists in MemorySSA, by verifying that \p Use 1935 /// appears in the use list of \p Def. 1936 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const { 1937 #ifndef NDEBUG 1938 // The live on entry use may cause us to get a NULL def here 1939 if (!Def) 1940 assert(isLiveOnEntryDef(Use) && 1941 "Null def but use not point to live on entry def"); 1942 else 1943 assert(is_contained(Def->users(), Use) && 1944 "Did not find use in def's use list"); 1945 #endif 1946 } 1947 1948 /// Verify the immediate use information, by walking all the memory 1949 /// accesses and verifying that, for each use, it appears in the 1950 /// appropriate def's use list 1951 void MemorySSA::verifyDefUses(Function &F) const { 1952 #ifndef NDEBUG 1953 for (BasicBlock &B : F) { 1954 // Phi nodes are attached to basic blocks 1955 if (MemoryPhi *Phi = getMemoryAccess(&B)) { 1956 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance( 1957 pred_begin(&B), pred_end(&B))) && 1958 "Incomplete MemoryPhi Node"); 1959 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) { 1960 verifyUseInDefs(Phi->getIncomingValue(I), Phi); 1961 assert(find(predecessors(&B), Phi->getIncomingBlock(I)) != 1962 pred_end(&B) && 1963 "Incoming phi block not a block predecessor"); 1964 } 1965 } 1966 1967 for (Instruction &I : B) { 1968 if (MemoryUseOrDef *MA = getMemoryAccess(&I)) { 1969 verifyUseInDefs(MA->getDefiningAccess(), MA); 1970 } 1971 } 1972 } 1973 #endif 1974 } 1975 1976 /// Perform a local numbering on blocks so that instruction ordering can be 1977 /// determined in constant time. 1978 /// TODO: We currently just number in order. If we numbered by N, we could 1979 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least 1980 /// log2(N) sequences of mixed before and after) without needing to invalidate 1981 /// the numbering. 1982 void MemorySSA::renumberBlock(const BasicBlock *B) const { 1983 // The pre-increment ensures the numbers really start at 1. 1984 unsigned long CurrentNumber = 0; 1985 const AccessList *AL = getBlockAccesses(B); 1986 assert(AL != nullptr && "Asking to renumber an empty block"); 1987 for (const auto &I : *AL) 1988 BlockNumbering[&I] = ++CurrentNumber; 1989 BlockNumberingValid.insert(B); 1990 } 1991 1992 /// Determine, for two memory accesses in the same block, 1993 /// whether \p Dominator dominates \p Dominatee. 1994 /// \returns True if \p Dominator dominates \p Dominatee. 1995 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator, 1996 const MemoryAccess *Dominatee) const { 1997 const BasicBlock *DominatorBlock = Dominator->getBlock(); 1998 1999 assert((DominatorBlock == Dominatee->getBlock()) && 2000 "Asking for local domination when accesses are in different blocks!"); 2001 // A node dominates itself. 
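  // Illustrative example: for a block whose access list is
  //   [MemoryPhi, 1 = MemoryDef, MemoryUse(1)]
  // renumberBlock() assigns the numbers 1, 2 and 3 in order, so asking
  // whether the MemoryDef locally dominates the MemoryUse reduces to the
  // comparison 2 < 3 once the trivial and liveOnEntry cases below are
  // handled.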
2002 if (Dominatee == Dominator) 2003 return true; 2004 2005 // When Dominatee is defined on function entry, it is not dominated by another 2006 // memory access. 2007 if (isLiveOnEntryDef(Dominatee)) 2008 return false; 2009 2010 // When Dominator is defined on function entry, it dominates the other memory 2011 // access. 2012 if (isLiveOnEntryDef(Dominator)) 2013 return true; 2014 2015 if (!BlockNumberingValid.count(DominatorBlock)) 2016 renumberBlock(DominatorBlock); 2017 2018 unsigned long DominatorNum = BlockNumbering.lookup(Dominator); 2019 // All numbers start with 1 2020 assert(DominatorNum != 0 && "Block was not numbered properly"); 2021 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee); 2022 assert(DominateeNum != 0 && "Block was not numbered properly"); 2023 return DominatorNum < DominateeNum; 2024 } 2025 2026 bool MemorySSA::dominates(const MemoryAccess *Dominator, 2027 const MemoryAccess *Dominatee) const { 2028 if (Dominator == Dominatee) 2029 return true; 2030 2031 if (isLiveOnEntryDef(Dominatee)) 2032 return false; 2033 2034 if (Dominator->getBlock() != Dominatee->getBlock()) 2035 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock()); 2036 return locallyDominates(Dominator, Dominatee); 2037 } 2038 2039 bool MemorySSA::dominates(const MemoryAccess *Dominator, 2040 const Use &Dominatee) const { 2041 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) { 2042 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee); 2043 // The def must dominate the incoming block of the phi. 2044 if (UseBB != Dominator->getBlock()) 2045 return DT->dominates(Dominator->getBlock(), UseBB); 2046 // If the UseBB and the DefBB are the same, compare locally. 2047 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee)); 2048 } 2049 // If it's not a PHI node use, the normal dominates can already handle it. 
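  // (In that case the user is itself a MemoryUseOrDef, and the position of
  // the use is simply the position of that user access.)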
2050 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser())); 2051 } 2052 2053 const static char LiveOnEntryStr[] = "liveOnEntry"; 2054 2055 void MemoryAccess::print(raw_ostream &OS) const { 2056 switch (getValueID()) { 2057 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS); 2058 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS); 2059 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS); 2060 } 2061 llvm_unreachable("invalid value id"); 2062 } 2063 2064 void MemoryDef::print(raw_ostream &OS) const { 2065 MemoryAccess *UO = getDefiningAccess(); 2066 2067 auto printID = [&OS](MemoryAccess *A) { 2068 if (A && A->getID()) 2069 OS << A->getID(); 2070 else 2071 OS << LiveOnEntryStr; 2072 }; 2073 2074 OS << getID() << " = MemoryDef("; 2075 printID(UO); 2076 OS << ")"; 2077 2078 if (isOptimized()) { 2079 OS << "->"; 2080 printID(getOptimized()); 2081 2082 if (Optional<AliasResult> AR = getOptimizedAccessType()) 2083 OS << " " << *AR; 2084 } 2085 } 2086 2087 void MemoryPhi::print(raw_ostream &OS) const { 2088 bool First = true; 2089 OS << getID() << " = MemoryPhi("; 2090 for (const auto &Op : operands()) { 2091 BasicBlock *BB = getIncomingBlock(Op); 2092 MemoryAccess *MA = cast<MemoryAccess>(Op); 2093 if (!First) 2094 OS << ','; 2095 else 2096 First = false; 2097 2098 OS << '{'; 2099 if (BB->hasName()) 2100 OS << BB->getName(); 2101 else 2102 BB->printAsOperand(OS, false); 2103 OS << ','; 2104 if (unsigned ID = MA->getID()) 2105 OS << ID; 2106 else 2107 OS << LiveOnEntryStr; 2108 OS << '}'; 2109 } 2110 OS << ')'; 2111 } 2112 2113 void MemoryUse::print(raw_ostream &OS) const { 2114 MemoryAccess *UO = getDefiningAccess(); 2115 OS << "MemoryUse("; 2116 if (UO && UO->getID()) 2117 OS << UO->getID(); 2118 else 2119 OS << LiveOnEntryStr; 2120 OS << ')'; 2121 2122 if (Optional<AliasResult> AR = getOptimizedAccessType()) 2123 OS << " " << *AR; 2124 } 2125 2126 void MemoryAccess::dump() const { 2127 // Cannot completely remove virtual function even in release mode. 
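// For reference, the print routines above emit annotations of the following
// shape (illustrative output; the exact text depends on the IR):
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1) MustAlias
//   2 = MemoryDef(1)->liveOnEntry
//   3 = MemoryPhi({entry,1},{if.then,2})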
2128 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 2129 print(dbgs()); 2130 dbgs() << "\n"; 2131 #endif 2132 } 2133 2134 char MemorySSAPrinterLegacyPass::ID = 0; 2135 2136 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) { 2137 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry()); 2138 } 2139 2140 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const { 2141 AU.setPreservesAll(); 2142 AU.addRequired<MemorySSAWrapperPass>(); 2143 } 2144 2145 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { 2146 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); 2147 MSSA.print(dbgs()); 2148 if (VerifyMemorySSA) 2149 MSSA.verifyMemorySSA(); 2150 return false; 2151 } 2152 2153 AnalysisKey MemorySSAAnalysis::Key; 2154 2155 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, 2156 FunctionAnalysisManager &AM) { 2157 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 2158 auto &AA = AM.getResult<AAManager>(F); 2159 return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT)); 2160 } 2161 2162 PreservedAnalyses MemorySSAPrinterPass::run(Function &F, 2163 FunctionAnalysisManager &AM) { 2164 OS << "MemorySSA for function: " << F.getName() << "\n"; 2165 AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS); 2166 2167 return PreservedAnalyses::all(); 2168 } 2169 2170 PreservedAnalyses MemorySSAVerifierPass::run(Function &F, 2171 FunctionAnalysisManager &AM) { 2172 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); 2173 2174 return PreservedAnalyses::all(); 2175 } 2176 2177 char MemorySSAWrapperPass::ID = 0; 2178 2179 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { 2180 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); 2181 } 2182 2183 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } 2184 2185 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 2186 AU.setPreservesAll(); 2187 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 2188 AU.addRequiredTransitive<AAResultsWrapperPass>(); 2189 } 2190 2191 bool MemorySSAWrapperPass::runOnFunction(Function &F) { 2192 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2193 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 2194 MSSA.reset(new MemorySSA(F, &AA, &DT)); 2195 return false; 2196 } 2197 2198 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); } 2199 2200 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { 2201 MSSA->print(OS); 2202 } 2203 2204 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} 2205 2206 /// Walk the use-def chains starting at \p StartingAccess and find 2207 /// the MemoryAccess that actually clobbers Loc. 2208 /// 2209 /// \returns our clobbering memory access 2210 template <typename AliasAnalysisType> 2211 MemoryAccess * 2212 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( 2213 MemoryAccess *StartingAccess, const MemoryLocation &Loc) { 2214 if (isa<MemoryPhi>(StartingAccess)) 2215 return StartingAccess; 2216 2217 auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess); 2218 if (MSSA->isLiveOnEntryDef(StartingUseOrDef)) 2219 return StartingUseOrDef; 2220 2221 Instruction *I = StartingUseOrDef->getMemoryInst(); 2222 2223 // Conservatively, fences are always clobbers, so don't perform the walk if we 2224 // hit a fence. 
2225 if (!isa<CallBase>(I) && I->isFenceLike()) 2226 return StartingUseOrDef; 2227 2228 UpwardsMemoryQuery Q; 2229 Q.OriginalAccess = StartingUseOrDef; 2230 Q.StartingLoc = Loc; 2231 Q.Inst = I; 2232 Q.IsCall = false; 2233 2234 // Unlike the other function, do not walk to the def of a def, because we are 2235 // handed something we already believe is the clobbering access. 2236 // We never set SkipSelf to true in Q in this method. 2237 MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef) 2238 ? StartingUseOrDef->getDefiningAccess() 2239 : StartingUseOrDef; 2240 2241 MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q); 2242 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); 2243 LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n"); 2244 LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is "); 2245 LLVM_DEBUG(dbgs() << *Clobber << "\n"); 2246 return Clobber; 2247 } 2248 2249 template <typename AliasAnalysisType> 2250 MemoryAccess * 2251 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase( 2252 MemoryAccess *MA, bool SkipSelf) { 2253 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA); 2254 // If this is a MemoryPhi, we can't do anything. 2255 if (!StartingAccess) 2256 return MA; 2257 2258 bool IsOptimized = false; 2259 2260 // If this is an already optimized use or def, return the optimized result. 2261 // Note: Currently, we store the optimized def result in a separate field, 2262 // since we can't use the defining access. 2263 if (StartingAccess->isOptimized()) { 2264 if (!SkipSelf || !isa<MemoryDef>(StartingAccess)) 2265 return StartingAccess->getOptimized(); 2266 IsOptimized = true; 2267 } 2268 2269 const Instruction *I = StartingAccess->getMemoryInst(); 2270 // We can't sanely do anything with a fence, since they conservatively clobber 2271 // all memory, and have no locations to get pointers from to try to 2272 // disambiguate. 2273 if (!isa<CallBase>(I) && I->isFenceLike()) 2274 return StartingAccess; 2275 2276 UpwardsMemoryQuery Q(I, StartingAccess); 2277 2278 if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) { 2279 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef(); 2280 StartingAccess->setOptimized(LiveOnEntry); 2281 StartingAccess->setOptimizedAccessType(None); 2282 return LiveOnEntry; 2283 } 2284 2285 MemoryAccess *OptimizedAccess; 2286 if (!IsOptimized) { 2287 // Start with the thing we already think clobbers this location 2288 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess(); 2289 2290 // At this point, DefiningAccess may be the live on entry def. 2291 // If it is, we will not get a better result. 
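    // (liveOnEntry sits at the top of every def-use chain, so walking
    // upwards from it could only return liveOnEntry again.)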
2292 if (MSSA->isLiveOnEntryDef(DefiningAccess)) { 2293 StartingAccess->setOptimized(DefiningAccess); 2294 StartingAccess->setOptimizedAccessType(None); 2295 return DefiningAccess; 2296 } 2297 2298 OptimizedAccess = Walker.findClobber(DefiningAccess, Q); 2299 StartingAccess->setOptimized(OptimizedAccess); 2300 if (MSSA->isLiveOnEntryDef(OptimizedAccess)) 2301 StartingAccess->setOptimizedAccessType(None); 2302 else if (Q.AR == MustAlias) 2303 StartingAccess->setOptimizedAccessType(MustAlias); 2304 } else 2305 OptimizedAccess = StartingAccess->getOptimized(); 2306 2307 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is "); 2308 LLVM_DEBUG(dbgs() << *StartingAccess << "\n"); 2309 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is "); 2310 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n"); 2311 2312 MemoryAccess *Result; 2313 if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) && 2314 isa<MemoryDef>(StartingAccess)) { 2315 assert(isa<MemoryDef>(Q.OriginalAccess)); 2316 Q.SkipSelfAccess = true; 2317 Result = Walker.findClobber(OptimizedAccess, Q); 2318 } else 2319 Result = OptimizedAccess; 2320 2321 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf); 2322 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n"); 2323 2324 return Result; 2325 } 2326 2327 MemoryAccess * 2328 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) { 2329 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA)) 2330 return Use->getDefiningAccess(); 2331 return MA; 2332 } 2333 2334 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess( 2335 MemoryAccess *StartingAccess, const MemoryLocation &) { 2336 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess)) 2337 return Use->getDefiningAccess(); 2338 return StartingAccess; 2339 } 2340 2341 void MemoryPhi::deleteMe(DerivedUser *Self) { 2342 delete static_cast<MemoryPhi *>(Self); 2343 } 2344 2345 void MemoryDef::deleteMe(DerivedUser *Self) { 2346 delete static_cast<MemoryDef *>(Self); 2347 } 2348 2349 void MemoryUse::deleteMe(DerivedUser *Self) { 2350 delete static_cast<MemoryUse *>(Self); 2351 } 2352
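// Illustrative usage sketch (hypothetical client code, not part of this
// file): a new-PM pass would typically obtain MemorySSA from the analysis
// manager and query the walker roughly like this:
//
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MemorySSAWalker *Walker = MSSA.getWalker();
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&SomeInstruction)) {
//     MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
//     bool ClobberedOnlyByEntry = MSSA.isLiveOnEntryDef(Clobber);
//     (void)ClobberedOnlyByEntry;
//   }
//
// Here SomeInstruction stands for any instruction of interest. The legacy-PM
// equivalent goes through MemorySSAWrapperPass::getMSSA(), as in
// MemorySSAPrinterLegacyPass::runOnFunction above.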