//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

static cl::opt<std::string>
    DotCFGMSSA("dot-cfg-mssa",
               cl::value_desc("file name for generated dot file"),
               cl::desc("file name for generated dot file"), cl::init(""));

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
cl::opt<bool> llvm::EnableMSSALoopDependency(
    "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
    cl::desc("Enable MemorySSA dependency for loop pass manager"));

static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};

} // end namespace llvm
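// As an illustration (hypothetical IR, not tied to any test), the annotator
// above renders MemorySSA alongside the IR roughly like:
//
//   ; 2 = MemoryPhi({entry,1},{loop,3})
//   loop:
//     ; MemoryUse(2)
//     %v = load i32, i32* %p
//     ; 3 = MemoryDef(2)
//     store i32 %v, i32* %q
//
// Block-start annotations come from emitBasicBlockStartAnnot (MemoryPhis);
// per-instruction annotations come from emitInstructionAnnot
// (MemoryUse/MemoryDef).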
namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm
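// A minimal sketch of how the use optimizer keys on MemoryLocOrCall
// (illustrative only; the real map lives in OptimizeUses below):
//
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   MemoryLocOrCall Key(MU);        // MU is a MemoryUse being optimized
//   auto &Info = LocStackInfo[Key]; // uses getHashValue/isEqual above
//
// Two calls compare equal only if they share a called operand and argument
// list; two non-calls compare on their MemoryLocation alone.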
/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
      return {false, NoAlias};
    case Intrinsic::dbg_addr:
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("debuginfo shouldn't have associated defs!");
    default:
      break;
    }
  }

  if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
    ModRefInfo I = AA.getModRefInfo(DefInst, CB);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}
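// To make the reordering rules above concrete, a hedged example in IR terms:
//
//   %a = load atomic i32, i32* %p acquire, align 4   ; MayClobber
//   %b = load i32, i32* %p                           ; Use
//
// Here areLoadsReorderable returns false: Use can't be hoisted above an
// acquire load. Two monotonic (or weaker) loads of %p, by contrast, would be
// freely reorderable, and the load/load query would report no clobber.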
namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  if (auto *LI = dyn_cast<LoadInst>(I))
    return I->hasMetadata(LLVMContext::MD_invariant_load) ||
           AA.pointsToConstantMemory(MemoryLocation::get(LI));
  return false;
}
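// For illustration, both loads below are trivially optimizable to liveOnEntry
// (hypothetical IR, assuming @g is a constant global):
//
//   %a = load i32, i32* %p, !invariant.load !0   ; invariant metadata
//   %b = load i32, i32* @g                       ; constant memory
//
// No upward walk is needed for either: nothing in the function can clobber
// them, so their defining access becomes the liveOnEntry def directly.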
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));

      // Add reachable phi predecessors
      for (auto ItB = upward_defs_begin(
                    {const_cast<MemoryAccess *>(MA), MAP.second},
                    MSSA.getDomTree()),
                ItE = upward_defs_end();
           ItB != ItE; ++ItB)
        if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
          Worklist.emplace_back(*ItB);
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
template <class AliasAnalysisType> class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysisType &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;
  unsigned *UpwardWalkLimit;

  // Phi optimization bookkeeping:
  // List of DefPath to process during the current phi optimization walk.
  SmallVector<DefPath, 32> Paths;
  // List of visited <Access, Location> pairs; we can skip paths already
  // visited with the same memory location.
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  // Record if phi translation has been performed during the current phi
  // optimization walk, as merging alias results after phi translation can
  // yield incorrect results. Context in PR46156.
  bool PerformedPhiTranslation = false;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
    assert(UpwardWalkLimit && "Need a valid walk limit");
    bool LimitAlreadyReached = false;
    // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
    // it to 1. This will not do any alias() calls. It either returns in the
    // first iteration in the loop below, or is set back to 0 if all def chains
    // are free of MemoryDefs.
    if (!*UpwardWalkLimit) {
      *UpwardWalkLimit = 1;
      LimitAlreadyReached = true;
    }

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};

        if (!--*UpwardWalkLimit)
          return {Current, true, MayAlias};

        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    if (LimitAlreadyReached)
      *UpwardWalkLimit = 0;

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
                                             &PerformedPhiTranslation);
    auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
        if (PerformedPhiTranslation) {
          // If visiting this path performed Phi translation, don't continue,
          // since it may not be correct to merge results from two paths if one
          // relies on the phi translation.
          TerminatedPath Term{Node.Last, PathIndex};
          return Term;
        }
        continue;
      }

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node,
                                                 /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);

        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() {}
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
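  // A small illustrative walk-through of the phi optimization implemented
  // below (hypothetical CFG; not tied to any test):
  //
  //      entry: 1 = MemoryDef(liveOnEntry)   ; store to %p
  //       /   \
  //   left     right                         ; no memory ops
  //       \   /
  //      merge: 2 = MemoryPhi({left,1},{right,1})
  //             MemoryUse(?)                 ; load from %p
  //
  // When optimizing the load past phi 2: the walk target above the phi is
  // def 1, and both incoming paths reach def 1 without hitting another
  // clobber, so the use can legally be optimized straight to def 1.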
  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;
        assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
    PerformedPhiTranslation = false;
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
    Query = &Q;
    UpwardWalkLimit = &UpWalkLimit;
    // Starting limit must be > 0.
    if (!UpWalkLimit)
      UpWalkLimit++;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses.)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
  ClobberWalker<AliasAnalysisType> Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &,
                                              unsigned &);
  // Third argument (bool), defines whether the clobber search should skip the
  // original queried access. If true, there will be a follow-up query searching
  // for a clobber access past "self". Note that the Optimized access is not
  // updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
};
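// A hedged usage sketch of the walkers defined below (client-side code,
// assuming a pass already holds a MemorySSA &MSSA):
//
//   if (MemoryUseOrDef *MA = MSSA.getMemoryAccess(&SomeLoadOrStore)) {
//     MemorySSAWalker *W = MSSA.getWalker();
//     MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
//     // Clobber is a MemoryDef, a MemoryPhi, or the liveOnEntry def.
//   }
//
// The skip-self variant (see getSkipSelfWalker further down) answers the same
// question while ignoring the queried access itself when the walk loops back
// around to it.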
/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
template <class AliasAnalysisType>
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

template <class AliasAnalysisType>
class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase<AliasAnalysisType> *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      bool ReplacementDone = false;
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
        if (Phi->getIncomingBlock(I) == BB) {
          Phi->setIncomingValue(I, IncomingVal);
          ReplacementDone = true;
        }
      (void)ReplacementDone;
      assert(ReplacementDone && "Incomplete phi during partial rename");
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  assert(Root && "Trying to rename accesses in an unreachable block");

  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}
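// A sketch of what renamePass produces on a diamond CFG (illustrative):
//
//           entry: 1 = MemoryDef(liveOnEntry)
//           /   \
//   left: 2 = MemoryDef(1)    right: (no defs)
//           \   /
//           merge: 3 = MemoryPhi({left,2},{right,1})
//
// Walking the dominator tree in preorder, each block's last def becomes the
// incoming value for its successors' phis, exactly as in scalar SSA
// construction.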
/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  // Build MemorySSA using a batch alias analysis. This reuses the internal
  // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it can
  // significantly reduce the time spent by the compiler in AA, because we will
  // make queries about all the instructions in the Function.
  assert(AA && "No alias analysis?");
  BatchAAResults BatchAA(*AA);
  buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
  this->AA = AA;
  // Also create the walker here.
  getWalker();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  CachingWalker<BatchAAResults> *Walker;
  BatchAAResults *AA;
  DominatorTree *DT;
};

} // end namespace llvm
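// To illustrate the version-stack idea (a hedged sketch, not real output):
// walking the dominator tree top-down over
//
//   store i32 0, i32* %p      ; push: 1 = MemoryDef(liveOnEntry)
//   store i32 1, i32* %q      ; push: 2 = MemoryDef(1)
//   %v = load i32, i32* %p    ; scan stack top-down: 2 (no alias), 1 (clobber)
//
// leaves the load optimized to def 1. The per-location epochs and bounds in
// MemlocStackInfo let later loads of %p resume scanning where this walk
// stopped, instead of rechecking the whole stack.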
/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and last ended at. This is because we only want to check the
/// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if they start, we can modify MemoryLocOrCall to
/// include relevant data).
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  /// If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries would
      // be to make lowerbound and lastkill a per-loc stack, and pop it until
      // the top of that stack dominates us. This does not seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This will guarantee this resets on
      // the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    unsigned UpwardWalkLimit = MaxCheckLimit;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        MemoryAccess *Result =
            Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact,
    // < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
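// For intuition (an illustrative sketch): if only "left" contains a store,
//
//        entry
//        /   \
//     left   right
//        \   /
//        merge
//
// then the iterated dominance frontier of {left} is {merge}, so placePHINodes
// creates exactly one MemoryPhi, at the top of merge, mirroring classic SSA
// phi placement for scalars.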

void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per-block, trading memory for time.
  // We could just look up the memory access for every possible instruction in
  // the stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
  CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
  OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}
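
// Illustrative example (a minimal sketch): a load with no store before it in
// the function is defined by the liveOnEntry sentinel created above:
//
//   define i8 @f(i8* %p) {
//     ; MemoryUse(liveOnEntry)
//     %v = load i8, i8* %p
//     ret i8 %v
//   }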

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase =
        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  Walker =
      std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
  return Walker.get();
}

MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase =
        std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);

  SkipWalker =
      std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job: just shove it
    // at the end. If we got asked to insert before an existing def, we also
    // get an iterator. If we got asked to insert before a use, we have to hunt
    // for the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}
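
// Illustrative invariant maintained by the two insertion routines above (a
// sketch, not an exhaustive specification): within a block's access list, a
// MemoryPhi (if present) is always first, followed by MemoryDefs and
// MemoryUses in instruction order, e.g.
//   [MemoryPhi, MemoryDef, MemoryUse, MemoryDef]
// and the corresponding defs list is the same sequence with MemoryUses
// filtered out:
//   [MemoryPhi, MemoryDef, MemoryDef]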

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template,
                                               bool CreationMustSucceed) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
  if (CreationMustSucceed)
    assert(NewAccess != nullptr && "Tried to create a memory access for a "
                                   "non-memory touching instruction");
  if (NewAccess) {
    assert((!Definition || !isa<MemoryUse>(Definition)) &&
           "A use cannot be a defining access");
    NewAccess->setDefiningAccess(Definition);
  }
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
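
// Illustrative example (a minimal sketch; the ID is hypothetical): because of
// the isOrdered check used by createNewAccess below, a volatile load becomes a
// MemoryDef even though it only reads memory, preserving its ordering relative
// to other volatile accesses:
//
//   ; 1 = MemoryDef(liveOnEntry)
//   %v = load volatile i8, i8* %p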

/// Helper function to create new memory accesses.
template <typename AliasAnalysisType>
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           AliasAnalysisType *AAP,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Debuginfo intrinsics may be considered
  // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
  // dependencies here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::assume:
    case Intrinsic::experimental_noalias_scope_decl:
      return nullptr;
    }
  }

  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that may not read
  // from or write to memory. This is necessary for correctness.
  if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
    return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AAP->getModRefInfo(I, None);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway). Until we separate the
    // ordering chain from the memory chain, this enables people to see at
    // least some relative ordering of volatiles. Note that
    // getClobberingMemoryAccess will still give an answer that bypasses other
    // volatile loads. TODO: Separate memory aliasing and ordering into two
    // different chains so that we can precisely represent both "what memory
    // will this read/write/is clobbered by" and "what instructions can I move
    // this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }

  // It's possible for an instruction to not modify memory at all. We ignore
  // such instructions during construction.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    getWalker()->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}
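
// Illustrative classification by createNewAccess above under a default AA
// pipeline (a sketch; the instruction mix is hypothetical):
//   store i8 0, i8* %p     -> MemoryDef (Mod)
//   %v = load i8, i8* %p   -> MemoryUse (Ref)
//   call void @g()         -> MemoryDef if the call may write (Mod/ModRef),
//                             MemoryUse if it only reads (Ref)
//   %a = add i8 %v, 1      -> no access (neither reads nor writes memory)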

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important
/// to do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning
  // list first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyOrderingDominationAndDefUses(F);
  verifyDominationNumbers(F);
  verifyPrevDefInPhis(F);
  // Previously, the verification used to also verify that the clobbering
  // access cached by MemorySSA is the same as the clobbering access found at a
  // later query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, transformations that are correct will lead
  // to BasicAA returning different Alias answers before and after that
  // transformation. Invalidating MemorySSA is not an option, as the results in
  // BasicAA can be so random that in the worst case we'd need to rebuild
  // MemorySSA from scratch after every transformation, which defeats the
  // purpose of using it. For such an example, see test4 added in D51960.
}
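
// A minimal usage sketch (it mirrors what MemorySSAWrapperPass::verifyAnalysis
// does further below): callers typically gate verification on the global
// VerifyMemorySSA flag rather than calling it unconditionally, e.g.
//
//   if (VerifyMemorySSA)
//     MSSA.verifyMemorySSA();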

void MemorySSA::verifyPrevDefInPhis(Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (const BasicBlock &BB : F) {
    if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        auto *Pred = Phi->getIncomingBlock(I);
        auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, get the last def looking at
        // IDoms. If, while walking IDoms, any of these has an unreachable
        // predecessor, then the incoming def can be any access.
        if (auto *DTNode = DT->getNode(Pred)) {
          while (DTNode) {
            if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
              auto *LastAcc = &*(--DefList->end());
              assert(LastAcc == IncAcc &&
                     "Incorrect incoming access into phi.");
              break;
            }
            DTNode = DTNode->getIDom();
          }
        } else {
          // If Pred has unreachable predecessors, but has at least one Def,
          // the incoming access can be the last Def in Pred, or it could have
          // been optimized to LoE. After an update, though, the LoE may have
          // been replaced by another access, so IncAcc may be any access.
          // If Pred has unreachable predecessors and no Defs, the incoming
          // access should be LoE; however, after an update, it may be any
          // access.
        }
      }
    }
  }
#endif
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}
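
// Illustrative numbering checked above (a minimal sketch): for an access list
// [MemoryPhi, MemoryDef, MemoryUse], a valid numbering is 1, 2, 3; any
// non-increasing pair would trip the strictly-increasing assertion.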

/// Verify ordering: the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
/// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verify that, for each use, it appears in the appropriate
/// def's use list.
void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
#if !defined(NDEBUG)
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryPhi *Phi = getMemoryAccess(&B);
    if (Phi) {
      // Verify ordering.
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
      // Verify domination.
      for (const Use &U : Phi->uses())
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
#if defined(EXPENSIVE_CHECKS)
      // Verify def-uses.
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
               "Incoming phi block not a block predecessor");
      }
#endif
    }

    for (Instruction &I : B) {
      MemoryUseOrDef *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        // Verify ordering.
        ActualAccesses.push_back(MA);
        if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
          // Verify ordering.
          ActualDefs.push_back(MA);
          // Verify domination.
          for (const Use &U : MD->uses())
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
        }
#if defined(EXPENSIVE_CHECKS)
        // Verify def-uses.
        verifyUseInDefs(MA->getDefiningAccess(), MA);
#endif
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    // Verify ordering.
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by
  // another memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other
  // memory access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
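
// Illustrative example (a minimal sketch; IDs are hypothetical): within a
// single block, "1 = MemoryDef(liveOnEntry)" locally dominates a later
// "MemoryUse(1)" because its local number is smaller; when the two accesses
// are in different blocks, the dominator tree answers the query instead.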

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}

void MemoryPhi::print(raw_ostream &OS) const {
  ListSeparator LS(",");
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);

    OS << LS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}

void MemoryAccess::dump() const {
  // Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}
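
// Illustrative printed forms produced by the methods above (IDs, block names,
// and alias results are hypothetical):
//   1 = MemoryDef(liveOnEntry)
//   2 = MemoryDef(1)->1 MustAlias        ; an optimized def
//   3 = MemoryPhi({entry,1},{loop,2})
//   MemoryUse(2) MayAlias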
"style=filled, fillcolor=lightpink" 2306 : ""; 2307 } 2308 }; 2309 2310 } // namespace llvm 2311 2312 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) { 2313 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA(); 2314 if (DotCFGMSSA != "") { 2315 DOTFuncMSSAInfo CFGInfo(F, MSSA); 2316 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); 2317 } else 2318 MSSA.print(dbgs()); 2319 2320 if (VerifyMemorySSA) 2321 MSSA.verifyMemorySSA(); 2322 return false; 2323 } 2324 2325 AnalysisKey MemorySSAAnalysis::Key; 2326 2327 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F, 2328 FunctionAnalysisManager &AM) { 2329 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 2330 auto &AA = AM.getResult<AAManager>(F); 2331 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT)); 2332 } 2333 2334 bool MemorySSAAnalysis::Result::invalidate( 2335 Function &F, const PreservedAnalyses &PA, 2336 FunctionAnalysisManager::Invalidator &Inv) { 2337 auto PAC = PA.getChecker<MemorySSAAnalysis>(); 2338 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) || 2339 Inv.invalidate<AAManager>(F, PA) || 2340 Inv.invalidate<DominatorTreeAnalysis>(F, PA); 2341 } 2342 2343 PreservedAnalyses MemorySSAPrinterPass::run(Function &F, 2344 FunctionAnalysisManager &AM) { 2345 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 2346 if (DotCFGMSSA != "") { 2347 DOTFuncMSSAInfo CFGInfo(F, MSSA); 2348 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA); 2349 } else { 2350 OS << "MemorySSA for function: " << F.getName() << "\n"; 2351 MSSA.print(OS); 2352 } 2353 2354 return PreservedAnalyses::all(); 2355 } 2356 2357 PreservedAnalyses MemorySSAVerifierPass::run(Function &F, 2358 FunctionAnalysisManager &AM) { 2359 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA(); 2360 2361 return PreservedAnalyses::all(); 2362 } 2363 2364 char MemorySSAWrapperPass::ID = 0; 2365 2366 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) { 2367 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry()); 2368 } 2369 2370 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); } 2371 2372 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const { 2373 AU.setPreservesAll(); 2374 AU.addRequiredTransitive<DominatorTreeWrapperPass>(); 2375 AU.addRequiredTransitive<AAResultsWrapperPass>(); 2376 } 2377 2378 bool MemorySSAWrapperPass::runOnFunction(Function &F) { 2379 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 2380 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults(); 2381 MSSA.reset(new MemorySSA(F, &AA, &DT)); 2382 return false; 2383 } 2384 2385 void MemorySSAWrapperPass::verifyAnalysis() const { 2386 if (VerifyMemorySSA) 2387 MSSA->verifyMemorySSA(); 2388 } 2389 2390 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const { 2391 MSSA->print(OS); 2392 } 2393 2394 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {} 2395 2396 /// Walk the use-def chains starting at \p StartingAccess and find 2397 /// the MemoryAccess that actually clobbers Loc. 

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  if (DotCFGMSSA != "") {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else
    MSSA.print(dbgs());

  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
}

bool MemorySSAAnalysis::Result::invalidate(
    Function &F, const PreservedAnalyses &PA,
    FunctionAnalysisManager::Invalidator &Inv) {
  auto PAC = PA.getChecker<MemorySSAAnalysis>();
  return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
         Inv.invalidate<AAManager>(F, PA) ||
         Inv.invalidate<DominatorTreeAnalysis>(F, PA);
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  if (DotCFGMSSA != "") {
    DOTFuncMSSAInfo CFGInfo(F, MSSA);
    WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
  } else {
    OS << "MemorySSA for function: " << F.getName() << "\n";
    MSSA.print(OS);
  }

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const {
  if (VerifyMemorySSA)
    MSSA->verifyMemorySSA();
}

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc,
    unsigned &UpwardWalkLimit) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if
  // we hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = nullptr;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we
  // are handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber =
      Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}
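
// A minimal caller-side sketch for the walker API (variable names are
// hypothetical):
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(LoadAccess);
//
// For a MemoryUse, this may return an access strictly above its defining
// access when the nearest def does not actually clobber the load's location.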

template <typename AliasAnalysisType>
MemoryAccess *
MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
    MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since fences conservatively
  // clobber all memory and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}

bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
  auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (!isa<Instruction>(Ptr))
      return true;
    return isa<AllocaInst>(Ptr);
  };

  Ptr = Ptr->stripPointerCasts();
  if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
           GEP->hasAllConstantIndices();
  }
  return IsGuaranteedLoopInvariantBase(Ptr);
}