1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/IteratedDominanceFrontier.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Config/llvm-config.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Intrinsics.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/IR/Use.h"
41 #include "llvm/InitializePasses.h"
42 #include "llvm/Pass.h"
43 #include "llvm/Support/AtomicOrdering.h"
44 #include "llvm/Support/Casting.h"
45 #include "llvm/Support/CommandLine.h"
46 #include "llvm/Support/Compiler.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/ErrorHandling.h"
49 #include "llvm/Support/FormattedStream.h"
50 #include "llvm/Support/raw_ostream.h"
51 #include <algorithm>
52 #include <cassert>
53 #include <cstdlib>
54 #include <iterator>
55 #include <memory>
56 #include <utility>
57 
58 using namespace llvm;
59 
60 #define DEBUG_TYPE "memoryssa"
61 
62 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
63                       true)
64 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
65 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
66 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
67                     true)
68 
69 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
70                       "Memory SSA Printer", false, false)
71 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
72 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
73                     "Memory SSA Printer", false, false)
74 
75 static cl::opt<unsigned> MaxCheckLimit(
76     "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
79 
80 // Always verify MemorySSA if expensive checking is enabled.
81 #ifdef EXPENSIVE_CHECKS
82 bool llvm::VerifyMemorySSA = true;
83 #else
84 bool llvm::VerifyMemorySSA = false;
85 #endif
/// Enables MemorySSA as a dependency for loop passes in the legacy pass
/// manager.
87 cl::opt<bool> llvm::EnableMSSALoopDependency(
88     "enable-mssa-loop-dependency", cl::Hidden, cl::init(true),
89     cl::desc("Enable MemorySSA dependency for loop pass manager"));
90 
91 static cl::opt<bool, true>
92     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
93                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
94 
95 namespace llvm {
96 
97 /// An assembly annotator class to print Memory SSA information in
98 /// comments.
99 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
100   friend class MemorySSA;
101 
102   const MemorySSA *MSSA;
103 
104 public:
105   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
106 
107   void emitBasicBlockStartAnnot(const BasicBlock *BB,
108                                 formatted_raw_ostream &OS) override {
109     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
110       OS << "; " << *MA << "\n";
111   }
112 
113   void emitInstructionAnnot(const Instruction *I,
114                             formatted_raw_ostream &OS) override {
115     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
116       OS << "; " << *MA << "\n";
117   }
118 };
119 
120 } // end namespace llvm
121 
122 namespace {
123 
124 /// Our current alias analysis API differentiates heavily between calls and
125 /// non-calls, and functions called on one usually assert on the other.
126 /// This class encapsulates the distinction to simplify other code that wants
127 /// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
129 class MemoryLocOrCall {
130 public:
131   bool IsCall = false;
132 
133   MemoryLocOrCall(MemoryUseOrDef *MUD)
134       : MemoryLocOrCall(MUD->getMemoryInst()) {}
135   MemoryLocOrCall(const MemoryUseOrDef *MUD)
136       : MemoryLocOrCall(MUD->getMemoryInst()) {}
137 
138   MemoryLocOrCall(Instruction *Inst) {
139     if (auto *C = dyn_cast<CallBase>(Inst)) {
140       IsCall = true;
141       Call = C;
142     } else {
143       IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
146       if (!isa<FenceInst>(Inst))
147         Loc = MemoryLocation::get(Inst);
148     }
149   }
150 
151   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
152 
153   const CallBase *getCall() const {
154     assert(IsCall);
155     return Call;
156   }
157 
158   MemoryLocation getLoc() const {
159     assert(!IsCall);
160     return Loc;
161   }
162 
163   bool operator==(const MemoryLocOrCall &Other) const {
164     if (IsCall != Other.IsCall)
165       return false;
166 
167     if (!IsCall)
168       return Loc == Other.Loc;
169 
170     if (Call->getCalledOperand() != Other.Call->getCalledOperand())
171       return false;
172 
173     return Call->arg_size() == Other.Call->arg_size() &&
174            std::equal(Call->arg_begin(), Call->arg_end(),
175                       Other.Call->arg_begin());
176   }
177 
178 private:
179   union {
180     const CallBase *Call;
181     MemoryLocation Loc;
182   };
183 };
184 
185 } // end anonymous namespace
186 
187 namespace llvm {
188 
189 template <> struct DenseMapInfo<MemoryLocOrCall> {
190   static inline MemoryLocOrCall getEmptyKey() {
191     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
192   }
193 
194   static inline MemoryLocOrCall getTombstoneKey() {
195     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
196   }
197 
198   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
199     if (!MLOC.IsCall)
200       return hash_combine(
201           MLOC.IsCall,
202           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
203 
204     hash_code hash =
205         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
206                                       MLOC.getCall()->getCalledOperand()));
207 
208     for (const Value *Arg : MLOC.getCall()->args())
209       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
210     return hash;
211   }
212 
213   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
214     return LHS == RHS;
215   }
216 };
217 
218 } // end namespace llvm
219 
220 /// This does one-way checks to see if Use could theoretically be hoisted above
221 /// MayClobber. This will not check the other way around.
222 ///
223 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
224 /// MayClobber, with no potentially clobbering operations in between them.
225 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
226 static bool areLoadsReorderable(const LoadInst *Use,
227                                 const LoadInst *MayClobber) {
228   bool VolatileUse = Use->isVolatile();
229   bool VolatileClobber = MayClobber->isVolatile();
230   // Volatile operations may never be reordered with other volatile operations.
231   if (VolatileUse && VolatileClobber)
232     return false;
233   // Otherwise, volatile doesn't matter here. From the language reference:
234   // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'
236 
237   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
238   // is weaker, it can be moved above other loads. We just need to be sure that
239   // MayClobber isn't an acquire load, because loads can't be moved above
240   // acquire loads.
241   //
242   // Note that this explicitly *does* allow the free reordering of monotonic (or
243   // weaker) loads of the same address.
244   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
245   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
246                                                      AtomicOrdering::Acquire);
247   return !(SeqCstUse || MayClobberIsAcquire);
248 }
249 
250 namespace {
251 
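/// Result of a clobbering query: whether the MemoryDef is a clobber, and the
/// alias relation observed when it is.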
252 struct ClobberAlias {
253   bool IsClobber;
254   Optional<AliasResult> AR;
255 };
256 
257 } // end anonymous namespace
258 
259 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
260 // ignored if IsClobber = false.
261 template <typename AliasAnalysisType>
262 static ClobberAlias
263 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
264                          const Instruction *UseInst, AliasAnalysisType &AA) {
265   Instruction *DefInst = MD->getMemoryInst();
266   assert(DefInst && "Defining instruction not actually an instruction");
267   const auto *UseCall = dyn_cast<CallBase>(UseInst);
268   Optional<AliasResult> AR;
269 
270   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
271     // These intrinsics will show up as affecting memory, but they are just
272     // markers, mostly.
273     //
274     // FIXME: We probably don't actually want MemorySSA to model these at all
275     // (including creating MemoryAccesses for them): we just end up inventing
276     // clobbers where they don't really exist at all. Please see D43269 for
277     // context.
278     switch (II->getIntrinsicID()) {
279     case Intrinsic::lifetime_start:
280       if (UseCall)
281         return {false, NoAlias};
282       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
283       return {AR != NoAlias, AR};
284     case Intrinsic::lifetime_end:
285     case Intrinsic::invariant_start:
286     case Intrinsic::invariant_end:
287     case Intrinsic::assume:
288       return {false, NoAlias};
289     case Intrinsic::dbg_addr:
290     case Intrinsic::dbg_declare:
291     case Intrinsic::dbg_label:
292     case Intrinsic::dbg_value:
293       llvm_unreachable("debuginfo shouldn't have associated defs!");
294     default:
295       break;
296     }
297   }
298 
299   if (UseCall) {
300     ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
301     AR = isMustSet(I) ? MustAlias : MayAlias;
302     return {isModOrRefSet(I), AR};
303   }
304 
305   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
306     if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
307       return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
308 
309   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
310   AR = isMustSet(I) ? MustAlias : MayAlias;
311   return {isModSet(I), AR};
312 }
313 
314 template <typename AliasAnalysisType>
315 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
316                                              const MemoryUseOrDef *MU,
317                                              const MemoryLocOrCall &UseMLOC,
318                                              AliasAnalysisType &AA) {
319   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
320   // to exist while MemoryLocOrCall is pushed through places.
321   if (UseMLOC.IsCall)
322     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
323                                     AA);
324   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
325                                   AA);
326 }
327 
// Return true when MD may alias MU, and false otherwise.
329 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
330                                         AliasAnalysis &AA) {
331   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
332 }
333 
334 namespace {
335 
336 struct UpwardsMemoryQuery {
337   // True if our original query started off as a call
338   bool IsCall = false;
339   // The pointer location we started the query with. This will be empty if
340   // IsCall is true.
341   MemoryLocation StartingLoc;
342   // This is the instruction we were querying about.
343   const Instruction *Inst = nullptr;
344   // The MemoryAccess we actually got called with, used to test local domination
345   const MemoryAccess *OriginalAccess = nullptr;
346   Optional<AliasResult> AR = MayAlias;
347   bool SkipSelfAccess = false;
348 
349   UpwardsMemoryQuery() = default;
350 
351   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
352       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
353     if (!IsCall)
354       StartingLoc = MemoryLocation::get(Inst);
355   }
356 };
357 
358 } // end anonymous namespace
359 
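/// Return true if MD's instruction is a lifetime_end intrinsic whose pointer
/// argument must-aliases Loc, i.e. the lifetime of the memory at Loc ends at
/// MD.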
360 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
361                            BatchAAResults &AA) {
362   Instruction *Inst = MD->getMemoryInst();
363   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
364     switch (II->getIntrinsicID()) {
365     case Intrinsic::lifetime_end:
366       return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
367     default:
368       return false;
369     }
370   }
371   return false;
372 }
373 
374 template <typename AliasAnalysisType>
375 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
376                                                    const Instruction *I) {
377   // If the memory can't be changed, then loads of the memory can't be
378   // clobbered.
379   return isa<LoadInst>(I) && (I->hasMetadata(LLVMContext::MD_invariant_load) ||
380                               AA.pointsToConstantMemory(MemoryLocation(
381                                   cast<LoadInst>(I)->getPointerOperand())));
382 }
383 
384 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
386 ///
387 /// This is meant to be as simple and self-contained as possible. Because it
388 /// uses no cache, etc., it can be relatively expensive.
389 ///
390 /// \param Start     The MemoryAccess that we want to walk from.
391 /// \param ClobberAt A clobber for Start.
392 /// \param StartLoc  The MemoryLocation for Start.
393 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
394 /// \param Query     The UpwardsMemoryQuery we used for our search.
395 /// \param AA        The AliasAnalysis we used for our search.
396 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
397 
398 template <typename AliasAnalysisType>
399 LLVM_ATTRIBUTE_UNUSED static void
400 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
401                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
402                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
403                    bool AllowImpreciseClobber = false) {
404   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
405 
406   if (MSSA.isLiveOnEntryDef(Start)) {
407     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
408            "liveOnEntry must clobber itself");
409     return;
410   }
411 
412   bool FoundClobber = false;
413   DenseSet<ConstMemoryAccessPair> VisitedPhis;
414   SmallVector<ConstMemoryAccessPair, 8> Worklist;
415   Worklist.emplace_back(Start, StartLoc);
416   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
417   // is found, complain.
418   while (!Worklist.empty()) {
419     auto MAP = Worklist.pop_back_val();
420     // All we care about is that nothing from Start to ClobberAt clobbers Start.
421     // We learn nothing from revisiting nodes.
422     if (!VisitedPhis.insert(MAP).second)
423       continue;
424 
425     for (const auto *MA : def_chain(MAP.first)) {
426       if (MA == ClobberAt) {
427         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
428           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
429           // since it won't let us short-circuit.
430           //
431           // Also, note that this can't be hoisted out of the `Worklist` loop,
432           // since MD may only act as a clobber for 1 of N MemoryLocations.
433           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
434           if (!FoundClobber) {
435             ClobberAlias CA =
436                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
437             if (CA.IsClobber) {
438               FoundClobber = true;
439               // Not used: CA.AR;
440             }
441           }
442         }
443         break;
444       }
445 
446       // We should never hit liveOnEntry, unless it's the clobber.
447       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
448 
449       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
450         // If Start is a Def, skip self.
451         if (MD == Start)
452           continue;
453 
454         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
455                     .IsClobber &&
456                "Found clobber before reaching ClobberAt!");
457         continue;
458       }
459 
460       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
461         (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
464         continue;
465       }
466 
467       assert(isa<MemoryPhi>(MA));
468 
469       // Add reachable phi predecessors
470       for (auto ItB = upward_defs_begin(
471                     {const_cast<MemoryAccess *>(MA), MAP.second},
472                     MSSA.getDomTree()),
473                 ItE = upward_defs_end();
474            ItB != ItE; ++ItB)
475         if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
476           Worklist.emplace_back(*ItB);
477     }
478   }
479 
480   // If the verify is done following an optimization, it's possible that
481   // ClobberAt was a conservative clobbering, that we can now infer is not a
482   // true clobbering access. Don't fail the verify if that's the case.
483   // We do have accesses that claim they're optimized, but could be optimized
484   // further. Updating all these can be expensive, so allow it for now (FIXME).
485   if (AllowImpreciseClobber)
486     return;
487 
488   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
489   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
490   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
491          "ClobberAt never acted as a clobber");
492 }
493 
494 namespace {
495 
496 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
497 /// in one class.
498 template <class AliasAnalysisType> class ClobberWalker {
499   /// Save a few bytes by using unsigned instead of size_t.
500   using ListIndex = unsigned;
501 
502   /// Represents a span of contiguous MemoryDefs, potentially ending in a
503   /// MemoryPhi.
504   struct DefPath {
505     MemoryLocation Loc;
506     // Note that, because we always walk in reverse, Last will always dominate
507     // First. Also note that First and Last are inclusive.
508     MemoryAccess *First;
509     MemoryAccess *Last;
510     Optional<ListIndex> Previous;
511 
512     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
513             Optional<ListIndex> Previous)
514         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
515 
516     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
517             Optional<ListIndex> Previous)
518         : DefPath(Loc, Init, Init, Previous) {}
519   };
520 
521   const MemorySSA &MSSA;
522   AliasAnalysisType &AA;
523   DominatorTree &DT;
524   UpwardsMemoryQuery *Query;
525   unsigned *UpwardWalkLimit;
526 
527   // Phi optimization bookkeeping:
528   // List of DefPath to process during the current phi optimization walk.
529   SmallVector<DefPath, 32> Paths;
530   // List of visited <Access, Location> pairs; we can skip paths already
531   // visited with the same memory location.
532   DenseSet<ConstMemoryAccessPair> VisitedPhis;
533   // Record if phi translation has been performed during the current phi
534   // optimization walk, as merging alias results after phi translation can
535   // yield incorrect results. Context in PR46156.
536   bool PerformedPhiTranslation = false;
537 
538   /// Find the nearest def or phi that `From` can legally be optimized to.
539   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
540     assert(From->getNumOperands() && "Phi with no operands?");
541 
542     BasicBlock *BB = From->getBlock();
543     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
544     DomTreeNode *Node = DT.getNode(BB);
545     while ((Node = Node->getIDom())) {
546       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
547       if (Defs)
548         return &*Defs->rbegin();
549     }
550     return Result;
551   }
552 
553   /// Result of calling walkToPhiOrClobber.
554   struct UpwardsWalkResult {
555     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Includes alias info when a clobber is found.
557     MemoryAccess *Result;
558     bool IsKnownClobber;
559     Optional<AliasResult> AR;
560   };
561 
562   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
563   /// This will update Desc.Last as it walks. It will (optionally) also stop at
564   /// StopAt.
565   ///
  /// This does not test whether StopAt is a clobber.
567   UpwardsWalkResult
568   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
569                      const MemoryAccess *SkipStopAt = nullptr) const {
570     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
571     assert(UpwardWalkLimit && "Need a valid walk limit");
572     bool LimitAlreadyReached = false;
573     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
574     // it to 1. This will not do any alias() calls. It either returns in the
575     // first iteration in the loop below, or is set back to 0 if all def chains
576     // are free of MemoryDefs.
577     if (!*UpwardWalkLimit) {
578       *UpwardWalkLimit = 1;
579       LimitAlreadyReached = true;
580     }
581 
582     for (MemoryAccess *Current : def_chain(Desc.Last)) {
583       Desc.Last = Current;
584       if (Current == StopAt || Current == SkipStopAt)
585         return {Current, false, MayAlias};
586 
587       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
588         if (MSSA.isLiveOnEntryDef(MD))
589           return {MD, true, MustAlias};
590 
591         if (!--*UpwardWalkLimit)
592           return {Current, true, MayAlias};
593 
594         ClobberAlias CA =
595             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
596         if (CA.IsClobber)
597           return {MD, true, CA.AR};
598       }
599     }
600 
601     if (LimitAlreadyReached)
602       *UpwardWalkLimit = 0;
603 
604     assert(isa<MemoryPhi>(Desc.Last) &&
605            "Ended at a non-clobber that's not a phi?");
606     return {Desc.Last, false, MayAlias};
607   }
608 
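  /// For each incoming value of \p Phi, create a new DefPath (translating the
  /// location of \p PriorNode's path across the phi where needed) and record
  /// its index in \p PausedSearches so that the search can be resumed later.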
609   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
610                    ListIndex PriorNode) {
611     auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
612                                              &PerformedPhiTranslation);
613     auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
614     for (const MemoryAccessPair &P : UpwardDefs) {
615       PausedSearches.push_back(Paths.size());
616       Paths.emplace_back(P.second, P.first, PriorNode);
617     }
618   }
619 
620   /// Represents a search that terminated after finding a clobber. This clobber
621   /// may or may not be present in the path of defs from LastNode..SearchStart,
622   /// since it may have been retrieved from cache.
623   struct TerminatedPath {
624     MemoryAccess *Clobber;
625     ListIndex LastNode;
626   };
627 
628   /// Get an access that keeps us from optimizing to the given phi.
629   ///
  /// PausedSearches is an array of indices into the Paths array. On entry it
  /// holds the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
633   ///
634   /// If this returns None, NewPaused is a vector of searches that terminated
635   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
636   Optional<TerminatedPath>
637   getBlockingAccess(const MemoryAccess *StopWhere,
638                     SmallVectorImpl<ListIndex> &PausedSearches,
639                     SmallVectorImpl<ListIndex> &NewPaused,
640                     SmallVectorImpl<TerminatedPath> &Terminated) {
641     assert(!PausedSearches.empty() && "No searches to continue?");
642 
643     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
644     // PausedSearches as our stack.
645     while (!PausedSearches.empty()) {
646       ListIndex PathIndex = PausedSearches.pop_back_val();
647       DefPath &Node = Paths[PathIndex];
648 
649       // If we've already visited this path with this MemoryLocation, we don't
650       // need to do so again.
651       //
652       // NOTE: That we just drop these paths on the ground makes caching
653       // behavior sporadic. e.g. given a diamond:
654       //  A
655       // B C
656       //  D
657       //
658       // ...If we walk D, B, A, C, we'll only cache the result of phi
659       // optimization for A, B, and D; C will be skipped because it dies here.
660       // This arguably isn't the worst thing ever, since:
661       //   - We generally query things in a top-down order, so if we got below D
662       //     without needing cache entries for {C, MemLoc}, then chances are
663       //     that those cache entries would end up ultimately unused.
664       //   - We still cache things for A, so C only needs to walk up a bit.
665       // If this behavior becomes problematic, we can fix without a ton of extra
666       // work.
667       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
668         if (PerformedPhiTranslation) {
669           // If visiting this path performed Phi translation, don't continue,
670           // since it may not be correct to merge results from two paths if one
671           // relies on the phi translation.
672           TerminatedPath Term{Node.Last, PathIndex};
673           return Term;
674         }
675         continue;
676       }
677 
678       const MemoryAccess *SkipStopWhere = nullptr;
679       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
680         assert(isa<MemoryDef>(Query->OriginalAccess));
681         SkipStopWhere = Query->OriginalAccess;
682       }
683 
684       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
685                                                  /*StopAt=*/StopWhere,
686                                                  /*SkipStopAt=*/SkipStopWhere);
687       if (Res.IsKnownClobber) {
688         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
689 
690         // If this wasn't a cache hit, we hit a clobber when walking. That's a
691         // failure.
692         TerminatedPath Term{Res.Result, PathIndex};
693         if (!MSSA.dominates(Res.Result, StopWhere))
694           return Term;
695 
696         // Otherwise, it's a valid thing to potentially optimize to.
697         Terminated.push_back(Term);
698         continue;
699       }
700 
701       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
702         // We've hit our target. Save this path off for if we want to continue
703         // walking. If we are in the mode of skipping the OriginalAccess, and
704         // we've reached back to the OriginalAccess, do not save path, we've
705         // just looped back to self.
706         if (Res.Result != SkipStopWhere)
707           NewPaused.push_back(PathIndex);
708         continue;
709       }
710 
711       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
712       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
713     }
714 
715     return None;
716   }
717 
718   template <typename T, typename Walker>
719   struct generic_def_path_iterator
720       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
721                                     std::forward_iterator_tag, T *> {
722     generic_def_path_iterator() {}
723     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
724 
725     T &operator*() const { return curNode(); }
726 
727     generic_def_path_iterator &operator++() {
728       N = curNode().Previous;
729       return *this;
730     }
731 
732     bool operator==(const generic_def_path_iterator &O) const {
733       if (N.hasValue() != O.N.hasValue())
734         return false;
735       return !N.hasValue() || *N == *O.N;
736     }
737 
738   private:
739     T &curNode() const { return W->Paths[*N]; }
740 
741     Walker *W = nullptr;
742     Optional<ListIndex> N = None;
743   };
744 
745   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
746   using const_def_path_iterator =
747       generic_def_path_iterator<const DefPath, const ClobberWalker>;
748 
749   iterator_range<def_path_iterator> def_path(ListIndex From) {
750     return make_range(def_path_iterator(this, From), def_path_iterator());
751   }
752 
753   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
754     return make_range(const_def_path_iterator(this, From),
755                       const_def_path_iterator());
756   }
757 
758   struct OptznResult {
759     /// The path that contains our result.
760     TerminatedPath PrimaryClobber;
761     /// The paths that we can legally cache back from, but that aren't
762     /// necessarily the result of the Phi optimization.
763     SmallVector<TerminatedPath, 4> OtherClobbers;
764   };
765 
766   ListIndex defPathIndex(const DefPath &N) const {
767     // The assert looks nicer if we don't need to do &N
768     const DefPath *NP = &N;
769     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
770            "Out of bounds DefPath!");
771     return NP - &Paths.front();
772   }
773 
774   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
775   /// that act as legal clobbers. Note that this won't return *all* clobbers.
776   ///
777   /// Phi optimization algorithm tl;dr:
778   ///   - Find the earliest def/phi, A, we can optimize to
779   ///   - Find if all paths from the starting memory access ultimately reach A
780   ///     - If not, optimization isn't possible.
781   ///     - Otherwise, walk from A to another clobber or phi, A'.
782   ///       - If A' is a def, we're done.
783   ///       - If A' is a phi, try to optimize it.
784   ///
785   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
786   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
787   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
788                              const MemoryLocation &Loc) {
789     assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
790            "Reset the optimization state.");
791 
792     Paths.emplace_back(Loc, Start, Phi, None);
793     // Stores how many "valid" optimization nodes we had prior to calling
794     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
795     auto PriorPathsSize = Paths.size();
796 
797     SmallVector<ListIndex, 16> PausedSearches;
798     SmallVector<ListIndex, 8> NewPaused;
799     SmallVector<TerminatedPath, 4> TerminatedPaths;
800 
801     addSearches(Phi, PausedSearches, 0);
802 
803     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
804     // Paths.
805     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
806       assert(!Paths.empty() && "Need a path to move");
807       auto Dom = Paths.begin();
808       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
809         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
810           Dom = I;
811       auto Last = Paths.end() - 1;
812       if (Last != Dom)
813         std::iter_swap(Last, Dom);
814     };
815 
816     MemoryPhi *Current = Phi;
817     while (true) {
818       assert(!MSSA.isLiveOnEntryDef(Current) &&
819              "liveOnEntry wasn't treated as a clobber?");
820 
821       const auto *Target = getWalkTarget(Current);
822       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
823       // optimization for the prior phi.
824       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
825         return MSSA.dominates(P.Clobber, Target);
826       }));
827 
828       // FIXME: This is broken, because the Blocker may be reported to be
829       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
830       // For the moment, this is fine, since we do nothing with blocker info.
831       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
832               Target, PausedSearches, NewPaused, TerminatedPaths)) {
833 
834         // Find the node we started at. We can't search based on N->Last, since
835         // we may have gone around a loop with a different MemoryLocation.
836         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
837           return defPathIndex(N) < PriorPathsSize;
838         });
839         assert(Iter != def_path_iterator());
840 
841         DefPath &CurNode = *Iter;
842         assert(CurNode.Last == Current);
843 
844         // Two things:
845         // A. We can't reliably cache all of NewPaused back. Consider a case
846         //    where we have two paths in NewPaused; one of which can't optimize
847         //    above this phi, whereas the other can. If we cache the second path
848         //    back, we'll end up with suboptimal cache entries. We can handle
849         //    cases like this a bit better when we either try to find all
850         //    clobbers that block phi optimization, or when our cache starts
851         //    supporting unfinished searches.
852         // B. We can't reliably cache TerminatedPaths back here without doing
853         //    extra checks; consider a case like:
854         //       T
855         //      / \
856         //     D   C
857         //      \ /
858         //       S
859         //    Where T is our target, C is a node with a clobber on it, D is a
860         //    diamond (with a clobber *only* on the left or right node, N), and
861         //    S is our start. Say we walk to D, through the node opposite N
862         //    (read: ignoring the clobber), and see a cache entry in the top
863         //    node of D. That cache entry gets put into TerminatedPaths. We then
864         //    walk up to C (N is later in our worklist), find the clobber, and
865         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
866         //    the bottom part of D to the cached clobber, ignoring the clobber
867         //    in N. Again, this problem goes away if we start tracking all
868         //    blockers for a given phi optimization.
869         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
870         return {Result, {}};
871       }
872 
873       // If there's nothing left to search, then all paths led to valid clobbers
874       // that we got from our cache; pick the nearest to the start, and allow
875       // the rest to be cached back.
876       if (NewPaused.empty()) {
877         MoveDominatedPathToEnd(TerminatedPaths);
878         TerminatedPath Result = TerminatedPaths.pop_back_val();
879         return {Result, std::move(TerminatedPaths)};
880       }
881 
882       MemoryAccess *DefChainEnd = nullptr;
883       SmallVector<TerminatedPath, 4> Clobbers;
884       for (ListIndex Paused : NewPaused) {
885         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
886         if (WR.IsKnownClobber)
887           Clobbers.push_back({WR.Result, Paused});
888         else
889           // Micro-opt: If we hit the end of the chain, save it.
890           DefChainEnd = WR.Result;
891       }
892 
893       if (!TerminatedPaths.empty()) {
894         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
895         // do it now.
896         if (!DefChainEnd)
897           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
898             DefChainEnd = MA;
899         assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
900 
901         // If any of the terminated paths don't dominate the phi we'll try to
902         // optimize, we need to figure out what they are and quit.
903         const BasicBlock *ChainBB = DefChainEnd->getBlock();
904         for (const TerminatedPath &TP : TerminatedPaths) {
905           // Because we know that DefChainEnd is as "high" as we can go, we
906           // don't need local dominance checks; BB dominance is sufficient.
907           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
908             Clobbers.push_back(TP);
909         }
910       }
911 
912       // If we have clobbers in the def chain, find the one closest to Current
913       // and quit.
914       if (!Clobbers.empty()) {
915         MoveDominatedPathToEnd(Clobbers);
916         TerminatedPath Result = Clobbers.pop_back_val();
917         return {Result, std::move(Clobbers)};
918       }
919 
920       assert(all_of(NewPaused,
921                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
922 
923       // Because liveOnEntry is a clobber, this must be a phi.
924       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
925 
926       PriorPathsSize = Paths.size();
927       PausedSearches.clear();
928       for (ListIndex I : NewPaused)
929         addSearches(DefChainPhi, PausedSearches, I);
930       NewPaused.clear();
931 
932       Current = DefChainPhi;
933     }
934   }
935 
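  /// Check (via assert) that every clobber in \p R.OtherClobbers dominates the
  /// primary clobber.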
936   void verifyOptResult(const OptznResult &R) const {
937     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
938       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
939     }));
940   }
941 
942   void resetPhiOptznState() {
943     Paths.clear();
944     VisitedPhis.clear();
945     PerformedPhiTranslation = false;
946   }
947 
948 public:
949   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
950       : MSSA(MSSA), AA(AA), DT(DT) {}
951 
952   AliasAnalysisType *getAA() { return &AA; }
953   /// Finds the nearest clobber for the given query, optimizing phis if
954   /// possible.
955   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
956                             unsigned &UpWalkLimit) {
957     Query = &Q;
958     UpwardWalkLimit = &UpWalkLimit;
959     // Starting limit must be > 0.
960     if (!UpWalkLimit)
961       UpWalkLimit++;
962 
963     MemoryAccess *Current = Start;
964     // This walker pretends uses don't exist. If we're handed one, silently grab
965     // its def. (This has the nice side-effect of ensuring we never cache uses)
966     if (auto *MU = dyn_cast<MemoryUse>(Start))
967       Current = MU->getDefiningAccess();
968 
969     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
970     // Fast path for the overly-common case (no crazy phi optimization
971     // necessary)
972     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
973     MemoryAccess *Result;
974     if (WalkResult.IsKnownClobber) {
975       Result = WalkResult.Result;
976       Q.AR = WalkResult.AR;
977     } else {
978       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
979                                           Current, Q.StartingLoc);
980       verifyOptResult(OptRes);
981       resetPhiOptznState();
982       Result = OptRes.PrimaryClobber.Clobber;
983     }
984 
985 #ifdef EXPENSIVE_CHECKS
986     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
987       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
988 #endif
989     return Result;
990   }
991 };
992 
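/// Stack frame for the iterative renaming walk in renamePass: the dominator
/// tree node being visited, the next child to descend into, and the incoming
/// MemoryAccess at that point in the walk.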
993 struct RenamePassData {
994   DomTreeNode *DTN;
995   DomTreeNode::const_iterator ChildIt;
996   MemoryAccess *IncomingVal;
997 
998   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
999                  MemoryAccess *M)
1000       : DTN(D), ChildIt(It), IncomingVal(M) {}
1001 
1002   void swap(RenamePassData &RHS) {
1003     std::swap(DTN, RHS.DTN);
1004     std::swap(ChildIt, RHS.ChildIt);
1005     std::swap(IncomingVal, RHS.IncomingVal);
1006   }
1007 };
1008 
1009 } // end anonymous namespace
1010 
1011 namespace llvm {
1012 
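/// Shared implementation behind MemorySSA's walkers: wraps a ClobberWalker and
/// exposes the clobber queries that CachingWalker and SkipSelfWalker forward
/// to.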
1013 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1014   ClobberWalker<AliasAnalysisType> Walker;
1015   MemorySSA *MSSA;
1016 
1017 public:
1018   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1019       : Walker(*M, *A, *D), MSSA(M) {}
1020 
1021   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1022                                               const MemoryLocation &,
1023                                               unsigned &);
  // The third argument (bool) defines whether the clobber search should skip the
1025   // original queried access. If true, there will be a follow-up query searching
1026   // for a clobber access past "self". Note that the Optimized access is not
1027   // updated if a new clobber is found by this SkipSelf search. If this
1028   // additional query becomes heavily used we may decide to cache the result.
1029   // Walker instantiations will decide how to set the SkipSelf bool.
1030   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1031 };
1032 
1033 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1034 /// longer does caching on its own, but the name has been retained for the
1035 /// moment.
1036 template <class AliasAnalysisType>
1037 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1038   ClobberWalkerBase<AliasAnalysisType> *Walker;
1039 
1040 public:
1041   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1042       : MemorySSAWalker(M), Walker(W) {}
1043   ~CachingWalker() override = default;
1044 
1045   using MemorySSAWalker::getClobberingMemoryAccess;
1046 
1047   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1048     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1049   }
1050   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1051                                           const MemoryLocation &Loc,
1052                                           unsigned &UWL) {
1053     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1054   }
1055 
1056   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1057     unsigned UpwardWalkLimit = MaxCheckLimit;
1058     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1059   }
1060   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1061                                           const MemoryLocation &Loc) override {
1062     unsigned UpwardWalkLimit = MaxCheckLimit;
1063     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1064   }
1065 
1066   void invalidateInfo(MemoryAccess *MA) override {
1067     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1068       MUD->resetOptimized();
1069   }
1070 };
1071 
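/// A walker like CachingWalker, except that its instruction-based clobber
/// queries pass SkipSelf == true, so the search looks for a clobber past the
/// queried access itself.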
1072 template <class AliasAnalysisType>
1073 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1074   ClobberWalkerBase<AliasAnalysisType> *Walker;
1075 
1076 public:
1077   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1078       : MemorySSAWalker(M), Walker(W) {}
1079   ~SkipSelfWalker() override = default;
1080 
1081   using MemorySSAWalker::getClobberingMemoryAccess;
1082 
1083   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1084     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1085   }
1086   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1087                                           const MemoryLocation &Loc,
1088                                           unsigned &UWL) {
1089     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1090   }
1091 
1092   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1093     unsigned UpwardWalkLimit = MaxCheckLimit;
1094     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1095   }
1096   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1097                                           const MemoryLocation &Loc) override {
1098     unsigned UpwardWalkLimit = MaxCheckLimit;
1099     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1100   }
1101 
1102   void invalidateInfo(MemoryAccess *MA) override {
1103     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1104       MUD->resetOptimized();
1105   }
1106 };
1107 
1108 } // end namespace llvm
1109 
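/// Propagate \p IncomingVal into the MemoryPhis of \p BB's successors: when
/// \p RenameAllUses is set, overwrite the existing incoming value for BB;
/// otherwise, add a new incoming entry for BB.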
1110 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1111                                     bool RenameAllUses) {
1112   // Pass through values to our successors
1113   for (const BasicBlock *S : successors(BB)) {
1114     auto It = PerBlockAccesses.find(S);
1115     // Rename the phi nodes in our successor block
1116     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1117       continue;
1118     AccessList *Accesses = It->second.get();
1119     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1120     if (RenameAllUses) {
1121       bool ReplacementDone = false;
1122       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1123         if (Phi->getIncomingBlock(I) == BB) {
1124           Phi->setIncomingValue(I, IncomingVal);
1125           ReplacementDone = true;
1126         }
1127       (void) ReplacementDone;
1128       assert(ReplacementDone && "Incomplete phi during partial rename");
1129     } else
1130       Phi->addIncoming(IncomingVal, BB);
1131   }
1132 }
1133 
1134 /// Rename a single basic block into MemorySSA form.
1135 /// Uses the standard SSA renaming algorithm.
1136 /// \returns The new incoming value.
1137 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1138                                      bool RenameAllUses) {
1139   auto It = PerBlockAccesses.find(BB);
1140   // Skip most processing if the list is empty.
1141   if (It != PerBlockAccesses.end()) {
1142     AccessList *Accesses = It->second.get();
1143     for (MemoryAccess &L : *Accesses) {
1144       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1145         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1146           MUD->setDefiningAccess(IncomingVal);
1147         if (isa<MemoryDef>(&L))
1148           IncomingVal = &L;
1149       } else {
1150         IncomingVal = &L;
1151       }
1152     }
1153   }
1154   return IncomingVal;
1155 }
1156 
1157 /// This is the standard SSA renaming algorithm.
1158 ///
1159 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1160 /// in phi nodes in our successors.
1161 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1162                            SmallPtrSetImpl<BasicBlock *> &Visited,
1163                            bool SkipVisited, bool RenameAllUses) {
1164   assert(Root && "Trying to rename accesses in an unreachable block");
1165 
1166   SmallVector<RenamePassData, 32> WorkStack;
1167   // Skip everything if we already renamed this block and we are skipping.
1168   // Note: You can't sink this into the if, because we need it to occur
1169   // regardless of whether we skip blocks or not.
1170   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1171   if (SkipVisited && AlreadyVisited)
1172     return;
1173 
1174   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1175   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1176   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1177 
1178   while (!WorkStack.empty()) {
1179     DomTreeNode *Node = WorkStack.back().DTN;
1180     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1181     IncomingVal = WorkStack.back().IncomingVal;
1182 
1183     if (ChildIt == Node->end()) {
1184       WorkStack.pop_back();
1185     } else {
1186       DomTreeNode *Child = *ChildIt;
1187       ++WorkStack.back().ChildIt;
1188       BasicBlock *BB = Child->getBlock();
1189       // Note: You can't sink this into the if, because we need it to occur
1190       // regardless of whether we skip blocks or not.
1191       AlreadyVisited = !Visited.insert(BB).second;
1192       if (SkipVisited && AlreadyVisited) {
1193         // We already visited this during our renaming, which can happen when
1194         // being asked to rename multiple blocks. Figure out the incoming val,
1195         // which is the last def.
1196         // Incoming value can only change if there is a block def, and in that
1197         // case, it's the last block def in the list.
1198         if (auto *BlockDefs = getWritableBlockDefs(BB))
1199           IncomingVal = &*BlockDefs->rbegin();
1200       } else
1201         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1202       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1203       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1204     }
1205   }
1206 }
1207 
1208 /// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
1210 /// being uses of the live on entry definition.
1211 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1212   assert(!DT->isReachableFromEntry(BB) &&
1213          "Reachable block found while handling unreachable blocks");
1214 
1215   // Make sure phi nodes in our reachable successors end up with a
1216   // LiveOnEntryDef for our incoming edge, even though our block is forward
1217   // unreachable.  We could just disconnect these blocks from the CFG fully,
1218   // but we do not right now.
1219   for (const BasicBlock *S : successors(BB)) {
1220     if (!DT->isReachableFromEntry(S))
1221       continue;
1222     auto It = PerBlockAccesses.find(S);
1223     // Rename the phi nodes in our successor block
1224     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1225       continue;
1226     AccessList *Accesses = It->second.get();
1227     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1228     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1229   }
1230 
1231   auto It = PerBlockAccesses.find(BB);
1232   if (It == PerBlockAccesses.end())
1233     return;
1234 
1235   auto &Accesses = It->second;
1236   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1237     auto Next = std::next(AI);
1238     // If we have a phi, just remove it. We are going to replace all
1239     // users with live on entry.
1240     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1241       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1242     else
1243       Accesses->erase(AI);
1244     AI = Next;
1245   }
1246 }
1247 
1248 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1249     : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1250       SkipWalker(nullptr), NextID(0) {
1251   // Build MemorySSA using a batch alias analysis. This reuses the internal
1252   // state that AA collects during an alias()/getModRefInfo() call. This is
  // safe because there are no CFG changes while building MemorySSA, and it
  // can significantly reduce the time spent by the compiler in AA, because we
  // will make queries about all the instructions in the Function.
1256   assert(AA && "No alias analysis?");
1257   BatchAAResults BatchAA(*AA);
1258   buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use the non-batch AliasAnalysis.
1261   this->AA = AA;
1262   // Also create the walker here.
1263   getWalker();
1264 }
1265 
1266 MemorySSA::~MemorySSA() {
1267   // Drop all our references
1268   for (const auto &Pair : PerBlockAccesses)
1269     for (MemoryAccess &MA : *Pair.second)
1270       MA.dropAllReferences();
1271 }
1272 
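/// Return the access list for \p BB, creating an empty one if it does not
/// exist yet.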
1273 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1274   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1275 
1276   if (Res.second)
1277     Res.first->second = std::make_unique<AccessList>();
1278   return Res.first->second.get();
1279 }
1280 
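/// Return the defs list for \p BB, creating an empty one if it does not exist
/// yet.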
1281 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1282   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1283 
1284   if (Res.second)
1285     Res.first->second = std::make_unique<DefsList>();
1286   return Res.first->second.get();
1287 }
1288 
1289 namespace llvm {
1290 
1291 /// This class is a batch walker of all MemoryUse's in the program, and points
1292 /// their defining access at the thing that actually clobbers them.  Because it
1293 /// is a batch walker that touches everything, it does not operate like the
1294 /// other walkers.  This walker is basically performing a top-down SSA renaming
1295 /// pass, where the version stack is used as the cache.  This enables it to be
1296 /// significantly more time and memory efficient than using the regular walker,
1297 /// which is walking bottom-up.
1298 class MemorySSA::OptimizeUses {
1299 public:
1300   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1301                BatchAAResults *BAA, DominatorTree *DT)
1302       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1303 
1304   void optimizeUses();
1305 
1306 private:
1307   /// This represents where a given memorylocation is in the stack.
1308   struct MemlocStackInfo {
1309     // This essentially is keeping track of versions of the stack. Whenever
1310     // the stack changes due to pushes or pops, these versions increase.
1311     unsigned long StackEpoch;
1312     unsigned long PopEpoch;
1313     // This is the lower bound of places on the stack to check. It is equal to
1314     // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
1317     unsigned long LowerBound;
1318     const BasicBlock *LowerBoundBlock;
1319     // This is where the last walk for this memory location ended.
1320     unsigned long LastKill;
1321     bool LastKillValid;
1322     Optional<AliasResult> AR;
1323   };
1324 
1325   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1326                            SmallVectorImpl<MemoryAccess *> &,
1327                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1328 
1329   MemorySSA *MSSA;
1330   CachingWalker<BatchAAResults> *Walker;
1331   BatchAAResults *AA;
1332   DominatorTree *DT;
1333 };
1334 
1335 } // end namespace llvm
1336 
/// Optimize the uses in a given block. This is basically the SSA renaming
1338 /// algorithm, with one caveat: We are able to use a single stack for all
1339 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1340 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1341 /// going to be some position in that stack of possible ones.
1342 ///
/// For each MemoryLocation we track the stack position it still needs to
/// check and where its last walk ended, because we only want to check the
/// things that have changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not currently use
/// invariantness or similar properties; if it starts to, we can modify
/// MemoryLocOrCall to include the relevant data).
1349 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1350     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1351     SmallVectorImpl<MemoryAccess *> &VersionStack,
1352     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1353 
  // If there are no accesses, there is nothing to do.
1355   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1356   if (Accesses == nullptr)
1357     return;
1358 
  // Pop everything that doesn't dominate the current block off the stack, and
  // increment the PopEpoch to account for this.
1361   while (true) {
1362     assert(
1363         !VersionStack.empty() &&
1364         "Version stack should have liveOnEntry sentinel dominating everything");
1365     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1366     if (DT->dominates(BackBlock, BB))
1367       break;
1368     while (VersionStack.back()->getBlock() == BackBlock)
1369       VersionStack.pop_back();
1370     ++PopEpoch;
1371   }
1372 
1373   for (MemoryAccess &MA : *Accesses) {
1374     auto *MU = dyn_cast<MemoryUse>(&MA);
1375     if (!MU) {
1376       VersionStack.push_back(&MA);
1377       ++StackEpoch;
1378       continue;
1379     }
1380 
1381     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1382       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1383       continue;
1384     }
1385 
1386     MemoryLocOrCall UseMLOC(MU);
1387     auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound
    // or last kill info.
1391     if (LocInfo.PopEpoch != PopEpoch) {
1392       LocInfo.PopEpoch = PopEpoch;
1393       LocInfo.StackEpoch = StackEpoch;
1394       // If the lower bound was in something that no longer dominates us, we
1395       // have to reset it.
1396       // We can't simply track stack size, because the stack may have had
1397       // pushes/pops in the meantime.
      // XXX: This is non-optimal, but it is only slower in cases with heavily
      // branching dominator trees.  Getting the optimal number of queries
      // would require making LowerBound and LastKill per-location stacks, and
      // popping them until the top of the stack dominates us.  That does not
      // seem worth it at the moment.  A much cheaper optimization would be to
      // always explore the deepest branch of the dominator tree first, which
      // would guarantee that this reset happens on the smallest set of blocks.
1405       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1406           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1407         // Reset the lower bound of things to check.
1408         // TODO: Some day we should be able to reset to last kill, rather than
1409         // 0.
1410         LocInfo.LowerBound = 0;
1411         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1412         LocInfo.LastKillValid = false;
1413       }
1414     } else if (LocInfo.StackEpoch != StackEpoch) {
1415       // If all that has changed is the StackEpoch, we only have to check the
1416       // new things on the stack, because we've checked everything before.  In
1417       // this case, the lower bound of things to check remains the same.
1418       LocInfo.PopEpoch = PopEpoch;
1419       LocInfo.StackEpoch = StackEpoch;
1420     }
1421     if (!LocInfo.LastKillValid) {
1422       LocInfo.LastKill = VersionStack.size() - 1;
1423       LocInfo.LastKillValid = true;
1424       LocInfo.AR = MayAlias;
1425     }
1426 
1427     // At this point, we should have corrected last kill and LowerBound to be
1428     // in bounds.
1429     assert(LocInfo.LowerBound < VersionStack.size() &&
1430            "Lower bound out of range");
1431     assert(LocInfo.LastKill < VersionStack.size() &&
1432            "Last kill info out of range");
1433     // In any case, the new upper bound is the top of the stack.
1434     unsigned long UpperBound = VersionStack.size() - 1;
1435 
1436     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1437       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1438                         << *(MU->getMemoryInst()) << ")"
1439                         << " because there are "
1440                         << UpperBound - LocInfo.LowerBound
1441                         << " stores to disambiguate\n");
1442       // Because we did not walk, LastKill is no longer valid, as this may
1443       // have been a kill.
1444       LocInfo.LastKillValid = false;
1445       continue;
1446     }
1447     bool FoundClobberResult = false;
1448     unsigned UpwardWalkLimit = MaxCheckLimit;
1449     while (UpperBound > LocInfo.LowerBound) {
1450       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker to see where we ended up, then go there.
1452         MemoryAccess *Result =
1453             Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1454         // We are guaranteed to find it or something is wrong
1455         while (VersionStack[UpperBound] != Result) {
1456           assert(UpperBound != 0);
1457           --UpperBound;
1458         }
1459         FoundClobberResult = true;
1460         break;
1461       }
1462 
1463       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1464       // If the lifetime of the pointer ends at this instruction, it's live on
1465       // entry.
1466       if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1467         // Reset UpperBound to liveOnEntryDef's place in the stack
1468         UpperBound = 0;
1469         FoundClobberResult = true;
1470         LocInfo.AR = MustAlias;
1471         break;
1472       }
1473       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1474       if (CA.IsClobber) {
1475         FoundClobberResult = true;
1476         LocInfo.AR = CA.AR;
1477         break;
1478       }
1479       --UpperBound;
1480     }
1481 
1482     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1483 
    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound.  PHI walking may cause it to be < LowerBound, and in fact even
    // < LastKill.
1486     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We are now last killed by the access we walked to.
1488       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1489         LocInfo.AR = None;
1490       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1491       LocInfo.LastKill = UpperBound;
1492     } else {
1493       // Otherwise, we checked all the new ones, and now we know we can get to
1494       // LastKill.
1495       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1496     }
1497     LocInfo.LowerBound = VersionStack.size() - 1;
1498     LocInfo.LowerBoundBlock = BB;
1499   }
1500 }
1501 
1502 /// Optimize uses to point to their actual clobbering definitions.
1503 void MemorySSA::OptimizeUses::optimizeUses() {
1504   SmallVector<MemoryAccess *, 16> VersionStack;
1505   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1506   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1507 
1508   unsigned long StackEpoch = 1;
1509   unsigned long PopEpoch = 1;
1510   // We perform a non-recursive top-down dominator tree walk.
1511   for (const auto *DomNode : depth_first(DT->getRootNode()))
1512     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1513                         LocStackInfo);
1514 }
1515 
1516 void MemorySSA::placePHINodes(
1517     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1518   // Determine where our MemoryPhi's should go
1519   ForwardIDFCalculator IDFs(*DT);
1520   IDFs.setDefiningBlocks(DefiningBlocks);
1521   SmallVector<BasicBlock *, 32> IDFBlocks;
1522   IDFs.calculate(IDFBlocks);
1523 
1524   // Now place MemoryPhi nodes.
1525   for (auto &BB : IDFBlocks)
1526     createMemoryPhi(BB);
1527 }
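
// As a small illustration of the IDF-based placement above (block names are
// hypothetical): if a MemoryDef occurs only in %if.then of a diamond
//
//   %entry -> { %if.then, %if.else } -> %merge
//
// then %merge lies on the iterated dominance frontier of {%if.then}, so a
// MemoryPhi is created at the start of %merge to join the def coming from
// %if.then with the memory state flowing in from %if.else.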
1528 
1529 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1530   // We create an access to represent "live on entry", for things like
1531   // arguments or users of globals, where the memory they use is defined before
1532   // the beginning of the function. We do not actually insert it into the IR.
1533   // We do not define a live on exit for the immediate uses, and thus our
1534   // semantics do *not* imply that something with no immediate uses can simply
1535   // be removed.
1536   BasicBlock &StartingPoint = F.getEntryBlock();
1537   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1538                                      &StartingPoint, NextID++));
1539 
  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream on demand.
1543   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1544   // Go through each block, figure out where defs occur, and chain together all
1545   // the accesses.
1546   for (BasicBlock &B : F) {
1547     bool InsertIntoDef = false;
1548     AccessList *Accesses = nullptr;
1549     DefsList *Defs = nullptr;
1550     for (Instruction &I : B) {
1551       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1552       if (!MUD)
1553         continue;
1554 
1555       if (!Accesses)
1556         Accesses = getOrCreateAccessList(&B);
1557       Accesses->push_back(MUD);
1558       if (isa<MemoryDef>(MUD)) {
1559         InsertIntoDef = true;
1560         if (!Defs)
1561           Defs = getOrCreateDefsList(&B);
1562         Defs->push_back(*MUD);
1563       }
1564     }
1565     if (InsertIntoDef)
1566       DefiningBlocks.insert(&B);
1567   }
1568   placePHINodes(DefiningBlocks);
1569 
1570   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1571   // filled in with all blocks.
1572   SmallPtrSet<BasicBlock *, 16> Visited;
1573   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1574 
1575   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1576   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1577   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1578 
1579   // Mark the uses in unreachable blocks as live on entry, so that they go
1580   // somewhere.
1581   for (auto &BB : F)
1582     if (!Visited.count(&BB))
1583       markUnreachableAsLiveOnEntry(&BB);
1584 }
1585 
1586 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1587 
1588 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1589   if (Walker)
1590     return Walker.get();
1591 
1592   if (!WalkerBase)
1593     WalkerBase =
1594         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1595 
1596   Walker =
1597       std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1598   return Walker.get();
1599 }
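
// Illustrative client usage of the walker (the surrounding setup, including
// the MemorySSA instance and the instruction I, is assumed):
//
//   if (MemoryUseOrDef *Acc = MSSA.getMemoryAccess(&I))
//     MemoryAccess *Clobber =
//         MSSA.getWalker()->getClobberingMemoryAccess(Acc);
//
// The walker is created lazily; it and the skip-self walker below share a
// single ClobberWalkerBase.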
1600 
1601 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1602   if (SkipWalker)
1603     return SkipWalker.get();
1604 
1605   if (!WalkerBase)
1606     WalkerBase =
1607         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1608 
1609   SkipWalker =
1610       std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1611   return SkipWalker.get();
}
1614 
1615 // This is a helper function used by the creation routines. It places NewAccess
1616 // into the access and defs lists for a given basic block, at the given
1617 // insertion point.
1618 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1619                                         const BasicBlock *BB,
1620                                         InsertionPlace Point) {
1621   auto *Accesses = getOrCreateAccessList(BB);
1622   if (Point == Beginning) {
1623     // If it's a phi node, it goes first, otherwise, it goes after any phi
1624     // nodes.
1625     if (isa<MemoryPhi>(NewAccess)) {
1626       Accesses->push_front(NewAccess);
1627       auto *Defs = getOrCreateDefsList(BB);
1628       Defs->push_front(*NewAccess);
1629     } else {
1630       auto AI = find_if_not(
1631           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1632       Accesses->insert(AI, NewAccess);
1633       if (!isa<MemoryUse>(NewAccess)) {
1634         auto *Defs = getOrCreateDefsList(BB);
1635         auto DI = find_if_not(
1636             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1637         Defs->insert(DI, *NewAccess);
1638       }
1639     }
1640   } else {
1641     Accesses->push_back(NewAccess);
1642     if (!isa<MemoryUse>(NewAccess)) {
1643       auto *Defs = getOrCreateDefsList(BB);
1644       Defs->push_back(*NewAccess);
1645     }
1646   }
1647   BlockNumberingValid.erase(BB);
1648 }
1649 
1650 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1651                                       AccessList::iterator InsertPt) {
1652   auto *Accesses = getWritableBlockAccesses(BB);
1653   bool WasEnd = InsertPt == Accesses->end();
1654   Accesses->insert(AccessList::iterator(InsertPt), What);
1655   if (!isa<MemoryUse>(What)) {
1656     auto *Defs = getOrCreateDefsList(BB);
1657     // If we got asked to insert at the end, we have an easy job, just shove it
1658     // at the end. If we got asked to insert before an existing def, we also get
1659     // an iterator. If we got asked to insert before a use, we have to hunt for
1660     // the next def.
1661     if (WasEnd) {
1662       Defs->push_back(*What);
1663     } else if (isa<MemoryDef>(InsertPt)) {
1664       Defs->insert(InsertPt->getDefsIterator(), *What);
1665     } else {
1666       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1667         ++InsertPt;
1668       // Either we found a def, or we are inserting at the end
1669       if (InsertPt == Accesses->end())
1670         Defs->push_back(*What);
1671       else
1672         Defs->insert(InsertPt->getDefsIterator(), *What);
1673     }
1674   }
1675   BlockNumberingValid.erase(BB);
1676 }
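
// For illustration (access numbers are made up): when asked to insert a new
// MemoryDef before the MemoryUse of an access list such as
//
//   0 = MemoryPhi(...) ; MemoryUse(0) ; 1 = MemoryDef(0)
//
// we are handed a use iterator, so the loop above scans forward to MemoryDef 1
// and inserts the new def before it in the defs list, keeping the access list
// and the defs list in the same relative order.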
1677 
1678 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1679   // Keep it in the lookup tables, remove from the lists
1680   removeFromLists(What, false);
1681 
1682   // Note that moving should implicitly invalidate the optimized state of a
1683   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1684   // MemoryDef.
1685   if (auto *MD = dyn_cast<MemoryDef>(What))
1686     MD->resetOptimized();
1687   What->setBlock(BB);
1688 }
1689 
1690 // Move What before Where in the IR.  The end result is that What will belong to
1691 // the right lists and have the right Block set, but will not otherwise be
1692 // correct. It will not have the right defining access, and if it is a def,
1693 // things below it will not properly be updated.
1694 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1695                        AccessList::iterator Where) {
1696   prepareForMoveTo(What, BB);
1697   insertIntoListsBefore(What, BB, Where);
1698 }
1699 
1700 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1701                        InsertionPlace Point) {
1702   if (isa<MemoryPhi>(What)) {
1703     assert(Point == Beginning &&
1704            "Can only move a Phi at the beginning of the block");
1705     // Update lookup table entry
1706     ValueToMemoryAccess.erase(What->getBlock());
1707     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1708     (void)Inserted;
1709     assert(Inserted && "Cannot move a Phi to a block that already has one");
1710   }
1711 
1712   prepareForMoveTo(What, BB);
1713   insertIntoListsForBlock(What, BB, Point);
1714 }
1715 
1716 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1717   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1718   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
1720   insertIntoListsForBlock(Phi, BB, Beginning);
1721   ValueToMemoryAccess[BB] = Phi;
1722   return Phi;
1723 }
1724 
1725 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1726                                                MemoryAccess *Definition,
1727                                                const MemoryUseOrDef *Template,
1728                                                bool CreationMustSucceed) {
1729   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1730   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1731   if (CreationMustSucceed)
1732     assert(NewAccess != nullptr && "Tried to create a memory access for a "
1733                                    "non-memory touching instruction");
1734   if (NewAccess) {
1735     assert((!Definition || !isa<MemoryUse>(Definition)) &&
1736            "A use cannot be a defining access");
1737     NewAccess->setDefiningAccess(Definition);
1738   }
1739   return NewAccess;
1740 }
1741 
1742 // Return true if the instruction has ordering constraints.
1743 // Note specifically that this only considers stores and loads
1744 // because others are still considered ModRef by getModRefInfo.
1745 static inline bool isOrdered(const Instruction *I) {
1746   if (auto *SI = dyn_cast<StoreInst>(I)) {
1747     if (!SI->isUnordered())
1748       return true;
1749   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1750     if (!LI->isUnordered())
1751       return true;
1752   }
1753   return false;
1754 }
1755 
1756 /// Helper function to create new memory accesses
1757 template <typename AliasAnalysisType>
1758 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1759                                            AliasAnalysisType *AAP,
1760                                            const MemoryUseOrDef *Template) {
1761   // The assume intrinsic has a control dependency which we model by claiming
1762   // that it writes arbitrarily. Debuginfo intrinsics may be considered
1763   // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1764   // dependencies here.
1765   // FIXME: Replace this special casing with a more accurate modelling of
1766   // assume's control dependency.
1767   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1768     if (II->getIntrinsicID() == Intrinsic::assume)
1769       return nullptr;
1770 
  // Using a nonstandard AA pipeline might leave us with unexpected modref
1772   // results for I, so add a check to not model instructions that may not read
1773   // from or write to memory. This is necessary for correctness.
1774   if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1775     return nullptr;
1776 
1777   bool Def, Use;
1778   if (Template) {
1779     Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1780     Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1781 #if !defined(NDEBUG)
1782     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1783     bool DefCheck, UseCheck;
1784     DefCheck = isModSet(ModRef) || isOrdered(I);
1785     UseCheck = isRefSet(ModRef);
1786     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1787 #endif
1788   } else {
    // Find out what effect this instruction has on memory.
1790     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1791     // The isOrdered check is used to ensure that volatiles end up as defs
1792     // (atomics end up as ModRef right now anyway).  Until we separate the
1793     // ordering chain from the memory chain, this enables people to see at least
1794     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1795     // will still give an answer that bypasses other volatile loads.  TODO:
1796     // Separate memory aliasing and ordering into two different chains so that
1797     // we can precisely represent both "what memory will this read/write/is
1798     // clobbered by" and "what instructions can I move this past".
1799     Def = isModSet(ModRef) || isOrdered(I);
1800     Use = isRefSet(ModRef);
1801   }
1802 
  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore such instructions.
1805   if (!Def && !Use)
1806     return nullptr;
1807 
1808   MemoryUseOrDef *MUD;
1809   if (Def)
1810     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1811   else
1812     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1813   ValueToMemoryAccess[I] = MUD;
1814   return MUD;
1815 }
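
// For illustration, the classification above maps hypothetical instructions
// roughly as follows:
//
//   store i32 1, i32* %p             ; Mod           -> MemoryDef
//   %x = load i32, i32* %p           ; Ref           -> MemoryUse
//   %y = load volatile i32, i32* %p  ; Ref + ordered -> MemoryDef
//   call void @llvm.assume(i1 %c)    ; special-cased -> no access (nullptr)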
1816 
/// Returns true if \p Replacer dominates \p Replacee.
1818 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1819                              const MemoryAccess *Replacee) const {
1820   if (isa<MemoryUseOrDef>(Replacee))
1821     return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1822   const auto *MP = cast<MemoryPhi>(Replacee);
1823   // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
1826   for (const Use &Arg : MP->operands()) {
1827     if (Arg.get() != Replacee &&
1828         !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1829       return false;
1830   }
1831   return true;
1832 }
1833 
1834 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1835 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1836   assert(MA->use_empty() &&
1837          "Trying to remove memory access that still has uses");
1838   BlockNumbering.erase(MA);
1839   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1840     MUD->setDefiningAccess(nullptr);
1841   // Invalidate our walker's cache if necessary
1842   if (!isa<MemoryUse>(MA))
1843     getWalker()->invalidateInfo(MA);
1844 
1845   Value *MemoryInst;
1846   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1847     MemoryInst = MUD->getMemoryInst();
1848   else
1849     MemoryInst = MA->getBlock();
1850 
1851   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1852   if (VMA->second == MA)
1853     ValueToMemoryAccess.erase(VMA);
1854 }
1855 
1856 /// Properly remove \p MA from all of MemorySSA's lists.
1857 ///
1858 /// Because of the way the intrusive list and use lists work, it is important to
1859 /// do removal in the right order.
1860 /// ShouldDelete defaults to true, and will cause the memory access to also be
1861 /// deleted, not just removed.
1862 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1863   BasicBlock *BB = MA->getBlock();
1864   // The access list owns the reference, so we erase it from the non-owning list
1865   // first.
1866   if (!isa<MemoryUse>(MA)) {
1867     auto DefsIt = PerBlockDefs.find(BB);
1868     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1869     Defs->remove(*MA);
1870     if (Defs->empty())
1871       PerBlockDefs.erase(DefsIt);
1872   }
1873 
1874   // The erase call here will delete it. If we don't want it deleted, we call
1875   // remove instead.
1876   auto AccessIt = PerBlockAccesses.find(BB);
1877   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1878   if (ShouldDelete)
1879     Accesses->erase(MA);
1880   else
1881     Accesses->remove(MA);
1882 
1883   if (Accesses->empty()) {
1884     PerBlockAccesses.erase(AccessIt);
1885     BlockNumberingValid.erase(BB);
1886   }
1887 }
1888 
1889 void MemorySSA::print(raw_ostream &OS) const {
1890   MemorySSAAnnotatedWriter Writer(this);
1891   F.print(OS, &Writer);
1892 }
1893 
1894 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1895 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1896 #endif
1897 
1898 void MemorySSA::verifyMemorySSA() const {
1899   verifyOrderingDominationAndDefUses(F);
1900   verifyDominationNumbers(F);
1901   verifyPrevDefInPhis(F);
  // Previously, the verification used to also check that the clobbering access
  // cached by MemorySSA is the same as the clobbering access found by a later
  // query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, correct transformations can lead to BasicAA
  // returning different alias answers before and after the transformation.
  // Invalidating MemorySSA is not an option, because the results from BasicAA
  // can be so unpredictable that, in the worst case, we would need to rebuild
  // MemorySSA from scratch after every transformation, which defeats the
  // purpose of using it. For such an example, see test4 added in D51960.
1912 }
1913 
1914 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1915 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1916   for (const BasicBlock &BB : F) {
1917     if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1918       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1919         auto *Pred = Phi->getIncomingBlock(I);
1920         auto *IncAcc = Phi->getIncomingValue(I);
1921         // If Pred has no unreachable predecessors, get last def looking at
        // IDoms. If, while walking IDoms, any of these has an unreachable
1923         // predecessor, then the incoming def can be any access.
1924         if (auto *DTNode = DT->getNode(Pred)) {
1925           while (DTNode) {
1926             if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1927               auto *LastAcc = &*(--DefList->end());
1928               assert(LastAcc == IncAcc &&
1929                      "Incorrect incoming access into phi.");
1930               break;
1931             }
1932             DTNode = DTNode->getIDom();
1933           }
1934         } else {
1935           // If Pred has unreachable predecessors, but has at least a Def, the
1936           // incoming access can be the last Def in Pred, or it could have been
1937           // optimized to LoE. After an update, though, the LoE may have been
1938           // replaced by another access, so IncAcc may be any access.
1939           // If Pred has unreachable predecessors and no Defs, incoming access
1940           // should be LoE; However, after an update, it may be any access.
1941         }
1942       }
1943     }
1944   }
1945 #endif
1946 }
1947 
1948 /// Verify that all of the blocks we believe to have valid domination numbers
1949 /// actually have valid domination numbers.
1950 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1951 #ifndef NDEBUG
1952   if (BlockNumberingValid.empty())
1953     return;
1954 
1955   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1956   for (const BasicBlock &BB : F) {
1957     if (!ValidBlocks.count(&BB))
1958       continue;
1959 
1960     ValidBlocks.erase(&BB);
1961 
1962     const AccessList *Accesses = getBlockAccesses(&BB);
1963     // It's correct to say an empty block has valid numbering.
1964     if (!Accesses)
1965       continue;
1966 
1967     // Block numbering starts at 1.
1968     unsigned long LastNumber = 0;
1969     for (const MemoryAccess &MA : *Accesses) {
1970       auto ThisNumberIter = BlockNumbering.find(&MA);
1971       assert(ThisNumberIter != BlockNumbering.end() &&
1972              "MemoryAccess has no domination number in a valid block!");
1973 
1974       unsigned long ThisNumber = ThisNumberIter->second;
1975       assert(ThisNumber > LastNumber &&
1976              "Domination numbers should be strictly increasing!");
1977       LastNumber = ThisNumber;
1978     }
1979   }
1980 
1981   assert(ValidBlocks.empty() &&
1982          "All valid BasicBlocks should exist in F -- dangling pointers?");
1983 #endif
1984 }
1985 
1986 /// Verify ordering: the order and existence of MemoryAccesses matches the
1987 /// order and existence of memory affecting instructions.
1988 /// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verify that, for each use, it appears in the appropriate
/// def's use list.
1992 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1993 #if !defined(NDEBUG)
1994   // Walk all the blocks, comparing what the lookups think and what the access
1995   // lists think, as well as the order in the blocks vs the order in the access
1996   // lists.
1997   SmallVector<MemoryAccess *, 32> ActualAccesses;
1998   SmallVector<MemoryAccess *, 32> ActualDefs;
1999   for (BasicBlock &B : F) {
2000     const AccessList *AL = getBlockAccesses(&B);
2001     const auto *DL = getBlockDefs(&B);
2002     MemoryPhi *Phi = getMemoryAccess(&B);
2003     if (Phi) {
2004       // Verify ordering.
2005       ActualAccesses.push_back(Phi);
2006       ActualDefs.push_back(Phi);
2007       // Verify domination
2008       for (const Use &U : Phi->uses())
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2010 #if defined(EXPENSIVE_CHECKS)
2011       // Verify def-uses.
2012       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2013                                           pred_begin(&B), pred_end(&B))) &&
2014              "Incomplete MemoryPhi Node");
2015       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2016         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2017         assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
2018                    pred_end(&B) &&
2019                "Incoming phi block not a block predecessor");
2020       }
2021 #endif
2022     }
2023 
2024     for (Instruction &I : B) {
2025       MemoryUseOrDef *MA = getMemoryAccess(&I);
2026       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2027              "We have memory affecting instructions "
2028              "in this block but they are not in the "
2029              "access list or defs list");
2030       if (MA) {
2031         // Verify ordering.
2032         ActualAccesses.push_back(MA);
2033         if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2034           // Verify ordering.
2035           ActualDefs.push_back(MA);
2036           // Verify domination.
2037           for (const Use &U : MD->uses())
2038             assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
2040         }
2041 #if defined(EXPENSIVE_CHECKS)
2042         // Verify def-uses.
2043         verifyUseInDefs(MA->getDefiningAccess(), MA);
2044 #endif
2045       }
2046     }
    // At this point, either we hit the assert above, the block really has no
    // accesses, or it has both accesses and an access list. The same holds for
    // defs.
2049     if (!AL && !DL)
2050       continue;
2051     // Verify ordering.
2052     assert(AL->size() == ActualAccesses.size() &&
2053            "We don't have the same number of accesses in the block as on the "
2054            "access list");
2055     assert((DL || ActualDefs.size() == 0) &&
2056            "Either we should have a defs list, or we should have no defs");
2057     assert((!DL || DL->size() == ActualDefs.size()) &&
2058            "We don't have the same number of defs in the block as on the "
2059            "def list");
2060     auto ALI = AL->begin();
2061     auto AAI = ActualAccesses.begin();
2062     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2063       assert(&*ALI == *AAI && "Not the same accesses in the same order");
2064       ++ALI;
2065       ++AAI;
2066     }
2067     ActualAccesses.clear();
2068     if (DL) {
2069       auto DLI = DL->begin();
2070       auto ADI = ActualDefs.begin();
2071       while (DLI != DL->end() && ADI != ActualDefs.end()) {
2072         assert(&*DLI == *ADI && "Not the same defs in the same order");
2073         ++DLI;
2074         ++ADI;
2075       }
2076     }
2077     ActualDefs.clear();
2078   }
2079 #endif
2080 }
2081 
2082 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2083 /// appears in the use list of \p Def.
2084 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2085 #ifndef NDEBUG
  // The live on entry use may cause us to get a null def here.
2087   if (!Def)
2088     assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
2090   else
2091     assert(is_contained(Def->users(), Use) &&
2092            "Did not find use in def's use list");
2093 #endif
2094 }
2095 
2096 /// Perform a local numbering on blocks so that instruction ordering can be
2097 /// determined in constant time.
2098 /// TODO: We currently just number in order.  If we numbered by N, we could
2099 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2100 /// log2(N) sequences of mixed before and after) without needing to invalidate
2101 /// the numbering.
2102 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2103   // The pre-increment ensures the numbers really start at 1.
2104   unsigned long CurrentNumber = 0;
2105   const AccessList *AL = getBlockAccesses(B);
2106   assert(AL != nullptr && "Asking to renumber an empty block");
2107   for (const auto &I : *AL)
2108     BlockNumbering[&I] = ++CurrentNumber;
2109   BlockNumberingValid.insert(B);
2110 }
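
// For example (numbers are illustrative), a block whose access list is
//
//   0 = MemoryPhi(...) ; 1 = MemoryDef(0) ; MemoryUse(1)
//
// gets the local numbers 1, 2 and 3 respectively, so locallyDominates() below
// can order two accesses in the same block with a single integer comparison.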
2111 
2112 /// Determine, for two memory accesses in the same block,
2113 /// whether \p Dominator dominates \p Dominatee.
2114 /// \returns True if \p Dominator dominates \p Dominatee.
2115 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2116                                  const MemoryAccess *Dominatee) const {
2117   const BasicBlock *DominatorBlock = Dominator->getBlock();
2118 
2119   assert((DominatorBlock == Dominatee->getBlock()) &&
2120          "Asking for local domination when accesses are in different blocks!");
2121   // A node dominates itself.
2122   if (Dominatee == Dominator)
2123     return true;
2124 
2125   // When Dominatee is defined on function entry, it is not dominated by another
2126   // memory access.
2127   if (isLiveOnEntryDef(Dominatee))
2128     return false;
2129 
2130   // When Dominator is defined on function entry, it dominates the other memory
2131   // access.
2132   if (isLiveOnEntryDef(Dominator))
2133     return true;
2134 
2135   if (!BlockNumberingValid.count(DominatorBlock))
2136     renumberBlock(DominatorBlock);
2137 
2138   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
2140   assert(DominatorNum != 0 && "Block was not numbered properly");
2141   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2142   assert(DominateeNum != 0 && "Block was not numbered properly");
2143   return DominatorNum < DominateeNum;
2144 }
2145 
2146 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2147                           const MemoryAccess *Dominatee) const {
2148   if (Dominator == Dominatee)
2149     return true;
2150 
2151   if (isLiveOnEntryDef(Dominatee))
2152     return false;
2153 
2154   if (Dominator->getBlock() != Dominatee->getBlock())
2155     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2156   return locallyDominates(Dominator, Dominatee);
2157 }
2158 
2159 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2160                           const Use &Dominatee) const {
2161   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2162     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2163     // The def must dominate the incoming block of the phi.
2164     if (UseBB != Dominator->getBlock())
2165       return DT->dominates(Dominator->getBlock(), UseBB);
2166     // If the UseBB and the DefBB are the same, compare locally.
2167     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2168   }
2169   // If it's not a PHI node use, the normal dominates can already handle it.
2170   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2171 }
2172 
2173 const static char LiveOnEntryStr[] = "liveOnEntry";
2174 
2175 void MemoryAccess::print(raw_ostream &OS) const {
2176   switch (getValueID()) {
2177   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2178   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2179   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2180   }
2181   llvm_unreachable("invalid value id");
2182 }
2183 
2184 void MemoryDef::print(raw_ostream &OS) const {
2185   MemoryAccess *UO = getDefiningAccess();
2186 
2187   auto printID = [&OS](MemoryAccess *A) {
2188     if (A && A->getID())
2189       OS << A->getID();
2190     else
2191       OS << LiveOnEntryStr;
2192   };
2193 
2194   OS << getID() << " = MemoryDef(";
2195   printID(UO);
2196   OS << ")";
2197 
2198   if (isOptimized()) {
2199     OS << "->";
2200     printID(getOptimized());
2201 
2202     if (Optional<AliasResult> AR = getOptimizedAccessType())
2203       OS << " " << *AR;
2204   }
2205 }
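
// Example output (IDs are illustrative): an unoptimized def prints as
// "2 = MemoryDef(1)", and a def optimized to access 4 with a known alias
// result prints as "2 = MemoryDef(1)->4 MustAlias".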
2206 
2207 void MemoryPhi::print(raw_ostream &OS) const {
2208   bool First = true;
2209   OS << getID() << " = MemoryPhi(";
2210   for (const auto &Op : operands()) {
2211     BasicBlock *BB = getIncomingBlock(Op);
2212     MemoryAccess *MA = cast<MemoryAccess>(Op);
2213     if (!First)
2214       OS << ',';
2215     else
2216       First = false;
2217 
2218     OS << '{';
2219     if (BB->hasName())
2220       OS << BB->getName();
2221     else
2222       BB->printAsOperand(OS, false);
2223     OS << ',';
2224     if (unsigned ID = MA->getID())
2225       OS << ID;
2226     else
2227       OS << LiveOnEntryStr;
2228     OS << '}';
2229   }
2230   OS << ')';
2231 }
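
// Example output (block names and IDs are illustrative):
// "3 = MemoryPhi({entry,liveOnEntry},{if.then,2})", with one {block,access}
// pair per incoming edge.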
2232 
2233 void MemoryUse::print(raw_ostream &OS) const {
2234   MemoryAccess *UO = getDefiningAccess();
2235   OS << "MemoryUse(";
2236   if (UO && UO->getID())
2237     OS << UO->getID();
2238   else
2239     OS << LiveOnEntryStr;
2240   OS << ')';
2241 
2242   if (Optional<AliasResult> AR = getOptimizedAccessType())
2243     OS << " " << *AR;
2244 }
2245 
2246 void MemoryAccess::dump() const {
2247 // Cannot completely remove virtual function even in release mode.
2248 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2249   print(dbgs());
2250   dbgs() << "\n";
2251 #endif
2252 }
2253 
2254 char MemorySSAPrinterLegacyPass::ID = 0;
2255 
2256 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2257   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2258 }
2259 
2260 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2261   AU.setPreservesAll();
2262   AU.addRequired<MemorySSAWrapperPass>();
2263 }
2264 
2265 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2266   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2267   MSSA.print(dbgs());
2268   if (VerifyMemorySSA)
2269     MSSA.verifyMemorySSA();
2270   return false;
2271 }
2272 
2273 AnalysisKey MemorySSAAnalysis::Key;
2274 
2275 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2276                                                  FunctionAnalysisManager &AM) {
2277   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2278   auto &AA = AM.getResult<AAManager>(F);
2279   return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2280 }
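
// A minimal sketch of driving this analysis through the new pass manager
// (registration of the other required analyses, e.g. DominatorTreeAnalysis and
// AAManager, is elided and assumed to have been done, for instance via
// PassBuilder):
//
//   FunctionAnalysisManager FAM;
//   FAM.registerPass([] { return MemorySSAAnalysis(); });
//   MemorySSA &MSSA = FAM.getResult<MemorySSAAnalysis>(F).getMSSA();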
2281 
2282 bool MemorySSAAnalysis::Result::invalidate(
2283     Function &F, const PreservedAnalyses &PA,
2284     FunctionAnalysisManager::Invalidator &Inv) {
2285   auto PAC = PA.getChecker<MemorySSAAnalysis>();
2286   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2287          Inv.invalidate<AAManager>(F, PA) ||
2288          Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2289 }
2290 
2291 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2292                                             FunctionAnalysisManager &AM) {
2293   OS << "MemorySSA for function: " << F.getName() << "\n";
2294   AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2295 
2296   return PreservedAnalyses::all();
2297 }
2298 
2299 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2300                                              FunctionAnalysisManager &AM) {
2301   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2302 
2303   return PreservedAnalyses::all();
2304 }
2305 
2306 char MemorySSAWrapperPass::ID = 0;
2307 
2308 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2309   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2310 }
2311 
2312 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2313 
2314 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2315   AU.setPreservesAll();
2316   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2317   AU.addRequiredTransitive<AAResultsWrapperPass>();
2318 }
2319 
2320 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2321   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2322   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2323   MSSA.reset(new MemorySSA(F, &AA, &DT));
2324   return false;
2325 }
2326 
2327 void MemorySSAWrapperPass::verifyAnalysis() const {
2328   if (VerifyMemorySSA)
2329     MSSA->verifyMemorySSA();
2330 }
2331 
2332 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2333   MSSA->print(OS);
2334 }
2335 
2336 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2337 
2338 /// Walk the use-def chains starting at \p StartingAccess and find
2339 /// the MemoryAccess that actually clobbers Loc.
2340 ///
2341 /// \returns our clobbering memory access
2342 template <typename AliasAnalysisType>
2343 MemoryAccess *
2344 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2345     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2346     unsigned &UpwardWalkLimit) {
2347   if (isa<MemoryPhi>(StartingAccess))
2348     return StartingAccess;
2349 
2350   auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2351   if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2352     return StartingUseOrDef;
2353 
2354   Instruction *I = StartingUseOrDef->getMemoryInst();
2355 
2356   // Conservatively, fences are always clobbers, so don't perform the walk if we
2357   // hit a fence.
2358   if (!isa<CallBase>(I) && I->isFenceLike())
2359     return StartingUseOrDef;
2360 
2361   UpwardsMemoryQuery Q;
2362   Q.OriginalAccess = StartingUseOrDef;
2363   Q.StartingLoc = Loc;
2364   Q.Inst = I;
2365   Q.IsCall = false;
2366 
2367   // Unlike the other function, do not walk to the def of a def, because we are
2368   // handed something we already believe is the clobbering access.
2369   // We never set SkipSelf to true in Q in this method.
2370   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2371                                      ? StartingUseOrDef->getDefiningAccess()
2372                                      : StartingUseOrDef;
2373 
2374   MemoryAccess *Clobber =
2375       Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2376   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2377   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2378   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2379   LLVM_DEBUG(dbgs() << *Clobber << "\n");
2380   return Clobber;
2381 }
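
// Rough illustration (the IR and access numbers are hypothetical): given
//
//   1 = MemoryDef(liveOnEntry)   ; store i32 0, i32* %a
//   2 = MemoryDef(1)             ; store i32 0, i32* %b
//   MemoryUse(2)                 ; %v = load i32, i32* %a
//
// querying the load's access with the MemoryLocation of %a starts the walk at
// def 2, finds that the store to %b does not clobber that location, and
// returns def 1.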
2382 
2383 template <typename AliasAnalysisType>
2384 MemoryAccess *
2385 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2386     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2387   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2388   // If this is a MemoryPhi, we can't do anything.
2389   if (!StartingAccess)
2390     return MA;
2391 
2392   bool IsOptimized = false;
2393 
2394   // If this is an already optimized use or def, return the optimized result.
2395   // Note: Currently, we store the optimized def result in a separate field,
2396   // since we can't use the defining access.
2397   if (StartingAccess->isOptimized()) {
2398     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2399       return StartingAccess->getOptimized();
2400     IsOptimized = true;
2401   }
2402 
2403   const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since fences conservatively
  // clobber all memory and have no locations from which to get pointers to try
  // to disambiguate.
2407   if (!isa<CallBase>(I) && I->isFenceLike())
2408     return StartingAccess;
2409 
2410   UpwardsMemoryQuery Q(I, StartingAccess);
2411 
2412   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2413     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2414     StartingAccess->setOptimized(LiveOnEntry);
2415     StartingAccess->setOptimizedAccessType(None);
2416     return LiveOnEntry;
2417   }
2418 
2419   MemoryAccess *OptimizedAccess;
2420   if (!IsOptimized) {
2421     // Start with the thing we already think clobbers this location
2422     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2423 
2424     // At this point, DefiningAccess may be the live on entry def.
2425     // If it is, we will not get a better result.
2426     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2427       StartingAccess->setOptimized(DefiningAccess);
2428       StartingAccess->setOptimizedAccessType(None);
2429       return DefiningAccess;
2430     }
2431 
2432     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2433     StartingAccess->setOptimized(OptimizedAccess);
2434     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2435       StartingAccess->setOptimizedAccessType(None);
2436     else if (Q.AR == MustAlias)
2437       StartingAccess->setOptimizedAccessType(MustAlias);
2438   } else
2439     OptimizedAccess = StartingAccess->getOptimized();
2440 
2441   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2442   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2443   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2444   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2445 
2446   MemoryAccess *Result;
2447   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2448       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2449     assert(isa<MemoryDef>(Q.OriginalAccess));
2450     Q.SkipSelfAccess = true;
2451     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2452   } else
2453     Result = OptimizedAccess;
2454 
2455   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2456   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2457 
2458   return Result;
2459 }
2460 
2461 MemoryAccess *
2462 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2463   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2464     return Use->getDefiningAccess();
2465   return MA;
2466 }
2467 
2468 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2469     MemoryAccess *StartingAccess, const MemoryLocation &) {
2470   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2471     return Use->getDefiningAccess();
2472   return StartingAccess;
2473 }
2474 
2475 void MemoryPhi::deleteMe(DerivedUser *Self) {
2476   delete static_cast<MemoryPhi *>(Self);
2477 }
2478 
2479 void MemoryDef::deleteMe(DerivedUser *Self) {
2480   delete static_cast<MemoryDef *>(Self);
2481 }
2482 
2483 void MemoryUse::deleteMe(DerivedUser *Self) {
2484   delete static_cast<MemoryUse *>(Self);
2485 }
2486