1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/IteratedDominanceFrontier.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Config/llvm-config.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Intrinsics.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/IR/Use.h"
41 #include "llvm/Pass.h"
42 #include "llvm/Support/AtomicOrdering.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/FormattedStream.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <iterator>
53 #include <memory>
54 #include <utility>
55 
56 using namespace llvm;
57 
58 #define DEBUG_TYPE "memoryssa"
59 
60 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
61                       true)
62 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
63 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
64 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
65                     true)
66 
67 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
68                       "Memory SSA Printer", false, false)
69 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
70 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
71                     "Memory SSA Printer", false, false)
72 
73 static cl::opt<unsigned> MaxCheckLimit(
74     "memssa-check-limit", cl::Hidden, cl::init(100),
75     cl::desc("The maximum number of stores/phis MemorySSA"
76              "will consider trying to walk past (default = 100)"));
77 
78 // Always verify MemorySSA if expensive checking is enabled.
79 #ifdef EXPENSIVE_CHECKS
80 bool llvm::VerifyMemorySSA = true;
81 #else
82 bool llvm::VerifyMemorySSA = false;
83 #endif
84 static cl::opt<bool, true>
85     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
86                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
87 
88 namespace llvm {
89 
90 /// An assembly annotator class to print Memory SSA information in
91 /// comments.
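///
/// For example (illustrative), the printed IR is annotated like:
///
/// \code
///   ; 1 = MemoryDef(liveOnEntry)
///   store i32 0, i32* %p
///   ; MemoryUse(1)
///   %v = load i32, i32* %p
/// \endcode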
92 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
93   friend class MemorySSA;
94 
95   const MemorySSA *MSSA;
96 
97 public:
98   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
99 
100   void emitBasicBlockStartAnnot(const BasicBlock *BB,
101                                 formatted_raw_ostream &OS) override {
102     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
103       OS << "; " << *MA << "\n";
104   }
105 
106   void emitInstructionAnnot(const Instruction *I,
107                             formatted_raw_ostream &OS) override {
108     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
109       OS << "; " << *MA << "\n";
110   }
111 };
112 
113 } // end namespace llvm
114 
115 namespace {
116 
117 /// Our current alias analysis API differentiates heavily between calls and
118 /// non-calls, and functions called on one usually assert on the other.
119 /// This class encapsulates the distinction to simplify other code that wants
120 /// "Memory affecting instructions and related data" to use as a key.
121 /// For example, this class is used as a densemap key in the use optimizer.
122 class MemoryLocOrCall {
123 public:
124   bool IsCall = false;
125 
126   MemoryLocOrCall(MemoryUseOrDef *MUD)
127       : MemoryLocOrCall(MUD->getMemoryInst()) {}
128   MemoryLocOrCall(const MemoryUseOrDef *MUD)
129       : MemoryLocOrCall(MUD->getMemoryInst()) {}
130 
131   MemoryLocOrCall(Instruction *Inst) {
132     if (auto *C = dyn_cast<CallBase>(Inst)) {
133       IsCall = true;
134       Call = C;
135     } else {
136       IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
139       if (!isa<FenceInst>(Inst))
140         Loc = MemoryLocation::get(Inst);
141     }
142   }
143 
144   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
145 
146   const CallBase *getCall() const {
147     assert(IsCall);
148     return Call;
149   }
150 
151   MemoryLocation getLoc() const {
152     assert(!IsCall);
153     return Loc;
154   }
155 
156   bool operator==(const MemoryLocOrCall &Other) const {
157     if (IsCall != Other.IsCall)
158       return false;
159 
160     if (!IsCall)
161       return Loc == Other.Loc;
162 
163     if (Call->getCalledValue() != Other.Call->getCalledValue())
164       return false;
165 
166     return Call->arg_size() == Other.Call->arg_size() &&
167            std::equal(Call->arg_begin(), Call->arg_end(),
168                       Other.Call->arg_begin());
169   }
170 
171 private:
172   union {
173     const CallBase *Call;
174     MemoryLocation Loc;
175   };
176 };
177 
178 } // end anonymous namespace
179 
180 namespace llvm {
181 
182 template <> struct DenseMapInfo<MemoryLocOrCall> {
183   static inline MemoryLocOrCall getEmptyKey() {
184     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
185   }
186 
187   static inline MemoryLocOrCall getTombstoneKey() {
188     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
189   }
190 
191   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
192     if (!MLOC.IsCall)
193       return hash_combine(
194           MLOC.IsCall,
195           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
196 
197     hash_code hash =
198         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
199                                       MLOC.getCall()->getCalledValue()));
200 
201     for (const Value *Arg : MLOC.getCall()->args())
202       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
203     return hash;
204   }
205 
206   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
207     return LHS == RHS;
208   }
209 };
210 
211 } // end namespace llvm
212 
213 /// This does one-way checks to see if Use could theoretically be hoisted above
214 /// MayClobber. This will not check the other way around.
215 ///
216 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
217 /// MayClobber, with no potentially clobbering operations in between them.
218 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
219 static bool areLoadsReorderable(const LoadInst *Use,
220                                 const LoadInst *MayClobber) {
221   bool VolatileUse = Use->isVolatile();
222   bool VolatileClobber = MayClobber->isVolatile();
223   // Volatile operations may never be reordered with other volatile operations.
224   if (VolatileUse && VolatileClobber)
225     return false;
226   // Otherwise, volatile doesn't matter here. From the language reference:
227   // 'optimizers may change the order of volatile operations relative to
228   // non-volatile operations.'"
229 
230   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
231   // is weaker, it can be moved above other loads. We just need to be sure that
232   // MayClobber isn't an acquire load, because loads can't be moved above
233   // acquire loads.
234   //
235   // Note that this explicitly *does* allow the free reordering of monotonic (or
236   // weaker) loads of the same address.
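  //
  // For example (illustrative IR):
  //   %a = load atomic i32, i32* %p acquire, align 4   ; MayClobber
  //   %b = load i32, i32* %q, align 4                  ; Use
  // Here %b cannot be reordered above %a, so this returns false; if %a were
  // monotonic or unordered instead, the reordering would be allowed.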
237   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
238   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
239                                                      AtomicOrdering::Acquire);
240   return !(SeqCstUse || MayClobberIsAcquire);
241 }
242 
243 namespace {
244 
245 struct ClobberAlias {
246   bool IsClobber;
247   Optional<AliasResult> AR;
248 };
249 
250 } // end anonymous namespace
251 
252 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
253 // ignored if IsClobber = false.
254 static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
255                                              const MemoryLocation &UseLoc,
256                                              const Instruction *UseInst,
257                                              AliasAnalysis &AA) {
258   Instruction *DefInst = MD->getMemoryInst();
259   assert(DefInst && "Defining instruction not actually an instruction");
260   const auto *UseCall = dyn_cast<CallBase>(UseInst);
261   Optional<AliasResult> AR;
262 
263   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
264     // These intrinsics will show up as affecting memory, but they are just
265     // markers, mostly.
266     //
267     // FIXME: We probably don't actually want MemorySSA to model these at all
268     // (including creating MemoryAccesses for them): we just end up inventing
269     // clobbers where they don't really exist at all. Please see D43269 for
270     // context.
271     switch (II->getIntrinsicID()) {
272     case Intrinsic::lifetime_start:
273       if (UseCall)
274         return {false, NoAlias};
275       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
276       return {AR != NoAlias, AR};
277     case Intrinsic::lifetime_end:
278     case Intrinsic::invariant_start:
279     case Intrinsic::invariant_end:
280     case Intrinsic::assume:
281       return {false, NoAlias};
282     default:
283       break;
284     }
285   }
286 
287   if (UseCall) {
288     ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
289     AR = isMustSet(I) ? MustAlias : MayAlias;
290     return {isModOrRefSet(I), AR};
291   }
292 
293   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
294     if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
295       return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
296 
297   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
298   AR = isMustSet(I) ? MustAlias : MayAlias;
299   return {isModSet(I), AR};
300 }
301 
302 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
303                                              const MemoryUseOrDef *MU,
304                                              const MemoryLocOrCall &UseMLOC,
305                                              AliasAnalysis &AA) {
306   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
307   // to exist while MemoryLocOrCall is pushed through places.
308   if (UseMLOC.IsCall)
309     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
310                                     AA);
311   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
312                                   AA);
313 }
314 
315 // Return true when MD may alias MU, return false otherwise.
316 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
317                                         AliasAnalysis &AA) {
318   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
319 }
320 
321 namespace {
322 
323 struct UpwardsMemoryQuery {
324   // True if our original query started off as a call
325   bool IsCall = false;
326   // The pointer location we started the query with. This will be empty if
327   // IsCall is true.
328   MemoryLocation StartingLoc;
329   // This is the instruction we were querying about.
330   const Instruction *Inst = nullptr;
331   // The MemoryAccess we actually got called with, used to test local domination
332   const MemoryAccess *OriginalAccess = nullptr;
333   Optional<AliasResult> AR = MayAlias;
334   bool SkipSelfAccess = false;
335 
336   UpwardsMemoryQuery() = default;
337 
338   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
339       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
340     if (!IsCall)
341       StartingLoc = MemoryLocation::get(Inst);
342   }
343 };
344 
345 } // end anonymous namespace
346 
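// Returns true if MD is a lifetime.end intrinsic whose pointer argument
// must-aliases Loc. For example (illustrative IR):
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %p)
// ends the lifetime of %p; the use optimizer treats a later use of memory
// that must-aliases %p as being defined by liveOnEntry.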
347 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
348                            AliasAnalysis &AA) {
349   Instruction *Inst = MD->getMemoryInst();
350   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
351     switch (II->getIntrinsicID()) {
352     case Intrinsic::lifetime_end:
353       return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
354     default:
355       return false;
356     }
357   }
358   return false;
359 }
360 
361 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
362                                                    const Instruction *I) {
363   // If the memory can't be changed, then loads of the memory can't be
364   // clobbered.
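  //
  // For example (illustrative IR), a load tagged as invariant:
  //   %v = load i32, i32* %p, align 4, !invariant.load !0
  // can never observe a store to the same memory, so its defining access is
  // simply liveOnEntry.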
365   return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
366                               AA.pointsToConstantMemory(cast<LoadInst>(I)->
367                                                           getPointerOperand()));
368 }
369 
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
372 ///
373 /// This is meant to be as simple and self-contained as possible. Because it
374 /// uses no cache, etc., it can be relatively expensive.
375 ///
376 /// \param Start     The MemoryAccess that we want to walk from.
377 /// \param ClobberAt A clobber for Start.
378 /// \param StartLoc  The MemoryLocation for Start.
379 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
380 /// \param Query     The UpwardsMemoryQuery we used for our search.
381 /// \param AA        The AliasAnalysis we used for our search.
382 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
383 LLVM_ATTRIBUTE_UNUSED static void
384 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
385                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
386                    const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
387                    bool AllowImpreciseClobber = false) {
388   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
389 
390   if (MSSA.isLiveOnEntryDef(Start)) {
391     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
392            "liveOnEntry must clobber itself");
393     return;
394   }
395 
396   bool FoundClobber = false;
397   DenseSet<ConstMemoryAccessPair> VisitedPhis;
398   SmallVector<ConstMemoryAccessPair, 8> Worklist;
399   Worklist.emplace_back(Start, StartLoc);
400   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
401   // is found, complain.
402   while (!Worklist.empty()) {
403     auto MAP = Worklist.pop_back_val();
404     // All we care about is that nothing from Start to ClobberAt clobbers Start.
405     // We learn nothing from revisiting nodes.
406     if (!VisitedPhis.insert(MAP).second)
407       continue;
408 
409     for (const auto *MA : def_chain(MAP.first)) {
410       if (MA == ClobberAt) {
411         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't free, so don't use `|=`, since it
          // won't let us short-circuit.
414           //
415           // Also, note that this can't be hoisted out of the `Worklist` loop,
416           // since MD may only act as a clobber for 1 of N MemoryLocations.
417           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
418           if (!FoundClobber) {
419             ClobberAlias CA =
420                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
421             if (CA.IsClobber) {
422               FoundClobber = true;
423               // Not used: CA.AR;
424             }
425           }
426         }
427         break;
428       }
429 
430       // We should never hit liveOnEntry, unless it's the clobber.
431       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
432 
433       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
434         // If Start is a Def, skip self.
435         if (MD == Start)
436           continue;
437 
438         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
439                     .IsClobber &&
440                "Found clobber before reaching ClobberAt!");
441         continue;
442       }
443 
444       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
445         (void)MU;
446         assert (MU == Start &&
447                 "Can only find use in def chain if Start is a use");
448         continue;
449       }
450 
451       assert(isa<MemoryPhi>(MA));
452       Worklist.append(
453           upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
454           upward_defs_end());
455     }
456   }
457 
  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobber that we can now infer is not a true
  // clobbering access. Don't fail the verify if that's the case.
461   // We do have accesses that claim they're optimized, but could be optimized
462   // further. Updating all these can be expensive, so allow it for now (FIXME).
463   if (AllowImpreciseClobber)
464     return;
465 
466   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
467   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
468   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
469          "ClobberAt never acted as a clobber");
470 }
471 
472 namespace {
473 
474 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
475 /// in one class.
476 class ClobberWalker {
477   /// Save a few bytes by using unsigned instead of size_t.
478   using ListIndex = unsigned;
479 
480   /// Represents a span of contiguous MemoryDefs, potentially ending in a
481   /// MemoryPhi.
482   struct DefPath {
483     MemoryLocation Loc;
484     // Note that, because we always walk in reverse, Last will always dominate
485     // First. Also note that First and Last are inclusive.
486     MemoryAccess *First;
487     MemoryAccess *Last;
488     Optional<ListIndex> Previous;
489 
490     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
491             Optional<ListIndex> Previous)
492         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
493 
494     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
495             Optional<ListIndex> Previous)
496         : DefPath(Loc, Init, Init, Previous) {}
497   };
498 
499   const MemorySSA &MSSA;
500   AliasAnalysis &AA;
501   DominatorTree &DT;
502   UpwardsMemoryQuery *Query;
503 
504   // Phi optimization bookkeeping
505   SmallVector<DefPath, 32> Paths;
506   DenseSet<ConstMemoryAccessPair> VisitedPhis;
507 
508   /// Find the nearest def or phi that `From` can legally be optimized to.
509   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
510     assert(From->getNumOperands() && "Phi with no operands?");
511 
512     BasicBlock *BB = From->getBlock();
513     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
514     DomTreeNode *Node = DT.getNode(BB);
515     while ((Node = Node->getIDom())) {
516       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
517       if (Defs)
518         return &*Defs->rbegin();
519     }
520     return Result;
521   }
522 
523   /// Result of calling walkToPhiOrClobber.
524   struct UpwardsWalkResult {
525     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Includes alias info when a clobber is found.
527     MemoryAccess *Result;
528     bool IsKnownClobber;
529     Optional<AliasResult> AR;
530   };
531 
532   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
533   /// This will update Desc.Last as it walks. It will (optionally) also stop at
534   /// StopAt.
535   ///
  /// This does not test whether StopAt is a clobber.
537   UpwardsWalkResult
538   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
539                      const MemoryAccess *SkipStopAt = nullptr) const {
540     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
541 
542     for (MemoryAccess *Current : def_chain(Desc.Last)) {
543       Desc.Last = Current;
544       if (Current == StopAt || Current == SkipStopAt)
545         return {Current, false, MayAlias};
546 
547       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
548         if (MSSA.isLiveOnEntryDef(MD))
549           return {MD, true, MustAlias};
550         ClobberAlias CA =
551             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
552         if (CA.IsClobber)
553           return {MD, true, CA.AR};
554       }
555     }
556 
557     assert(isa<MemoryPhi>(Desc.Last) &&
558            "Ended at a non-clobber that's not a phi?");
559     return {Desc.Last, false, MayAlias};
560   }
561 
562   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
563                    ListIndex PriorNode) {
564     auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
565                                  upward_defs_end());
566     for (const MemoryAccessPair &P : UpwardDefs) {
567       PausedSearches.push_back(Paths.size());
568       Paths.emplace_back(P.second, P.first, PriorNode);
569     }
570   }
571 
572   /// Represents a search that terminated after finding a clobber. This clobber
573   /// may or may not be present in the path of defs from LastNode..SearchStart,
574   /// since it may have been retrieved from cache.
575   struct TerminatedPath {
576     MemoryAccess *Clobber;
577     ListIndex LastNode;
578   };
579 
580   /// Get an access that keeps us from optimizing to the given phi.
581   ///
582   /// PausedSearches is an array of indices into the Paths array. Its incoming
583   /// value is the indices of searches that stopped at the last phi optimization
584   /// target. It's left in an unspecified state.
585   ///
586   /// If this returns None, NewPaused is a vector of searches that terminated
587   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
588   Optional<TerminatedPath>
589   getBlockingAccess(const MemoryAccess *StopWhere,
590                     SmallVectorImpl<ListIndex> &PausedSearches,
591                     SmallVectorImpl<ListIndex> &NewPaused,
592                     SmallVectorImpl<TerminatedPath> &Terminated) {
593     assert(!PausedSearches.empty() && "No searches to continue?");
594 
595     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
596     // PausedSearches as our stack.
597     while (!PausedSearches.empty()) {
598       ListIndex PathIndex = PausedSearches.pop_back_val();
599       DefPath &Node = Paths[PathIndex];
600 
601       // If we've already visited this path with this MemoryLocation, we don't
602       // need to do so again.
603       //
604       // NOTE: That we just drop these paths on the ground makes caching
605       // behavior sporadic. e.g. given a diamond:
606       //  A
607       // B C
608       //  D
609       //
610       // ...If we walk D, B, A, C, we'll only cache the result of phi
611       // optimization for A, B, and D; C will be skipped because it dies here.
612       // This arguably isn't the worst thing ever, since:
613       //   - We generally query things in a top-down order, so if we got below D
614       //     without needing cache entries for {C, MemLoc}, then chances are
615       //     that those cache entries would end up ultimately unused.
616       //   - We still cache things for A, so C only needs to walk up a bit.
617       // If this behavior becomes problematic, we can fix without a ton of extra
618       // work.
619       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
620         continue;
621 
622       const MemoryAccess *SkipStopWhere = nullptr;
623       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
624         assert(isa<MemoryDef>(Query->OriginalAccess));
625         SkipStopWhere = Query->OriginalAccess;
626       }
627 
628       UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
629                                                  /*SkipStopAt=*/SkipStopWhere);
630       if (Res.IsKnownClobber) {
631         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
632         // If this wasn't a cache hit, we hit a clobber when walking. That's a
633         // failure.
634         TerminatedPath Term{Res.Result, PathIndex};
635         if (!MSSA.dominates(Res.Result, StopWhere))
636           return Term;
637 
638         // Otherwise, it's a valid thing to potentially optimize to.
639         Terminated.push_back(Term);
640         continue;
641       }
642 
643       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
644         // We've hit our target. Save this path off for if we want to continue
645         // walking. If we are in the mode of skipping the OriginalAccess, and
646         // we've reached back to the OriginalAccess, do not save path, we've
647         // just looped back to self.
648         if (Res.Result != SkipStopWhere)
649           NewPaused.push_back(PathIndex);
650         continue;
651       }
652 
653       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
654       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
655     }
656 
657     return None;
658   }
659 
660   template <typename T, typename Walker>
661   struct generic_def_path_iterator
662       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
663                                     std::forward_iterator_tag, T *> {
664     generic_def_path_iterator() = default;
665     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
666 
667     T &operator*() const { return curNode(); }
668 
669     generic_def_path_iterator &operator++() {
670       N = curNode().Previous;
671       return *this;
672     }
673 
674     bool operator==(const generic_def_path_iterator &O) const {
675       if (N.hasValue() != O.N.hasValue())
676         return false;
677       return !N.hasValue() || *N == *O.N;
678     }
679 
680   private:
681     T &curNode() const { return W->Paths[*N]; }
682 
683     Walker *W = nullptr;
684     Optional<ListIndex> N = None;
685   };
686 
687   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
688   using const_def_path_iterator =
689       generic_def_path_iterator<const DefPath, const ClobberWalker>;
690 
691   iterator_range<def_path_iterator> def_path(ListIndex From) {
692     return make_range(def_path_iterator(this, From), def_path_iterator());
693   }
694 
695   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
696     return make_range(const_def_path_iterator(this, From),
697                       const_def_path_iterator());
698   }
699 
700   struct OptznResult {
701     /// The path that contains our result.
702     TerminatedPath PrimaryClobber;
703     /// The paths that we can legally cache back from, but that aren't
704     /// necessarily the result of the Phi optimization.
705     SmallVector<TerminatedPath, 4> OtherClobbers;
706   };
707 
708   ListIndex defPathIndex(const DefPath &N) const {
709     // The assert looks nicer if we don't need to do &N
710     const DefPath *NP = &N;
711     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
712            "Out of bounds DefPath!");
713     return NP - &Paths.front();
714   }
715 
716   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
717   /// that act as legal clobbers. Note that this won't return *all* clobbers.
718   ///
719   /// Phi optimization algorithm tl;dr:
720   ///   - Find the earliest def/phi, A, we can optimize to
721   ///   - Find if all paths from the starting memory access ultimately reach A
722   ///     - If not, optimization isn't possible.
723   ///     - Otherwise, walk from A to another clobber or phi, A'.
724   ///       - If A' is a def, we're done.
725   ///       - If A' is a phi, try to optimize it.
726   ///
727   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
728   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
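  ///
  /// For example (illustrative), given a diamond
  ///        T       <- walk target: nearest def/phi in a dominating block
  ///       / \
  ///      B   C
  ///       \ /
  ///       Phi      <- the phi we are optimizing
  /// we check that every path from Phi's incoming accesses up to T is free of
  /// clobbers for Loc. If it is, the walk continues from T (optimizing T too
  /// if it is itself a phi); if not, the phi we could not get past is returned
  /// as the clobber.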
729   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
730                              const MemoryLocation &Loc) {
731     assert(Paths.empty() && VisitedPhis.empty() &&
732            "Reset the optimization state.");
733 
734     Paths.emplace_back(Loc, Start, Phi, None);
735     // Stores how many "valid" optimization nodes we had prior to calling
736     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
737     auto PriorPathsSize = Paths.size();
738 
739     SmallVector<ListIndex, 16> PausedSearches;
740     SmallVector<ListIndex, 8> NewPaused;
741     SmallVector<TerminatedPath, 4> TerminatedPaths;
742 
743     addSearches(Phi, PausedSearches, 0);
744 
745     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
746     // Paths.
747     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
748       assert(!Paths.empty() && "Need a path to move");
749       auto Dom = Paths.begin();
750       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
751         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
752           Dom = I;
753       auto Last = Paths.end() - 1;
754       if (Last != Dom)
755         std::iter_swap(Last, Dom);
756     };
757 
758     MemoryPhi *Current = Phi;
759     while (true) {
760       assert(!MSSA.isLiveOnEntryDef(Current) &&
761              "liveOnEntry wasn't treated as a clobber?");
762 
763       const auto *Target = getWalkTarget(Current);
764       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
765       // optimization for the prior phi.
766       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
767         return MSSA.dominates(P.Clobber, Target);
768       }));
769 
770       // FIXME: This is broken, because the Blocker may be reported to be
771       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
772       // For the moment, this is fine, since we do nothing with blocker info.
773       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
774               Target, PausedSearches, NewPaused, TerminatedPaths)) {
775 
776         // Find the node we started at. We can't search based on N->Last, since
777         // we may have gone around a loop with a different MemoryLocation.
778         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
779           return defPathIndex(N) < PriorPathsSize;
780         });
781         assert(Iter != def_path_iterator());
782 
783         DefPath &CurNode = *Iter;
784         assert(CurNode.Last == Current);
785 
786         // Two things:
787         // A. We can't reliably cache all of NewPaused back. Consider a case
788         //    where we have two paths in NewPaused; one of which can't optimize
789         //    above this phi, whereas the other can. If we cache the second path
790         //    back, we'll end up with suboptimal cache entries. We can handle
791         //    cases like this a bit better when we either try to find all
792         //    clobbers that block phi optimization, or when our cache starts
793         //    supporting unfinished searches.
794         // B. We can't reliably cache TerminatedPaths back here without doing
795         //    extra checks; consider a case like:
796         //       T
797         //      / \
798         //     D   C
799         //      \ /
800         //       S
801         //    Where T is our target, C is a node with a clobber on it, D is a
802         //    diamond (with a clobber *only* on the left or right node, N), and
803         //    S is our start. Say we walk to D, through the node opposite N
804         //    (read: ignoring the clobber), and see a cache entry in the top
805         //    node of D. That cache entry gets put into TerminatedPaths. We then
806         //    walk up to C (N is later in our worklist), find the clobber, and
807         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
808         //    the bottom part of D to the cached clobber, ignoring the clobber
809         //    in N. Again, this problem goes away if we start tracking all
810         //    blockers for a given phi optimization.
811         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
812         return {Result, {}};
813       }
814 
815       // If there's nothing left to search, then all paths led to valid clobbers
816       // that we got from our cache; pick the nearest to the start, and allow
817       // the rest to be cached back.
818       if (NewPaused.empty()) {
819         MoveDominatedPathToEnd(TerminatedPaths);
820         TerminatedPath Result = TerminatedPaths.pop_back_val();
821         return {Result, std::move(TerminatedPaths)};
822       }
823 
824       MemoryAccess *DefChainEnd = nullptr;
825       SmallVector<TerminatedPath, 4> Clobbers;
826       for (ListIndex Paused : NewPaused) {
827         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
828         if (WR.IsKnownClobber)
829           Clobbers.push_back({WR.Result, Paused});
830         else
831           // Micro-opt: If we hit the end of the chain, save it.
832           DefChainEnd = WR.Result;
833       }
834 
835       if (!TerminatedPaths.empty()) {
836         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
837         // do it now.
838         if (!DefChainEnd)
839           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
840             DefChainEnd = MA;
841 
842         // If any of the terminated paths don't dominate the phi we'll try to
843         // optimize, we need to figure out what they are and quit.
844         const BasicBlock *ChainBB = DefChainEnd->getBlock();
845         for (const TerminatedPath &TP : TerminatedPaths) {
846           // Because we know that DefChainEnd is as "high" as we can go, we
847           // don't need local dominance checks; BB dominance is sufficient.
848           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
849             Clobbers.push_back(TP);
850         }
851       }
852 
853       // If we have clobbers in the def chain, find the one closest to Current
854       // and quit.
855       if (!Clobbers.empty()) {
856         MoveDominatedPathToEnd(Clobbers);
857         TerminatedPath Result = Clobbers.pop_back_val();
858         return {Result, std::move(Clobbers)};
859       }
860 
861       assert(all_of(NewPaused,
862                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
863 
864       // Because liveOnEntry is a clobber, this must be a phi.
865       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
866 
867       PriorPathsSize = Paths.size();
868       PausedSearches.clear();
869       for (ListIndex I : NewPaused)
870         addSearches(DefChainPhi, PausedSearches, I);
871       NewPaused.clear();
872 
873       Current = DefChainPhi;
874     }
875   }
876 
877   void verifyOptResult(const OptznResult &R) const {
878     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
879       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
880     }));
881   }
882 
883   void resetPhiOptznState() {
884     Paths.clear();
885     VisitedPhis.clear();
886   }
887 
888 public:
889   ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
890       : MSSA(MSSA), AA(AA), DT(DT) {}
891 
892   /// Finds the nearest clobber for the given query, optimizing phis if
893   /// possible.
894   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
895     Query = &Q;
896 
897     MemoryAccess *Current = Start;
898     // This walker pretends uses don't exist. If we're handed one, silently grab
899     // its def. (This has the nice side-effect of ensuring we never cache uses)
900     if (auto *MU = dyn_cast<MemoryUse>(Start))
901       Current = MU->getDefiningAccess();
902 
903     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly common case (no crazy phi optimization
    // necessary).
906     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
907     MemoryAccess *Result;
908     if (WalkResult.IsKnownClobber) {
909       Result = WalkResult.Result;
910       Q.AR = WalkResult.AR;
911     } else {
912       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
913                                           Current, Q.StartingLoc);
914       verifyOptResult(OptRes);
915       resetPhiOptznState();
916       Result = OptRes.PrimaryClobber.Clobber;
917     }
918 
919 #ifdef EXPENSIVE_CHECKS
920     if (!Q.SkipSelfAccess)
921       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
922 #endif
923     return Result;
924   }
925 };
926 
927 struct RenamePassData {
928   DomTreeNode *DTN;
929   DomTreeNode::const_iterator ChildIt;
930   MemoryAccess *IncomingVal;
931 
932   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
933                  MemoryAccess *M)
934       : DTN(D), ChildIt(It), IncomingVal(M) {}
935 
936   void swap(RenamePassData &RHS) {
937     std::swap(DTN, RHS.DTN);
938     std::swap(ChildIt, RHS.ChildIt);
939     std::swap(IncomingVal, RHS.IncomingVal);
940   }
941 };
942 
943 } // end anonymous namespace
944 
945 namespace llvm {
946 
947 class MemorySSA::ClobberWalkerBase {
948   ClobberWalker Walker;
949   MemorySSA *MSSA;
950 
951 public:
952   ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
953       : Walker(*M, *A, *D), MSSA(M) {}
954 
955   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
956                                               const MemoryLocation &);
  // The second argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
963   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
964 };
965 
966 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
967 /// longer does caching on its own, but the name has been retained for the
968 /// moment.
969 class MemorySSA::CachingWalker final : public MemorySSAWalker {
970   ClobberWalkerBase *Walker;
971 
972 public:
973   CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
974       : MemorySSAWalker(M), Walker(W) {}
975   ~CachingWalker() override = default;
976 
977   using MemorySSAWalker::getClobberingMemoryAccess;
978 
979   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
980   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
981                                           const MemoryLocation &Loc) override;
982 
983   void invalidateInfo(MemoryAccess *MA) override {
984     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
985       MUD->resetOptimized();
986   }
987 };
988 
989 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
990   ClobberWalkerBase *Walker;
991 
992 public:
993   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
994       : MemorySSAWalker(M), Walker(W) {}
995   ~SkipSelfWalker() override = default;
996 
997   using MemorySSAWalker::getClobberingMemoryAccess;
998 
999   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
1000   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1001                                           const MemoryLocation &Loc) override;
1002 
1003   void invalidateInfo(MemoryAccess *MA) override {
1004     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1005       MUD->resetOptimized();
1006   }
1007 };
1008 
1009 } // end namespace llvm
1010 
1011 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1012                                     bool RenameAllUses) {
1013   // Pass through values to our successors
1014   for (const BasicBlock *S : successors(BB)) {
1015     auto It = PerBlockAccesses.find(S);
1016     // Rename the phi nodes in our successor block
1017     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1018       continue;
1019     AccessList *Accesses = It->second.get();
1020     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1021     if (RenameAllUses) {
1022       int PhiIndex = Phi->getBasicBlockIndex(BB);
1023       assert(PhiIndex != -1 && "Incomplete phi during partial rename");
1024       Phi->setIncomingValue(PhiIndex, IncomingVal);
1025     } else
1026       Phi->addIncoming(IncomingVal, BB);
1027   }
1028 }
1029 
1030 /// Rename a single basic block into MemorySSA form.
1031 /// Uses the standard SSA renaming algorithm.
1032 /// \returns The new incoming value.
1033 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1034                                      bool RenameAllUses) {
1035   auto It = PerBlockAccesses.find(BB);
1036   // Skip most processing if the list is empty.
1037   if (It != PerBlockAccesses.end()) {
1038     AccessList *Accesses = It->second.get();
1039     for (MemoryAccess &L : *Accesses) {
1040       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1041         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1042           MUD->setDefiningAccess(IncomingVal);
1043         if (isa<MemoryDef>(&L))
1044           IncomingVal = &L;
1045       } else {
1046         IncomingVal = &L;
1047       }
1048     }
1049   }
1050   return IncomingVal;
1051 }
1052 
1053 /// This is the standard SSA renaming algorithm.
1054 ///
1055 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1056 /// in phi nodes in our successors.
1057 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1058                            SmallPtrSetImpl<BasicBlock *> &Visited,
1059                            bool SkipVisited, bool RenameAllUses) {
1060   SmallVector<RenamePassData, 32> WorkStack;
1061   // Skip everything if we already renamed this block and we are skipping.
1062   // Note: You can't sink this into the if, because we need it to occur
1063   // regardless of whether we skip blocks or not.
1064   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1065   if (SkipVisited && AlreadyVisited)
1066     return;
1067 
1068   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1069   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1070   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1071 
1072   while (!WorkStack.empty()) {
1073     DomTreeNode *Node = WorkStack.back().DTN;
1074     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1075     IncomingVal = WorkStack.back().IncomingVal;
1076 
1077     if (ChildIt == Node->end()) {
1078       WorkStack.pop_back();
1079     } else {
1080       DomTreeNode *Child = *ChildIt;
1081       ++WorkStack.back().ChildIt;
1082       BasicBlock *BB = Child->getBlock();
1083       // Note: You can't sink this into the if, because we need it to occur
1084       // regardless of whether we skip blocks or not.
1085       AlreadyVisited = !Visited.insert(BB).second;
1086       if (SkipVisited && AlreadyVisited) {
1087         // We already visited this during our renaming, which can happen when
1088         // being asked to rename multiple blocks. Figure out the incoming val,
1089         // which is the last def.
1090         // Incoming value can only change if there is a block def, and in that
1091         // case, it's the last block def in the list.
1092         if (auto *BlockDefs = getWritableBlockDefs(BB))
1093           IncomingVal = &*BlockDefs->rbegin();
1094       } else
1095         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1096       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1097       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1098     }
1099   }
1100 }
1101 
1102 /// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
1104 /// being uses of the live on entry definition.
1105 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1106   assert(!DT->isReachableFromEntry(BB) &&
1107          "Reachable block found while handling unreachable blocks");
1108 
1109   // Make sure phi nodes in our reachable successors end up with a
1110   // LiveOnEntryDef for our incoming edge, even though our block is forward
1111   // unreachable.  We could just disconnect these blocks from the CFG fully,
1112   // but we do not right now.
1113   for (const BasicBlock *S : successors(BB)) {
1114     if (!DT->isReachableFromEntry(S))
1115       continue;
1116     auto It = PerBlockAccesses.find(S);
1117     // Rename the phi nodes in our successor block
1118     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1119       continue;
1120     AccessList *Accesses = It->second.get();
1121     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1122     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1123   }
1124 
1125   auto It = PerBlockAccesses.find(BB);
1126   if (It == PerBlockAccesses.end())
1127     return;
1128 
1129   auto &Accesses = It->second;
1130   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1131     auto Next = std::next(AI);
1132     // If we have a phi, just remove it. We are going to replace all
1133     // users with live on entry.
1134     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1135       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1136     else
1137       Accesses->erase(AI);
1138     AI = Next;
1139   }
1140 }
1141 
1142 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1143     : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1144       SkipWalker(nullptr), NextID(0) {
1145   buildMemorySSA();
1146 }
1147 
1148 MemorySSA::~MemorySSA() {
1149   // Drop all our references
1150   for (const auto &Pair : PerBlockAccesses)
1151     for (MemoryAccess &MA : *Pair.second)
1152       MA.dropAllReferences();
1153 }
1154 
1155 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1156   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1157 
1158   if (Res.second)
1159     Res.first->second = llvm::make_unique<AccessList>();
1160   return Res.first->second.get();
1161 }
1162 
1163 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1164   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1165 
1166   if (Res.second)
1167     Res.first->second = llvm::make_unique<DefsList>();
1168   return Res.first->second.get();
1169 }
1170 
1171 namespace llvm {
1172 
1173 /// This class is a batch walker of all MemoryUse's in the program, and points
1174 /// their defining access at the thing that actually clobbers them.  Because it
1175 /// is a batch walker that touches everything, it does not operate like the
1176 /// other walkers.  This walker is basically performing a top-down SSA renaming
1177 /// pass, where the version stack is used as the cache.  This enables it to be
1178 /// significantly more time and memory efficient than using the regular walker,
1179 /// which is walking bottom-up.
1180 class MemorySSA::OptimizeUses {
1181 public:
1182   OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1183                DominatorTree *DT)
1184       : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {}
1185 
1186   void optimizeUses();
1187 
1188 private:
  /// This represents where a given MemoryLocation is in the stack.
1190   struct MemlocStackInfo {
1191     // This essentially is keeping track of versions of the stack. Whenever
1192     // the stack changes due to pushes or pops, these versions increase.
1193     unsigned long StackEpoch;
1194     unsigned long PopEpoch;
1195     // This is the lower bound of places on the stack to check. It is equal to
1196     // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
1199     unsigned long LowerBound;
1200     const BasicBlock *LowerBoundBlock;
1201     // This is where the last walk for this memory location ended.
1202     unsigned long LastKill;
1203     bool LastKillValid;
1204     Optional<AliasResult> AR;
1205   };
1206 
1207   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1208                            SmallVectorImpl<MemoryAccess *> &,
1209                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1210 
1211   MemorySSA *MSSA;
1212   MemorySSAWalker *Walker;
1213   AliasAnalysis *AA;
1214   DominatorTree *DT;
1215 };
1216 
1217 } // end namespace llvm
1218 
/// Optimize the uses in a given block. This is basically the SSA renaming
1220 /// algorithm, with one caveat: We are able to use a single stack for all
1221 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1222 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1223 /// going to be some position in that stack of possible ones.
1224 ///
1225 /// We track the stack positions that each MemoryLocation needs
1226 /// to check, and last ended at.  This is because we only want to check the
1227 /// things that changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness
/// or similar properties, and if it starts to, we can extend MemoryLocOrCall
/// to include the relevant data).
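///
/// For example (illustrative): with a version stack of
///   [liveOnEntry, S1 (store to %p), S2 (store to %q)]
/// a MemoryUse that loads %p starts at the top of the stack, finds that S2
/// does not clobber it, stops at S1, and records S1 as its defining access.
/// The saved LowerBound/LastKill then let a later use of %p in a dominated
/// block avoid re-checking S2.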
1231 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1232     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1233     SmallVectorImpl<MemoryAccess *> &VersionStack,
1234     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1235 
  // If no accesses, nothing to do.
1237   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1238   if (Accesses == nullptr)
1239     return;
1240 
1241   // Pop everything that doesn't dominate the current block off the stack,
1242   // increment the PopEpoch to account for this.
1243   while (true) {
1244     assert(
1245         !VersionStack.empty() &&
1246         "Version stack should have liveOnEntry sentinel dominating everything");
1247     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1248     if (DT->dominates(BackBlock, BB))
1249       break;
1250     while (VersionStack.back()->getBlock() == BackBlock)
1251       VersionStack.pop_back();
1252     ++PopEpoch;
1253   }
1254 
1255   for (MemoryAccess &MA : *Accesses) {
1256     auto *MU = dyn_cast<MemoryUse>(&MA);
1257     if (!MU) {
1258       VersionStack.push_back(&MA);
1259       ++StackEpoch;
1260       continue;
1261     }
1262 
1263     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1264       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1265       continue;
1266     }
1267 
1268     MemoryLocOrCall UseMLOC(MU);
1269     auto &LocInfo = LocStackInfo[UseMLOC];
1270     // If the pop epoch changed, it means we've removed stuff from top of
1271     // stack due to changing blocks. We may have to reset the lower bound or
1272     // last kill info.
1273     if (LocInfo.PopEpoch != PopEpoch) {
1274       LocInfo.PopEpoch = PopEpoch;
1275       LocInfo.StackEpoch = StackEpoch;
1276       // If the lower bound was in something that no longer dominates us, we
1277       // have to reset it.
1278       // We can't simply track stack size, because the stack may have had
1279       // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  To get the optimal number of queries would
1282       // be to make lowerbound and lastkill a per-loc stack, and pop it until
1283       // the top of that stack dominates us.  This does not seem worth it ATM.
1284       // A much cheaper optimization would be to always explore the deepest
1285       // branch of the dominator tree first. This will guarantee this resets on
1286       // the smallest set of blocks.
1287       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1288           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1289         // Reset the lower bound of things to check.
1290         // TODO: Some day we should be able to reset to last kill, rather than
1291         // 0.
1292         LocInfo.LowerBound = 0;
1293         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1294         LocInfo.LastKillValid = false;
1295       }
1296     } else if (LocInfo.StackEpoch != StackEpoch) {
1297       // If all that has changed is the StackEpoch, we only have to check the
1298       // new things on the stack, because we've checked everything before.  In
1299       // this case, the lower bound of things to check remains the same.
1300       LocInfo.PopEpoch = PopEpoch;
1301       LocInfo.StackEpoch = StackEpoch;
1302     }
1303     if (!LocInfo.LastKillValid) {
1304       LocInfo.LastKill = VersionStack.size() - 1;
1305       LocInfo.LastKillValid = true;
1306       LocInfo.AR = MayAlias;
1307     }
1308 
1309     // At this point, we should have corrected last kill and LowerBound to be
1310     // in bounds.
1311     assert(LocInfo.LowerBound < VersionStack.size() &&
1312            "Lower bound out of range");
1313     assert(LocInfo.LastKill < VersionStack.size() &&
1314            "Last kill info out of range");
1315     // In any case, the new upper bound is the top of the stack.
1316     unsigned long UpperBound = VersionStack.size() - 1;
1317 
1318     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1319       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1320                         << *(MU->getMemoryInst()) << ")"
1321                         << " because there are "
1322                         << UpperBound - LocInfo.LowerBound
1323                         << " stores to disambiguate\n");
1324       // Because we did not walk, LastKill is no longer valid, as this may
1325       // have been a kill.
1326       LocInfo.LastKillValid = false;
1327       continue;
1328     }
1329     bool FoundClobberResult = false;
1330     while (UpperBound > LocInfo.LowerBound) {
1331       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1332         // For phis, use the walker, see where we ended up, go there
1333         Instruction *UseInst = MU->getMemoryInst();
1334         MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1335         // We are guaranteed to find it or something is wrong
1336         while (VersionStack[UpperBound] != Result) {
1337           assert(UpperBound != 0);
1338           --UpperBound;
1339         }
1340         FoundClobberResult = true;
1341         break;
1342       }
1343 
1344       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1345       // If the lifetime of the pointer ends at this instruction, it's live on
1346       // entry.
1347       if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1348         // Reset UpperBound to liveOnEntryDef's place in the stack
1349         UpperBound = 0;
1350         FoundClobberResult = true;
1351         LocInfo.AR = MustAlias;
1352         break;
1353       }
1354       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1355       if (CA.IsClobber) {
1356         FoundClobberResult = true;
1357         LocInfo.AR = CA.AR;
1358         break;
1359       }
1360       --UpperBound;
1361     }
1362 
1363     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1364 
    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, <
    // LastKill.
1367     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // Our last kill is now the access we walked down to.
1369       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1370         LocInfo.AR = None;
1371       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1372       LocInfo.LastKill = UpperBound;
1373     } else {
1374       // Otherwise, we checked all the new ones, and now we know we can get to
1375       // LastKill.
1376       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1377     }
1378     LocInfo.LowerBound = VersionStack.size() - 1;
1379     LocInfo.LowerBoundBlock = BB;
1380   }
1381 }
1382 
1383 /// Optimize uses to point to their actual clobbering definitions.
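///
/// This performs a non-recursive, top-down dominator tree walk, maintaining a
/// stack of currently live MemoryAccesses (the version stack), much like SSA
/// renaming. Each MemoryUse is then pointed at the nearest entry on that
/// stack that clobbers it, with per-location bookkeeping (and MaxCheckLimit)
/// used to bound how much of the stack has to be re-examined.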
1384 void MemorySSA::OptimizeUses::optimizeUses() {
1385   SmallVector<MemoryAccess *, 16> VersionStack;
1386   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1387   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1388 
1389   unsigned long StackEpoch = 1;
1390   unsigned long PopEpoch = 1;
1391   // We perform a non-recursive top-down dominator tree walk.
1392   for (const auto *DomNode : depth_first(DT->getRootNode()))
1393     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1394                         LocStackInfo);
1395 }
1396 
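/// Place MemoryPhis at the iterated dominance frontier (IDF) of the blocks
/// that contain MemoryDefs, mirroring classic SSA phi placement.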
1397 void MemorySSA::placePHINodes(
1398     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
1400   ForwardIDFCalculator IDFs(*DT);
1401   IDFs.setDefiningBlocks(DefiningBlocks);
1402   SmallVector<BasicBlock *, 32> IDFBlocks;
1403   IDFs.calculate(IDFBlocks);
1404 
1405   // Now place MemoryPhi nodes.
1406   for (auto &BB : IDFBlocks)
1407     createMemoryPhi(BB);
1408 }
1409 
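/// Build MemorySSA for the function: create the live-on-entry def, collect
/// per-block access and def lists, place MemoryPhis at the IDF of the
/// defining blocks, run SSA renaming to wire up defining accesses, optimize
/// uses, and finally mark uses in unreachable blocks as live on entry.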
1410 void MemorySSA::buildMemorySSA() {
1411   // We create an access to represent "live on entry", for things like
1412   // arguments or users of globals, where the memory they use is defined before
1413   // the beginning of the function. We do not actually insert it into the IR.
1414   // We do not define a live on exit for the immediate uses, and thus our
1415   // semantics do *not* imply that something with no immediate uses can simply
1416   // be removed.
1417   BasicBlock &StartingPoint = F.getEntryBlock();
1418   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1419                                      &StartingPoint, NextID++));
1420 
  // We maintain lists of memory accesses per block, trading memory for time.
  // Alternatively, we could just look up the memory access for every possible
  // instruction in the stream.
1424   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1425   // Go through each block, figure out where defs occur, and chain together all
1426   // the accesses.
1427   for (BasicBlock &B : F) {
1428     bool InsertIntoDef = false;
1429     AccessList *Accesses = nullptr;
1430     DefsList *Defs = nullptr;
1431     for (Instruction &I : B) {
1432       MemoryUseOrDef *MUD = createNewAccess(&I);
1433       if (!MUD)
1434         continue;
1435 
1436       if (!Accesses)
1437         Accesses = getOrCreateAccessList(&B);
1438       Accesses->push_back(MUD);
1439       if (isa<MemoryDef>(MUD)) {
1440         InsertIntoDef = true;
1441         if (!Defs)
1442           Defs = getOrCreateDefsList(&B);
1443         Defs->push_back(*MUD);
1444       }
1445     }
1446     if (InsertIntoDef)
1447       DefiningBlocks.insert(&B);
1448   }
1449   placePHINodes(DefiningBlocks);
1450 
1451   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1452   // filled in with all blocks.
1453   SmallPtrSet<BasicBlock *, 16> Visited;
1454   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1455 
1456   CachingWalker *Walker = getWalkerImpl();
1457 
1458   OptimizeUses(this, Walker, AA, DT).optimizeUses();
1459 
1460   // Mark the uses in unreachable blocks as live on entry, so that they go
1461   // somewhere.
1462   for (auto &BB : F)
1463     if (!Visited.count(&BB))
1464       markUnreachableAsLiveOnEntry(&BB);
1465 }
1466 
1467 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1468 
1469 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1470   if (Walker)
1471     return Walker.get();
1472 
1473   if (!WalkerBase)
1474     WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
1475 
1476   Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
1477   return Walker.get();
1478 }
1479 
1480 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1481   if (SkipWalker)
1482     return SkipWalker.get();
1483 
1484   if (!WalkerBase)
1485     WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);
1486 
1487   SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
1488   return SkipWalker.get();
1489  }
1490 
1491 
1492 // This is a helper function used by the creation routines. It places NewAccess
1493 // into the access and defs lists for a given basic block, at the given
1494 // insertion point.
1495 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1496                                         const BasicBlock *BB,
1497                                         InsertionPlace Point) {
1498   auto *Accesses = getOrCreateAccessList(BB);
1499   if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
1502     if (isa<MemoryPhi>(NewAccess)) {
1503       Accesses->push_front(NewAccess);
1504       auto *Defs = getOrCreateDefsList(BB);
1505       Defs->push_front(*NewAccess);
1506     } else {
1507       auto AI = find_if_not(
1508           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1509       Accesses->insert(AI, NewAccess);
1510       if (!isa<MemoryUse>(NewAccess)) {
1511         auto *Defs = getOrCreateDefsList(BB);
1512         auto DI = find_if_not(
1513             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1514         Defs->insert(DI, *NewAccess);
1515       }
1516     }
1517   } else {
1518     Accesses->push_back(NewAccess);
1519     if (!isa<MemoryUse>(NewAccess)) {
1520       auto *Defs = getOrCreateDefsList(BB);
1521       Defs->push_back(*NewAccess);
1522     }
1523   }
1524   BlockNumberingValid.erase(BB);
1525 }
1526 
1527 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1528                                       AccessList::iterator InsertPt) {
1529   auto *Accesses = getWritableBlockAccesses(BB);
1530   bool WasEnd = InsertPt == Accesses->end();
1531   Accesses->insert(AccessList::iterator(InsertPt), What);
1532   if (!isa<MemoryUse>(What)) {
1533     auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job: just shove it
    // at the end. If we got asked to insert before an existing def, we also
    // get an iterator. If we got asked to insert before a use, we have to hunt
    // for the next def.
1538     if (WasEnd) {
1539       Defs->push_back(*What);
1540     } else if (isa<MemoryDef>(InsertPt)) {
1541       Defs->insert(InsertPt->getDefsIterator(), *What);
1542     } else {
1543       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1544         ++InsertPt;
1545       // Either we found a def, or we are inserting at the end
1546       if (InsertPt == Accesses->end())
1547         Defs->push_back(*What);
1548       else
1549         Defs->insert(InsertPt->getDefsIterator(), *What);
1550     }
1551   }
1552   BlockNumberingValid.erase(BB);
1553 }
1554 
1555 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1556   // Keep it in the lookup tables, remove from the lists
1557   removeFromLists(What, false);
1558 
1559   // Note that moving should implicitly invalidate the optimized state of a
1560   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1561   // MemoryDef.
1562   if (auto *MD = dyn_cast<MemoryDef>(What))
1563     MD->resetOptimized();
1564   What->setBlock(BB);
1565 }
1566 
1567 // Move What before Where in the IR.  The end result is that What will belong to
1568 // the right lists and have the right Block set, but will not otherwise be
1569 // correct. It will not have the right defining access, and if it is a def,
1570 // things below it will not properly be updated.
1571 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1572                        AccessList::iterator Where) {
1573   prepareForMoveTo(What, BB);
1574   insertIntoListsBefore(What, BB, Where);
1575 }
1576 
1577 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1578                        InsertionPlace Point) {
1579   if (isa<MemoryPhi>(What)) {
1580     assert(Point == Beginning &&
1581            "Can only move a Phi at the beginning of the block");
1582     // Update lookup table entry
1583     ValueToMemoryAccess.erase(What->getBlock());
1584     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1585     (void)Inserted;
1586     assert(Inserted && "Cannot move a Phi to a block that already has one");
1587   }
1588 
1589   prepareForMoveTo(What, BB);
1590   insertIntoListsForBlock(What, BB, Point);
1591 }
1592 
1593 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1594   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1595   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
1597   insertIntoListsForBlock(Phi, BB, Beginning);
1598   ValueToMemoryAccess[BB] = Phi;
1599   return Phi;
1600 }
1601 
1602 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1603                                                MemoryAccess *Definition,
1604                                                const MemoryUseOrDef *Template) {
1605   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1606   MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
1607   assert(
1608       NewAccess != nullptr &&
1609       "Tried to create a memory access for a non-memory touching instruction");
1610   NewAccess->setDefiningAccess(Definition);
1611   return NewAccess;
1612 }
1613 
1614 // Return true if the instruction has ordering constraints.
1615 // Note specifically that this only considers stores and loads
1616 // because others are still considered ModRef by getModRefInfo.
1617 static inline bool isOrdered(const Instruction *I) {
1618   if (auto *SI = dyn_cast<StoreInst>(I)) {
1619     if (!SI->isUnordered())
1620       return true;
1621   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1622     if (!LI->isUnordered())
1623       return true;
1624   }
1625   return false;
1626 }
1627 
1628 /// Helper function to create new memory accesses
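/// If \p Template is non-null (e.g. when an access is being cloned from an
/// existing MemorySSA), its kind determines whether we create a MemoryDef or
/// a MemoryUse without re-querying alias analysis; an assertion-only check
/// below verifies the template against what AA would report.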
1629 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1630                                            const MemoryUseOrDef *Template) {
1631   // The assume intrinsic has a control dependency which we model by claiming
1632   // that it writes arbitrarily. Ignore that fake memory dependency here.
1633   // FIXME: Replace this special casing with a more accurate modelling of
1634   // assume's control dependency.
1635   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1636     if (II->getIntrinsicID() == Intrinsic::assume)
1637       return nullptr;
1638 
1639   bool Def, Use;
1640   if (Template) {
1641     Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1642     Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1643 #if !defined(NDEBUG)
1644     ModRefInfo ModRef = AA->getModRefInfo(I, None);
1645     bool DefCheck, UseCheck;
1646     DefCheck = isModSet(ModRef) || isOrdered(I);
1647     UseCheck = isRefSet(ModRef);
1648     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1649 #endif
1650   } else {
    // Find out what effect this instruction has on memory.
1652     ModRefInfo ModRef = AA->getModRefInfo(I, None);
1653     // The isOrdered check is used to ensure that volatiles end up as defs
1654     // (atomics end up as ModRef right now anyway).  Until we separate the
1655     // ordering chain from the memory chain, this enables people to see at least
1656     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1657     // will still give an answer that bypasses other volatile loads.  TODO:
1658     // Separate memory aliasing and ordering into two different chains so that
1659     // we can precisely represent both "what memory will this read/write/is
1660     // clobbered by" and "what instructions can I move this past".
1661     Def = isModSet(ModRef) || isOrdered(I);
1662     Use = isRefSet(ModRef);
1663   }
1664 
  // It's possible for an instruction to neither read nor write memory. We
  // ignore such instructions during construction.
1667   if (!Def && !Use)
1668     return nullptr;
1669 
1670   MemoryUseOrDef *MUD;
1671   if (Def)
1672     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1673   else
1674     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1675   ValueToMemoryAccess[I] = MUD;
1676   return MUD;
1677 }
1678 
/// Returns true if \p Replacer dominates \p Replacee.
1680 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1681                              const MemoryAccess *Replacee) const {
1682   if (isa<MemoryUseOrDef>(Replacee))
1683     return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1684   const auto *MP = cast<MemoryPhi>(Replacee);
1685   // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
1688   for (const Use &Arg : MP->operands()) {
1689     if (Arg.get() != Replacee &&
1690         !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1691       return false;
1692   }
1693   return true;
1694 }
1695 
1696 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1697 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1698   assert(MA->use_empty() &&
1699          "Trying to remove memory access that still has uses");
1700   BlockNumbering.erase(MA);
1701   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1702     MUD->setDefiningAccess(nullptr);
1703   // Invalidate our walker's cache if necessary
1704   if (!isa<MemoryUse>(MA))
1705     Walker->invalidateInfo(MA);
1706 
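  // MemoryUses and MemoryDefs are keyed in ValueToMemoryAccess by their
  // instruction; MemoryPhis are keyed by their basic block.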
1707   Value *MemoryInst;
1708   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1709     MemoryInst = MUD->getMemoryInst();
1710   else
1711     MemoryInst = MA->getBlock();
1712 
1713   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1714   if (VMA->second == MA)
1715     ValueToMemoryAccess.erase(VMA);
1716 }
1717 
1718 /// Properly remove \p MA from all of MemorySSA's lists.
1719 ///
1720 /// Because of the way the intrusive list and use lists work, it is important to
1721 /// do removal in the right order.
1722 /// ShouldDelete defaults to true, and will cause the memory access to also be
1723 /// deleted, not just removed.
1724 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1725   BasicBlock *BB = MA->getBlock();
1726   // The access list owns the reference, so we erase it from the non-owning list
1727   // first.
1728   if (!isa<MemoryUse>(MA)) {
1729     auto DefsIt = PerBlockDefs.find(BB);
1730     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1731     Defs->remove(*MA);
1732     if (Defs->empty())
1733       PerBlockDefs.erase(DefsIt);
1734   }
1735 
1736   // The erase call here will delete it. If we don't want it deleted, we call
1737   // remove instead.
1738   auto AccessIt = PerBlockAccesses.find(BB);
1739   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1740   if (ShouldDelete)
1741     Accesses->erase(MA);
1742   else
1743     Accesses->remove(MA);
1744 
1745   if (Accesses->empty()) {
1746     PerBlockAccesses.erase(AccessIt);
1747     BlockNumberingValid.erase(BB);
1748   }
1749 }
1750 
1751 void MemorySSA::print(raw_ostream &OS) const {
1752   MemorySSAAnnotatedWriter Writer(this);
1753   F.print(OS, &Writer);
1754 }
1755 
1756 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1757 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1758 #endif
1759 
1760 void MemorySSA::verifyMemorySSA() const {
1761   verifyDefUses(F);
1762   verifyDomination(F);
1763   verifyOrdering(F);
1764   verifyDominationNumbers(F);
  // Previously, the verification used to also verify that the clobbering
  // access cached by MemorySSA is the same as the clobbering access found by a
  // later query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, correct transformations will lead to
  // BasicAA returning different alias answers before and after the
  // transformation. Invalidating MemorySSA is not an option: because the
  // results from BasicAA can be so unpredictable, in the worst case we'd need
  // to rebuild MemorySSA from scratch after every transformation, which
  // defeats the purpose of using it. For such an example, see test4 added in
  // D51960.
1775 }
1776 
1777 /// Verify that all of the blocks we believe to have valid domination numbers
1778 /// actually have valid domination numbers.
1779 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1780 #ifndef NDEBUG
1781   if (BlockNumberingValid.empty())
1782     return;
1783 
1784   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1785   for (const BasicBlock &BB : F) {
1786     if (!ValidBlocks.count(&BB))
1787       continue;
1788 
1789     ValidBlocks.erase(&BB);
1790 
1791     const AccessList *Accesses = getBlockAccesses(&BB);
1792     // It's correct to say an empty block has valid numbering.
1793     if (!Accesses)
1794       continue;
1795 
1796     // Block numbering starts at 1.
1797     unsigned long LastNumber = 0;
1798     for (const MemoryAccess &MA : *Accesses) {
1799       auto ThisNumberIter = BlockNumbering.find(&MA);
1800       assert(ThisNumberIter != BlockNumbering.end() &&
1801              "MemoryAccess has no domination number in a valid block!");
1802 
1803       unsigned long ThisNumber = ThisNumberIter->second;
1804       assert(ThisNumber > LastNumber &&
1805              "Domination numbers should be strictly increasing!");
1806       LastNumber = ThisNumber;
1807     }
1808   }
1809 
1810   assert(ValidBlocks.empty() &&
1811          "All valid BasicBlocks should exist in F -- dangling pointers?");
1812 #endif
1813 }
1814 
1815 /// Verify that the order and existence of MemoryAccesses matches the
1816 /// order and existence of memory affecting instructions.
1817 void MemorySSA::verifyOrdering(Function &F) const {
1818 #ifndef NDEBUG
1819   // Walk all the blocks, comparing what the lookups think and what the access
1820   // lists think, as well as the order in the blocks vs the order in the access
1821   // lists.
1822   SmallVector<MemoryAccess *, 32> ActualAccesses;
1823   SmallVector<MemoryAccess *, 32> ActualDefs;
1824   for (BasicBlock &B : F) {
1825     const AccessList *AL = getBlockAccesses(&B);
1826     const auto *DL = getBlockDefs(&B);
1827     MemoryAccess *Phi = getMemoryAccess(&B);
1828     if (Phi) {
1829       ActualAccesses.push_back(Phi);
1830       ActualDefs.push_back(Phi);
1831     }
1832 
1833     for (Instruction &I : B) {
1834       MemoryAccess *MA = getMemoryAccess(&I);
1835       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1836              "We have memory affecting instructions "
1837              "in this block but they are not in the "
1838              "access list or defs list");
1839       if (MA) {
1840         ActualAccesses.push_back(MA);
1841         if (isa<MemoryDef>(MA))
1842           ActualDefs.push_back(MA);
1843       }
1844     }
1845     // Either we hit the assert, really have no accesses, or we have both
1846     // accesses and an access list.
1847     // Same with defs.
1848     if (!AL && !DL)
1849       continue;
1850     assert(AL->size() == ActualAccesses.size() &&
1851            "We don't have the same number of accesses in the block as on the "
1852            "access list");
1853     assert((DL || ActualDefs.size() == 0) &&
1854            "Either we should have a defs list, or we should have no defs");
1855     assert((!DL || DL->size() == ActualDefs.size()) &&
1856            "We don't have the same number of defs in the block as on the "
1857            "def list");
1858     auto ALI = AL->begin();
1859     auto AAI = ActualAccesses.begin();
1860     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1861       assert(&*ALI == *AAI && "Not the same accesses in the same order");
1862       ++ALI;
1863       ++AAI;
1864     }
1865     ActualAccesses.clear();
1866     if (DL) {
1867       auto DLI = DL->begin();
1868       auto ADI = ActualDefs.begin();
1869       while (DLI != DL->end() && ADI != ActualDefs.end()) {
1870         assert(&*DLI == *ADI && "Not the same defs in the same order");
1871         ++DLI;
1872         ++ADI;
1873       }
1874     }
1875     ActualDefs.clear();
1876   }
1877 #endif
1878 }
1879 
1880 /// Verify the domination properties of MemorySSA by checking that each
1881 /// definition dominates all of its uses.
1882 void MemorySSA::verifyDomination(Function &F) const {
1883 #ifndef NDEBUG
1884   for (BasicBlock &B : F) {
1885     // Phi nodes are attached to basic blocks
1886     if (MemoryPhi *MP = getMemoryAccess(&B))
1887       for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1889 
1890     for (Instruction &I : B) {
1891       MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1892       if (!MD)
1893         continue;
1894 
1895       for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1897     }
1898   }
1899 #endif
1900 }
1901 
1902 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
1903 /// appears in the use list of \p Def.
1904 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1905 #ifndef NDEBUG
1906   // The live on entry use may cause us to get a NULL def here
1907   if (!Def)
1908     assert(isLiveOnEntryDef(Use) &&
1909            "Null def but use not point to live on entry def");
1910   else
1911     assert(is_contained(Def->users(), Use) &&
1912            "Did not find use in def's use list");
1913 #endif
1914 }
1915 
1916 /// Verify the immediate use information, by walking all the memory
1917 /// accesses and verifying that, for each use, it appears in the
1918 /// appropriate def's use list
1919 void MemorySSA::verifyDefUses(Function &F) const {
1920 #ifndef NDEBUG
1921   for (BasicBlock &B : F) {
1922     // Phi nodes are attached to basic blocks
1923     if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1924       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1925                                           pred_begin(&B), pred_end(&B))) &&
1926              "Incomplete MemoryPhi Node");
1927       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1928         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1929         assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1930                    pred_end(&B) &&
1931                "Incoming phi block not a block predecessor");
1932       }
1933     }
1934 
1935     for (Instruction &I : B) {
1936       if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1937         verifyUseInDefs(MA->getDefiningAccess(), MA);
1938       }
1939     }
1940   }
1941 #endif
1942 }
1943 
1944 /// Perform a local numbering on blocks so that instruction ordering can be
1945 /// determined in constant time.
1946 /// TODO: We currently just number in order.  If we numbered by N, we could
1947 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1948 /// log2(N) sequences of mixed before and after) without needing to invalidate
1949 /// the numbering.
1950 void MemorySSA::renumberBlock(const BasicBlock *B) const {
1951   // The pre-increment ensures the numbers really start at 1.
1952   unsigned long CurrentNumber = 0;
1953   const AccessList *AL = getBlockAccesses(B);
1954   assert(AL != nullptr && "Asking to renumber an empty block");
1955   for (const auto &I : *AL)
1956     BlockNumbering[&I] = ++CurrentNumber;
1957   BlockNumberingValid.insert(B);
1958 }
1959 
1960 /// Determine, for two memory accesses in the same block,
1961 /// whether \p Dominator dominates \p Dominatee.
1962 /// \returns True if \p Dominator dominates \p Dominatee.
1963 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1964                                  const MemoryAccess *Dominatee) const {
1965   const BasicBlock *DominatorBlock = Dominator->getBlock();
1966 
1967   assert((DominatorBlock == Dominatee->getBlock()) &&
1968          "Asking for local domination when accesses are in different blocks!");
1969   // A node dominates itself.
1970   if (Dominatee == Dominator)
1971     return true;
1972 
1973   // When Dominatee is defined on function entry, it is not dominated by another
1974   // memory access.
1975   if (isLiveOnEntryDef(Dominatee))
1976     return false;
1977 
1978   // When Dominator is defined on function entry, it dominates the other memory
1979   // access.
1980   if (isLiveOnEntryDef(Dominator))
1981     return true;
1982 
1983   if (!BlockNumberingValid.count(DominatorBlock))
1984     renumberBlock(DominatorBlock);
1985 
1986   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1987   // All numbers start with 1
1988   assert(DominatorNum != 0 && "Block was not numbered properly");
1989   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1990   assert(DominateeNum != 0 && "Block was not numbered properly");
1991   return DominatorNum < DominateeNum;
1992 }
1993 
1994 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1995                           const MemoryAccess *Dominatee) const {
1996   if (Dominator == Dominatee)
1997     return true;
1998 
1999   if (isLiveOnEntryDef(Dominatee))
2000     return false;
2001 
2002   if (Dominator->getBlock() != Dominatee->getBlock())
2003     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2004   return locallyDominates(Dominator, Dominatee);
2005 }
2006 
2007 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2008                           const Use &Dominatee) const {
2009   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2010     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2011     // The def must dominate the incoming block of the phi.
2012     if (UseBB != Dominator->getBlock())
2013       return DT->dominates(Dominator->getBlock(), UseBB);
2014     // If the UseBB and the DefBB are the same, compare locally.
2015     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2016   }
2017   // If it's not a PHI node use, the normal dominates can already handle it.
2018   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2019 }
2020 
2021 const static char LiveOnEntryStr[] = "liveOnEntry";
2022 
2023 void MemoryAccess::print(raw_ostream &OS) const {
2024   switch (getValueID()) {
2025   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2026   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2027   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2028   }
2029   llvm_unreachable("invalid value id");
2030 }
2031 
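// Prints a def in the form "N = MemoryDef(M)", where M is the ID of the
// defining access or "liveOnEntry"; when the def has been optimized, something
// like "->K MustAlias" is appended with the optimized access and, if known,
// its alias kind.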
2032 void MemoryDef::print(raw_ostream &OS) const {
2033   MemoryAccess *UO = getDefiningAccess();
2034 
2035   auto printID = [&OS](MemoryAccess *A) {
2036     if (A && A->getID())
2037       OS << A->getID();
2038     else
2039       OS << LiveOnEntryStr;
2040   };
2041 
2042   OS << getID() << " = MemoryDef(";
2043   printID(UO);
2044   OS << ")";
2045 
2046   if (isOptimized()) {
2047     OS << "->";
2048     printID(getOptimized());
2049 
2050     if (Optional<AliasResult> AR = getOptimizedAccessType())
2051       OS << " " << *AR;
2052   }
2053 }
2054 
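// Prints a phi in the form "N = MemoryPhi({pred,M},...)", with one
// {block,access} pair per incoming edge, e.g.
// "3 = MemoryPhi({entry,liveOnEntry},{if.then,2})".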
2055 void MemoryPhi::print(raw_ostream &OS) const {
2056   bool First = true;
2057   OS << getID() << " = MemoryPhi(";
2058   for (const auto &Op : operands()) {
2059     BasicBlock *BB = getIncomingBlock(Op);
2060     MemoryAccess *MA = cast<MemoryAccess>(Op);
2061     if (!First)
2062       OS << ',';
2063     else
2064       First = false;
2065 
2066     OS << '{';
2067     if (BB->hasName())
2068       OS << BB->getName();
2069     else
2070       BB->printAsOperand(OS, false);
2071     OS << ',';
2072     if (unsigned ID = MA->getID())
2073       OS << ID;
2074     else
2075       OS << LiveOnEntryStr;
2076     OS << '}';
2077   }
2078   OS << ')';
2079 }
2080 
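// Prints a use in the form "MemoryUse(M)", where M is the ID of the defining
// access or "liveOnEntry", followed by the optimized access type (e.g.
// "MayAlias") when one is recorded.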
2081 void MemoryUse::print(raw_ostream &OS) const {
2082   MemoryAccess *UO = getDefiningAccess();
2083   OS << "MemoryUse(";
2084   if (UO && UO->getID())
2085     OS << UO->getID();
2086   else
2087     OS << LiveOnEntryStr;
2088   OS << ')';
2089 
2090   if (Optional<AliasResult> AR = getOptimizedAccessType())
2091     OS << " " << *AR;
2092 }
2093 
2094 void MemoryAccess::dump() const {
2095 // Cannot completely remove virtual function even in release mode.
2096 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2097   print(dbgs());
2098   dbgs() << "\n";
2099 #endif
2100 }
2101 
2102 char MemorySSAPrinterLegacyPass::ID = 0;
2103 
2104 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2105   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2106 }
2107 
2108 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2109   AU.setPreservesAll();
2110   AU.addRequired<MemorySSAWrapperPass>();
2111 }
2112 
2113 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2114   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2115   MSSA.print(dbgs());
2116   if (VerifyMemorySSA)
2117     MSSA.verifyMemorySSA();
2118   return false;
2119 }
2120 
2121 AnalysisKey MemorySSAAnalysis::Key;
2122 
2123 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2124                                                  FunctionAnalysisManager &AM) {
2125   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2126   auto &AA = AM.getResult<AAManager>(F);
2127   return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
2128 }
2129 
2130 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2131                                             FunctionAnalysisManager &AM) {
2132   OS << "MemorySSA for function: " << F.getName() << "\n";
2133   AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2134 
2135   return PreservedAnalyses::all();
2136 }
2137 
2138 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2139                                              FunctionAnalysisManager &AM) {
2140   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2141 
2142   return PreservedAnalyses::all();
2143 }
2144 
2145 char MemorySSAWrapperPass::ID = 0;
2146 
2147 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2148   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2149 }
2150 
2151 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2152 
2153 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2154   AU.setPreservesAll();
2155   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2156   AU.addRequiredTransitive<AAResultsWrapperPass>();
2157 }
2158 
2159 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2160   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2161   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2162   MSSA.reset(new MemorySSA(F, &AA, &DT));
2163   return false;
2164 }
2165 
2166 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2167 
2168 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2169   MSSA->print(OS);
2170 }
2171 
2172 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2173 
2174 /// Walk the use-def chains starting at \p StartingAccess and find
2175 /// the MemoryAccess that actually clobbers Loc.
2176 ///
2177 /// \returns our clobbering memory access
2178 MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
2179     MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2180   if (isa<MemoryPhi>(StartingAccess))
2181     return StartingAccess;
2182 
2183   auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2184   if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2185     return StartingUseOrDef;
2186 
2187   Instruction *I = StartingUseOrDef->getMemoryInst();
2188 
2189   // Conservatively, fences are always clobbers, so don't perform the walk if we
2190   // hit a fence.
2191   if (!isa<CallBase>(I) && I->isFenceLike())
2192     return StartingUseOrDef;
2193 
2194   UpwardsMemoryQuery Q;
2195   Q.OriginalAccess = StartingUseOrDef;
2196   Q.StartingLoc = Loc;
2197   Q.Inst = I;
2198   Q.IsCall = false;
2199 
2200   // Unlike the other function, do not walk to the def of a def, because we are
2201   // handed something we already believe is the clobbering access.
2202   // We never set SkipSelf to true in Q in this method.
2203   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2204                                      ? StartingUseOrDef->getDefiningAccess()
2205                                      : StartingUseOrDef;
2206 
2207   MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
2208   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2209   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2210   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2211   LLVM_DEBUG(dbgs() << *Clobber << "\n");
2212   return Clobber;
2213 }
2214 
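/// Walk upwards from \p MA to find its clobbering access, caching the
/// non-skipping result on the access itself. If \p SkipSelf is true and \p MA
/// is a MemoryDef whose optimized clobber is a MemoryPhi, the walk is redone
/// with \p MA itself excluded from the search.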
2215 MemoryAccess *
2216 MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
2217                                                             bool SkipSelf) {
2218   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2219   // If this is a MemoryPhi, we can't do anything.
2220   if (!StartingAccess)
2221     return MA;
2222 
2223   bool IsOptimized = false;
2224 
2225   // If this is an already optimized use or def, return the optimized result.
2226   // Note: Currently, we store the optimized def result in a separate field,
2227   // since we can't use the defining access.
2228   if (StartingAccess->isOptimized()) {
2229     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2230       return StartingAccess->getOptimized();
2231     IsOptimized = true;
2232   }
2233 
2234   const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since fences conservatively
  // clobber all memory and have no locations from which to get pointers to
  // try to disambiguate.
2238   if (!isa<CallBase>(I) && I->isFenceLike())
2239     return StartingAccess;
2240 
2241   UpwardsMemoryQuery Q(I, StartingAccess);
2242 
2243   if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2244     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2245     StartingAccess->setOptimized(LiveOnEntry);
2246     StartingAccess->setOptimizedAccessType(None);
2247     return LiveOnEntry;
2248   }
2249 
2250   MemoryAccess *OptimizedAccess;
2251   if (!IsOptimized) {
2252     // Start with the thing we already think clobbers this location
2253     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2254 
2255     // At this point, DefiningAccess may be the live on entry def.
2256     // If it is, we will not get a better result.
2257     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2258       StartingAccess->setOptimized(DefiningAccess);
2259       StartingAccess->setOptimizedAccessType(None);
2260       return DefiningAccess;
2261     }
2262 
2263     OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
2264     StartingAccess->setOptimized(OptimizedAccess);
2265     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2266       StartingAccess->setOptimizedAccessType(None);
2267     else if (Q.AR == MustAlias)
2268       StartingAccess->setOptimizedAccessType(MustAlias);
2269   } else
2270     OptimizedAccess = StartingAccess->getOptimized();
2271 
2272   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2273   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2274   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2275   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2276 
2277   MemoryAccess *Result;
2278   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2279       isa<MemoryDef>(StartingAccess)) {
2280     assert(isa<MemoryDef>(Q.OriginalAccess));
2281     Q.SkipSelfAccess = true;
2282     Result = Walker.findClobber(OptimizedAccess, Q);
2283   } else
2284     Result = OptimizedAccess;
2285 
2286   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2287   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2288 
2289   return Result;
2290 }
2291 
2292 MemoryAccess *
2293 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2294   return Walker->getClobberingMemoryAccessBase(MA, false);
2295 }
2296 
2297 MemoryAccess *
2298 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2299                                                     const MemoryLocation &Loc) {
2300   return Walker->getClobberingMemoryAccessBase(MA, Loc);
2301 }
2302 
2303 MemoryAccess *
2304 MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2305   return Walker->getClobberingMemoryAccessBase(MA, true);
2306 }
2307 
2308 MemoryAccess *
2309 MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
2310                                                     const MemoryLocation &Loc) {
2311   return Walker->getClobberingMemoryAccessBase(MA, Loc);
2312 }
2313 
2314 MemoryAccess *
2315 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2316   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2317     return Use->getDefiningAccess();
2318   return MA;
2319 }
2320 
2321 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2322     MemoryAccess *StartingAccess, const MemoryLocation &) {
2323   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2324     return Use->getDefiningAccess();
2325   return StartingAccess;
2326 }
2327 
2328 void MemoryPhi::deleteMe(DerivedUser *Self) {
2329   delete static_cast<MemoryPhi *>(Self);
2330 }
2331 
2332 void MemoryDef::deleteMe(DerivedUser *Self) {
2333   delete static_cast<MemoryDef *>(Self);
2334 }
2335 
2336 void MemoryUse::deleteMe(DerivedUser *Self) {
2337   delete static_cast<MemoryUse *>(Self);
2338 }
2339