1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/IteratedDominanceFrontier.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Config/llvm-config.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Intrinsics.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/IR/Use.h"
41 #include "llvm/Pass.h"
42 #include "llvm/Support/AtomicOrdering.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/FormattedStream.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <cstdlib>
53 #include <iterator>
54 #include <memory>
55 #include <utility>
56 
57 using namespace llvm;
58 
59 #define DEBUG_TYPE "memoryssa"
60 
61 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
62                       true)
63 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
64 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
65 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
66                     true)
67 
68 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
69                       "Memory SSA Printer", false, false)
70 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
71 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
72                     "Memory SSA Printer", false, false)
73 
74 static cl::opt<unsigned> MaxCheckLimit(
75     "memssa-check-limit", cl::Hidden, cl::init(100),
76     cl::desc("The maximum number of stores/phis MemorySSA"
77              "will consider trying to walk past (default = 100)"));
78 
79 // Always verify MemorySSA if expensive checking is enabled.
80 #ifdef EXPENSIVE_CHECKS
81 bool llvm::VerifyMemorySSA = true;
82 #else
83 bool llvm::VerifyMemorySSA = false;
84 #endif
85 /// Enables memory ssa as a dependency for loop passes in legacy pass manager.
86 cl::opt<bool> llvm::EnableMSSALoopDependency(
87     "enable-mssa-loop-dependency", cl::Hidden, cl::init(false),
88     cl::desc("Enable MemorySSA dependency for loop pass manager"));
89 
90 static cl::opt<bool, true>
91     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
92                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
93 
94 namespace llvm {
95 
96 /// An assembly annotator class to print Memory SSA information in
97 /// comments.
98 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
99   friend class MemorySSA;
100 
101   const MemorySSA *MSSA;
102 
103 public:
104   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
105 
106   void emitBasicBlockStartAnnot(const BasicBlock *BB,
107                                 formatted_raw_ostream &OS) override {
108     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
109       OS << "; " << *MA << "\n";
110   }
111 
112   void emitInstructionAnnot(const Instruction *I,
113                             formatted_raw_ostream &OS) override {
114     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
115       OS << "; " << *MA << "\n";
116   }
117 };
118 
119 } // end namespace llvm
120 
121 namespace {
122 
123 /// Our current alias analysis API differentiates heavily between calls and
124 /// non-calls, and functions called on one usually assert on the other.
125 /// This class encapsulates the distinction to simplify other code that wants
126 /// "Memory affecting instructions and related data" to use as a key.
127 /// For example, this class is used as a densemap key in the use optimizer.
128 class MemoryLocOrCall {
129 public:
130   bool IsCall = false;
131 
132   MemoryLocOrCall(MemoryUseOrDef *MUD)
133       : MemoryLocOrCall(MUD->getMemoryInst()) {}
134   MemoryLocOrCall(const MemoryUseOrDef *MUD)
135       : MemoryLocOrCall(MUD->getMemoryInst()) {}
136 
137   MemoryLocOrCall(Instruction *Inst) {
138     if (auto *C = dyn_cast<CallBase>(Inst)) {
139       IsCall = true;
140       Call = C;
141     } else {
142       IsCall = false;
143       // There is no such thing as a memorylocation for a fence inst, and it is
144       // unique in that regard.
145       if (!isa<FenceInst>(Inst))
146         Loc = MemoryLocation::get(Inst);
147     }
148   }
149 
150   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
151 
152   const CallBase *getCall() const {
153     assert(IsCall);
154     return Call;
155   }
156 
157   MemoryLocation getLoc() const {
158     assert(!IsCall);
159     return Loc;
160   }
161 
162   bool operator==(const MemoryLocOrCall &Other) const {
163     if (IsCall != Other.IsCall)
164       return false;
165 
166     if (!IsCall)
167       return Loc == Other.Loc;
168 
169     if (Call->getCalledValue() != Other.Call->getCalledValue())
170       return false;
171 
172     return Call->arg_size() == Other.Call->arg_size() &&
173            std::equal(Call->arg_begin(), Call->arg_end(),
174                       Other.Call->arg_begin());
175   }
176 
177 private:
178   union {
179     const CallBase *Call;
180     MemoryLocation Loc;
181   };
182 };
183 
184 } // end anonymous namespace
185 
186 namespace llvm {
187 
188 template <> struct DenseMapInfo<MemoryLocOrCall> {
189   static inline MemoryLocOrCall getEmptyKey() {
190     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
191   }
192 
193   static inline MemoryLocOrCall getTombstoneKey() {
194     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
195   }
196 
197   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
198     if (!MLOC.IsCall)
199       return hash_combine(
200           MLOC.IsCall,
201           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
202 
203     hash_code hash =
204         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
205                                       MLOC.getCall()->getCalledValue()));
206 
207     for (const Value *Arg : MLOC.getCall()->args())
208       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
209     return hash;
210   }
211 
212   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
213     return LHS == RHS;
214   }
215 };
216 
217 } // end namespace llvm
218 
219 /// This does one-way checks to see if Use could theoretically be hoisted above
220 /// MayClobber. This will not check the other way around.
221 ///
222 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
223 /// MayClobber, with no potentially clobbering operations in between them.
224 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
225 static bool areLoadsReorderable(const LoadInst *Use,
226                                 const LoadInst *MayClobber) {
227   bool VolatileUse = Use->isVolatile();
228   bool VolatileClobber = MayClobber->isVolatile();
229   // Volatile operations may never be reordered with other volatile operations.
230   if (VolatileUse && VolatileClobber)
231     return false;
232   // Otherwise, volatile doesn't matter here. From the language reference:
233   // 'optimizers may change the order of volatile operations relative to
234   // non-volatile operations.'"
235 
236   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
237   // is weaker, it can be moved above other loads. We just need to be sure that
238   // MayClobber isn't an acquire load, because loads can't be moved above
239   // acquire loads.
240   //
241   // Note that this explicitly *does* allow the free reordering of monotonic (or
242   // weaker) loads of the same address.
243   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
244   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
245                                                      AtomicOrdering::Acquire);
246   return !(SeqCstUse || MayClobberIsAcquire);
247 }
248 
249 namespace {
250 
251 struct ClobberAlias {
252   bool IsClobber;
253   Optional<AliasResult> AR;
254 };
255 
256 } // end anonymous namespace
257 
258 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
259 // ignored if IsClobber = false.
260 template <typename AliasAnalysisType>
261 static ClobberAlias
262 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
263                          const Instruction *UseInst, AliasAnalysisType &AA) {
264   Instruction *DefInst = MD->getMemoryInst();
265   assert(DefInst && "Defining instruction not actually an instruction");
266   const auto *UseCall = dyn_cast<CallBase>(UseInst);
267   Optional<AliasResult> AR;
268 
269   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
270     // These intrinsics will show up as affecting memory, but they are just
271     // markers, mostly.
272     //
273     // FIXME: We probably don't actually want MemorySSA to model these at all
274     // (including creating MemoryAccesses for them): we just end up inventing
275     // clobbers where they don't really exist at all. Please see D43269 for
276     // context.
277     switch (II->getIntrinsicID()) {
278     case Intrinsic::lifetime_start:
279       if (UseCall)
280         return {false, NoAlias};
281       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
282       return {AR != NoAlias, AR};
283     case Intrinsic::lifetime_end:
284     case Intrinsic::invariant_start:
285     case Intrinsic::invariant_end:
286     case Intrinsic::assume:
287       return {false, NoAlias};
288     default:
289       break;
290     }
291   }
292 
293   if (UseCall) {
294     ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
295     AR = isMustSet(I) ? MustAlias : MayAlias;
296     return {isModOrRefSet(I), AR};
297   }
298 
299   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
300     if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
301       return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
302 
303   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
304   AR = isMustSet(I) ? MustAlias : MayAlias;
305   return {isModSet(I), AR};
306 }
307 
308 template <typename AliasAnalysisType>
309 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
310                                              const MemoryUseOrDef *MU,
311                                              const MemoryLocOrCall &UseMLOC,
312                                              AliasAnalysisType &AA) {
313   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
314   // to exist while MemoryLocOrCall is pushed through places.
315   if (UseMLOC.IsCall)
316     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
317                                     AA);
318   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
319                                   AA);
320 }
321 
322 // Return true when MD may alias MU, return false otherwise.
323 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
324                                         AliasAnalysis &AA) {
325   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
326 }
327 
328 namespace {
329 
330 struct UpwardsMemoryQuery {
331   // True if our original query started off as a call
332   bool IsCall = false;
333   // The pointer location we started the query with. This will be empty if
334   // IsCall is true.
335   MemoryLocation StartingLoc;
336   // This is the instruction we were querying about.
337   const Instruction *Inst = nullptr;
338   // The MemoryAccess we actually got called with, used to test local domination
339   const MemoryAccess *OriginalAccess = nullptr;
340   Optional<AliasResult> AR = MayAlias;
341   bool SkipSelfAccess = false;
342 
343   UpwardsMemoryQuery() = default;
344 
345   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
346       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
347     if (!IsCall)
348       StartingLoc = MemoryLocation::get(Inst);
349   }
350 };
351 
352 } // end anonymous namespace
353 
354 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
355                            BatchAAResults &AA) {
356   Instruction *Inst = MD->getMemoryInst();
357   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
358     switch (II->getIntrinsicID()) {
359     case Intrinsic::lifetime_end:
360       return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
361     default:
362       return false;
363     }
364   }
365   return false;
366 }
367 
368 template <typename AliasAnalysisType>
369 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
370                                                    const Instruction *I) {
371   // If the memory can't be changed, then loads of the memory can't be
372   // clobbered.
373   return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
374                               AA.pointsToConstantMemory(MemoryLocation(
375                                   cast<LoadInst>(I)->getPointerOperand())));
376 }
377 
378 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
379 /// inbetween `Start` and `ClobberAt` can clobbers `Start`.
380 ///
381 /// This is meant to be as simple and self-contained as possible. Because it
382 /// uses no cache, etc., it can be relatively expensive.
383 ///
384 /// \param Start     The MemoryAccess that we want to walk from.
385 /// \param ClobberAt A clobber for Start.
386 /// \param StartLoc  The MemoryLocation for Start.
387 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
388 /// \param Query     The UpwardsMemoryQuery we used for our search.
389 /// \param AA        The AliasAnalysis we used for our search.
390 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
391 
392 template <typename AliasAnalysisType>
393 LLVM_ATTRIBUTE_UNUSED static void
394 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
395                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
396                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
397                    bool AllowImpreciseClobber = false) {
398   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
399 
400   if (MSSA.isLiveOnEntryDef(Start)) {
401     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
402            "liveOnEntry must clobber itself");
403     return;
404   }
405 
406   bool FoundClobber = false;
407   DenseSet<ConstMemoryAccessPair> VisitedPhis;
408   SmallVector<ConstMemoryAccessPair, 8> Worklist;
409   Worklist.emplace_back(Start, StartLoc);
410   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
411   // is found, complain.
412   while (!Worklist.empty()) {
413     auto MAP = Worklist.pop_back_val();
414     // All we care about is that nothing from Start to ClobberAt clobbers Start.
415     // We learn nothing from revisiting nodes.
416     if (!VisitedPhis.insert(MAP).second)
417       continue;
418 
419     for (const auto *MA : def_chain(MAP.first)) {
420       if (MA == ClobberAt) {
421         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
422           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
423           // since it won't let us short-circuit.
424           //
425           // Also, note that this can't be hoisted out of the `Worklist` loop,
426           // since MD may only act as a clobber for 1 of N MemoryLocations.
427           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
428           if (!FoundClobber) {
429             ClobberAlias CA =
430                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
431             if (CA.IsClobber) {
432               FoundClobber = true;
433               // Not used: CA.AR;
434             }
435           }
436         }
437         break;
438       }
439 
440       // We should never hit liveOnEntry, unless it's the clobber.
441       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
442 
443       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
444         // If Start is a Def, skip self.
445         if (MD == Start)
446           continue;
447 
448         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
449                     .IsClobber &&
450                "Found clobber before reaching ClobberAt!");
451         continue;
452       }
453 
454       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
455         (void)MU;
456         assert (MU == Start &&
457                 "Can only find use in def chain if Start is a use");
458         continue;
459       }
460 
461       assert(isa<MemoryPhi>(MA));
462       Worklist.append(
463           upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
464           upward_defs_end());
465     }
466   }
467 
468   // If the verify is done following an optimization, it's possible that
469   // ClobberAt was a conservative clobbering, that we can now infer is not a
470   // true clobbering access. Don't fail the verify if that's the case.
471   // We do have accesses that claim they're optimized, but could be optimized
472   // further. Updating all these can be expensive, so allow it for now (FIXME).
473   if (AllowImpreciseClobber)
474     return;
475 
476   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
477   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
478   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
479          "ClobberAt never acted as a clobber");
480 }
481 
482 namespace {
483 
484 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
485 /// in one class.
486 template <class AliasAnalysisType> class ClobberWalker {
487   /// Save a few bytes by using unsigned instead of size_t.
488   using ListIndex = unsigned;
489 
490   /// Represents a span of contiguous MemoryDefs, potentially ending in a
491   /// MemoryPhi.
492   struct DefPath {
493     MemoryLocation Loc;
494     // Note that, because we always walk in reverse, Last will always dominate
495     // First. Also note that First and Last are inclusive.
496     MemoryAccess *First;
497     MemoryAccess *Last;
498     Optional<ListIndex> Previous;
499 
500     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
501             Optional<ListIndex> Previous)
502         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
503 
504     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
505             Optional<ListIndex> Previous)
506         : DefPath(Loc, Init, Init, Previous) {}
507   };
508 
509   const MemorySSA &MSSA;
510   AliasAnalysisType &AA;
511   DominatorTree &DT;
512   UpwardsMemoryQuery *Query;
513   unsigned *UpwardWalkLimit;
514 
515   // Phi optimization bookkeeping
516   SmallVector<DefPath, 32> Paths;
517   DenseSet<ConstMemoryAccessPair> VisitedPhis;
518 
519   /// Find the nearest def or phi that `From` can legally be optimized to.
520   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
521     assert(From->getNumOperands() && "Phi with no operands?");
522 
523     BasicBlock *BB = From->getBlock();
524     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
525     DomTreeNode *Node = DT.getNode(BB);
526     while ((Node = Node->getIDom())) {
527       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
528       if (Defs)
529         return &*Defs->rbegin();
530     }
531     return Result;
532   }
533 
534   /// Result of calling walkToPhiOrClobber.
535   struct UpwardsWalkResult {
536     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
537     /// both. Include alias info when clobber found.
538     MemoryAccess *Result;
539     bool IsKnownClobber;
540     Optional<AliasResult> AR;
541   };
542 
543   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
544   /// This will update Desc.Last as it walks. It will (optionally) also stop at
545   /// StopAt.
546   ///
547   /// This does not test for whether StopAt is a clobber
548   UpwardsWalkResult
549   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
550                      const MemoryAccess *SkipStopAt = nullptr) const {
551     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
552     assert(UpwardWalkLimit && "Need a valid walk limit");
553     bool LimitAlreadyReached = false;
554     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
555     // it to 1. This will not do any alias() calls. It either returns in the
556     // first iteration in the loop below, or is set back to 0 if all def chains
557     // are free of MemoryDefs.
558     if (!*UpwardWalkLimit) {
559       *UpwardWalkLimit = 1;
560       LimitAlreadyReached = true;
561     }
562 
563     for (MemoryAccess *Current : def_chain(Desc.Last)) {
564       Desc.Last = Current;
565       if (Current == StopAt || Current == SkipStopAt)
566         return {Current, false, MayAlias};
567 
568       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
569         if (MSSA.isLiveOnEntryDef(MD))
570           return {MD, true, MustAlias};
571 
572         if (!--*UpwardWalkLimit)
573           return {Current, true, MayAlias};
574 
575         ClobberAlias CA =
576             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
577         if (CA.IsClobber)
578           return {MD, true, CA.AR};
579       }
580     }
581 
582     if (LimitAlreadyReached)
583       *UpwardWalkLimit = 0;
584 
585     assert(isa<MemoryPhi>(Desc.Last) &&
586            "Ended at a non-clobber that's not a phi?");
587     return {Desc.Last, false, MayAlias};
588   }
589 
590   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
591                    ListIndex PriorNode) {
592     auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
593                                  upward_defs_end());
594     for (const MemoryAccessPair &P : UpwardDefs) {
595       PausedSearches.push_back(Paths.size());
596       Paths.emplace_back(P.second, P.first, PriorNode);
597     }
598   }
599 
600   /// Represents a search that terminated after finding a clobber. This clobber
601   /// may or may not be present in the path of defs from LastNode..SearchStart,
602   /// since it may have been retrieved from cache.
603   struct TerminatedPath {
604     MemoryAccess *Clobber;
605     ListIndex LastNode;
606   };
607 
608   /// Get an access that keeps us from optimizing to the given phi.
609   ///
610   /// PausedSearches is an array of indices into the Paths array. Its incoming
611   /// value is the indices of searches that stopped at the last phi optimization
612   /// target. It's left in an unspecified state.
613   ///
614   /// If this returns None, NewPaused is a vector of searches that terminated
615   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
616   Optional<TerminatedPath>
617   getBlockingAccess(const MemoryAccess *StopWhere,
618                     SmallVectorImpl<ListIndex> &PausedSearches,
619                     SmallVectorImpl<ListIndex> &NewPaused,
620                     SmallVectorImpl<TerminatedPath> &Terminated) {
621     assert(!PausedSearches.empty() && "No searches to continue?");
622 
623     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
624     // PausedSearches as our stack.
625     while (!PausedSearches.empty()) {
626       ListIndex PathIndex = PausedSearches.pop_back_val();
627       DefPath &Node = Paths[PathIndex];
628 
629       // If we've already visited this path with this MemoryLocation, we don't
630       // need to do so again.
631       //
632       // NOTE: That we just drop these paths on the ground makes caching
633       // behavior sporadic. e.g. given a diamond:
634       //  A
635       // B C
636       //  D
637       //
638       // ...If we walk D, B, A, C, we'll only cache the result of phi
639       // optimization for A, B, and D; C will be skipped because it dies here.
640       // This arguably isn't the worst thing ever, since:
641       //   - We generally query things in a top-down order, so if we got below D
642       //     without needing cache entries for {C, MemLoc}, then chances are
643       //     that those cache entries would end up ultimately unused.
644       //   - We still cache things for A, so C only needs to walk up a bit.
645       // If this behavior becomes problematic, we can fix without a ton of extra
646       // work.
647       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
648         continue;
649 
650       const MemoryAccess *SkipStopWhere = nullptr;
651       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
652         assert(isa<MemoryDef>(Query->OriginalAccess));
653         SkipStopWhere = Query->OriginalAccess;
654       }
655 
656       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
657                                                  /*StopAt=*/StopWhere,
658                                                  /*SkipStopAt=*/SkipStopWhere);
659       if (Res.IsKnownClobber) {
660         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
661 
662         // If this wasn't a cache hit, we hit a clobber when walking. That's a
663         // failure.
664         TerminatedPath Term{Res.Result, PathIndex};
665         if (!MSSA.dominates(Res.Result, StopWhere))
666           return Term;
667 
668         // Otherwise, it's a valid thing to potentially optimize to.
669         Terminated.push_back(Term);
670         continue;
671       }
672 
673       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
674         // We've hit our target. Save this path off for if we want to continue
675         // walking. If we are in the mode of skipping the OriginalAccess, and
676         // we've reached back to the OriginalAccess, do not save path, we've
677         // just looped back to self.
678         if (Res.Result != SkipStopWhere)
679           NewPaused.push_back(PathIndex);
680         continue;
681       }
682 
683       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
684       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
685     }
686 
687     return None;
688   }
689 
690   template <typename T, typename Walker>
691   struct generic_def_path_iterator
692       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
693                                     std::forward_iterator_tag, T *> {
694     generic_def_path_iterator() {}
695     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
696 
697     T &operator*() const { return curNode(); }
698 
699     generic_def_path_iterator &operator++() {
700       N = curNode().Previous;
701       return *this;
702     }
703 
704     bool operator==(const generic_def_path_iterator &O) const {
705       if (N.hasValue() != O.N.hasValue())
706         return false;
707       return !N.hasValue() || *N == *O.N;
708     }
709 
710   private:
711     T &curNode() const { return W->Paths[*N]; }
712 
713     Walker *W = nullptr;
714     Optional<ListIndex> N = None;
715   };
716 
717   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
718   using const_def_path_iterator =
719       generic_def_path_iterator<const DefPath, const ClobberWalker>;
720 
721   iterator_range<def_path_iterator> def_path(ListIndex From) {
722     return make_range(def_path_iterator(this, From), def_path_iterator());
723   }
724 
725   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
726     return make_range(const_def_path_iterator(this, From),
727                       const_def_path_iterator());
728   }
729 
730   struct OptznResult {
731     /// The path that contains our result.
732     TerminatedPath PrimaryClobber;
733     /// The paths that we can legally cache back from, but that aren't
734     /// necessarily the result of the Phi optimization.
735     SmallVector<TerminatedPath, 4> OtherClobbers;
736   };
737 
738   ListIndex defPathIndex(const DefPath &N) const {
739     // The assert looks nicer if we don't need to do &N
740     const DefPath *NP = &N;
741     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
742            "Out of bounds DefPath!");
743     return NP - &Paths.front();
744   }
745 
746   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
747   /// that act as legal clobbers. Note that this won't return *all* clobbers.
748   ///
749   /// Phi optimization algorithm tl;dr:
750   ///   - Find the earliest def/phi, A, we can optimize to
751   ///   - Find if all paths from the starting memory access ultimately reach A
752   ///     - If not, optimization isn't possible.
753   ///     - Otherwise, walk from A to another clobber or phi, A'.
754   ///       - If A' is a def, we're done.
755   ///       - If A' is a phi, try to optimize it.
756   ///
757   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
758   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
759   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
760                              const MemoryLocation &Loc) {
761     assert(Paths.empty() && VisitedPhis.empty() &&
762            "Reset the optimization state.");
763 
764     Paths.emplace_back(Loc, Start, Phi, None);
765     // Stores how many "valid" optimization nodes we had prior to calling
766     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
767     auto PriorPathsSize = Paths.size();
768 
769     SmallVector<ListIndex, 16> PausedSearches;
770     SmallVector<ListIndex, 8> NewPaused;
771     SmallVector<TerminatedPath, 4> TerminatedPaths;
772 
773     addSearches(Phi, PausedSearches, 0);
774 
775     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
776     // Paths.
777     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
778       assert(!Paths.empty() && "Need a path to move");
779       auto Dom = Paths.begin();
780       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
781         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
782           Dom = I;
783       auto Last = Paths.end() - 1;
784       if (Last != Dom)
785         std::iter_swap(Last, Dom);
786     };
787 
788     MemoryPhi *Current = Phi;
789     while (true) {
790       assert(!MSSA.isLiveOnEntryDef(Current) &&
791              "liveOnEntry wasn't treated as a clobber?");
792 
793       const auto *Target = getWalkTarget(Current);
794       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
795       // optimization for the prior phi.
796       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
797         return MSSA.dominates(P.Clobber, Target);
798       }));
799 
800       // FIXME: This is broken, because the Blocker may be reported to be
801       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
802       // For the moment, this is fine, since we do nothing with blocker info.
803       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
804               Target, PausedSearches, NewPaused, TerminatedPaths)) {
805 
806         // Find the node we started at. We can't search based on N->Last, since
807         // we may have gone around a loop with a different MemoryLocation.
808         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
809           return defPathIndex(N) < PriorPathsSize;
810         });
811         assert(Iter != def_path_iterator());
812 
813         DefPath &CurNode = *Iter;
814         assert(CurNode.Last == Current);
815 
816         // Two things:
817         // A. We can't reliably cache all of NewPaused back. Consider a case
818         //    where we have two paths in NewPaused; one of which can't optimize
819         //    above this phi, whereas the other can. If we cache the second path
820         //    back, we'll end up with suboptimal cache entries. We can handle
821         //    cases like this a bit better when we either try to find all
822         //    clobbers that block phi optimization, or when our cache starts
823         //    supporting unfinished searches.
824         // B. We can't reliably cache TerminatedPaths back here without doing
825         //    extra checks; consider a case like:
826         //       T
827         //      / \
828         //     D   C
829         //      \ /
830         //       S
831         //    Where T is our target, C is a node with a clobber on it, D is a
832         //    diamond (with a clobber *only* on the left or right node, N), and
833         //    S is our start. Say we walk to D, through the node opposite N
834         //    (read: ignoring the clobber), and see a cache entry in the top
835         //    node of D. That cache entry gets put into TerminatedPaths. We then
836         //    walk up to C (N is later in our worklist), find the clobber, and
837         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
838         //    the bottom part of D to the cached clobber, ignoring the clobber
839         //    in N. Again, this problem goes away if we start tracking all
840         //    blockers for a given phi optimization.
841         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
842         return {Result, {}};
843       }
844 
845       // If there's nothing left to search, then all paths led to valid clobbers
846       // that we got from our cache; pick the nearest to the start, and allow
847       // the rest to be cached back.
848       if (NewPaused.empty()) {
849         MoveDominatedPathToEnd(TerminatedPaths);
850         TerminatedPath Result = TerminatedPaths.pop_back_val();
851         return {Result, std::move(TerminatedPaths)};
852       }
853 
854       MemoryAccess *DefChainEnd = nullptr;
855       SmallVector<TerminatedPath, 4> Clobbers;
856       for (ListIndex Paused : NewPaused) {
857         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
858         if (WR.IsKnownClobber)
859           Clobbers.push_back({WR.Result, Paused});
860         else
861           // Micro-opt: If we hit the end of the chain, save it.
862           DefChainEnd = WR.Result;
863       }
864 
865       if (!TerminatedPaths.empty()) {
866         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
867         // do it now.
868         if (!DefChainEnd)
869           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
870             DefChainEnd = MA;
871 
872         // If any of the terminated paths don't dominate the phi we'll try to
873         // optimize, we need to figure out what they are and quit.
874         const BasicBlock *ChainBB = DefChainEnd->getBlock();
875         for (const TerminatedPath &TP : TerminatedPaths) {
876           // Because we know that DefChainEnd is as "high" as we can go, we
877           // don't need local dominance checks; BB dominance is sufficient.
878           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
879             Clobbers.push_back(TP);
880         }
881       }
882 
883       // If we have clobbers in the def chain, find the one closest to Current
884       // and quit.
885       if (!Clobbers.empty()) {
886         MoveDominatedPathToEnd(Clobbers);
887         TerminatedPath Result = Clobbers.pop_back_val();
888         return {Result, std::move(Clobbers)};
889       }
890 
891       assert(all_of(NewPaused,
892                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
893 
894       // Because liveOnEntry is a clobber, this must be a phi.
895       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
896 
897       PriorPathsSize = Paths.size();
898       PausedSearches.clear();
899       for (ListIndex I : NewPaused)
900         addSearches(DefChainPhi, PausedSearches, I);
901       NewPaused.clear();
902 
903       Current = DefChainPhi;
904     }
905   }
906 
907   void verifyOptResult(const OptznResult &R) const {
908     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
909       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
910     }));
911   }
912 
913   void resetPhiOptznState() {
914     Paths.clear();
915     VisitedPhis.clear();
916   }
917 
918 public:
919   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
920       : MSSA(MSSA), AA(AA), DT(DT) {}
921 
922   AliasAnalysisType *getAA() { return &AA; }
923   /// Finds the nearest clobber for the given query, optimizing phis if
924   /// possible.
925   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
926                             unsigned &UpWalkLimit) {
927     Query = &Q;
928     UpwardWalkLimit = &UpWalkLimit;
929     // Starting limit must be > 0.
930     if (!UpWalkLimit)
931       UpWalkLimit++;
932 
933     MemoryAccess *Current = Start;
934     // This walker pretends uses don't exist. If we're handed one, silently grab
935     // its def. (This has the nice side-effect of ensuring we never cache uses)
936     if (auto *MU = dyn_cast<MemoryUse>(Start))
937       Current = MU->getDefiningAccess();
938 
939     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
940     // Fast path for the overly-common case (no crazy phi optimization
941     // necessary)
942     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
943     MemoryAccess *Result;
944     if (WalkResult.IsKnownClobber) {
945       Result = WalkResult.Result;
946       Q.AR = WalkResult.AR;
947     } else {
948       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
949                                           Current, Q.StartingLoc);
950       verifyOptResult(OptRes);
951       resetPhiOptznState();
952       Result = OptRes.PrimaryClobber.Clobber;
953     }
954 
955 #ifdef EXPENSIVE_CHECKS
956     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
957       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
958 #endif
959     return Result;
960   }
961 };
962 
963 struct RenamePassData {
964   DomTreeNode *DTN;
965   DomTreeNode::const_iterator ChildIt;
966   MemoryAccess *IncomingVal;
967 
968   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
969                  MemoryAccess *M)
970       : DTN(D), ChildIt(It), IncomingVal(M) {}
971 
972   void swap(RenamePassData &RHS) {
973     std::swap(DTN, RHS.DTN);
974     std::swap(ChildIt, RHS.ChildIt);
975     std::swap(IncomingVal, RHS.IncomingVal);
976   }
977 };
978 
979 } // end anonymous namespace
980 
981 namespace llvm {
982 
983 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
984   ClobberWalker<AliasAnalysisType> Walker;
985   MemorySSA *MSSA;
986 
987 public:
988   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
989       : Walker(*M, *A, *D), MSSA(M) {}
990 
991   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
992                                               const MemoryLocation &,
993                                               unsigned &);
994   // Third argument (bool), defines whether the clobber search should skip the
995   // original queried access. If true, there will be a follow-up query searching
996   // for a clobber access past "self". Note that the Optimized access is not
997   // updated if a new clobber is found by this SkipSelf search. If this
998   // additional query becomes heavily used we may decide to cache the result.
999   // Walker instantiations will decide how to set the SkipSelf bool.
1000   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1001 };
1002 
1003 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1004 /// longer does caching on its own, but the name has been retained for the
1005 /// moment.
1006 template <class AliasAnalysisType>
1007 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1008   ClobberWalkerBase<AliasAnalysisType> *Walker;
1009 
1010 public:
1011   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1012       : MemorySSAWalker(M), Walker(W) {}
1013   ~CachingWalker() override = default;
1014 
1015   using MemorySSAWalker::getClobberingMemoryAccess;
1016 
1017   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1018     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1019   }
1020   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1021                                           const MemoryLocation &Loc,
1022                                           unsigned &UWL) {
1023     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1024   }
1025 
1026   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1027     unsigned UpwardWalkLimit = MaxCheckLimit;
1028     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1029   }
1030   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1031                                           const MemoryLocation &Loc) override {
1032     unsigned UpwardWalkLimit = MaxCheckLimit;
1033     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1034   }
1035 
1036   void invalidateInfo(MemoryAccess *MA) override {
1037     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1038       MUD->resetOptimized();
1039   }
1040 };
1041 
1042 template <class AliasAnalysisType>
1043 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1044   ClobberWalkerBase<AliasAnalysisType> *Walker;
1045 
1046 public:
1047   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1048       : MemorySSAWalker(M), Walker(W) {}
1049   ~SkipSelfWalker() override = default;
1050 
1051   using MemorySSAWalker::getClobberingMemoryAccess;
1052 
1053   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1054     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1055   }
1056   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1057                                           const MemoryLocation &Loc,
1058                                           unsigned &UWL) {
1059     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1060   }
1061 
1062   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1063     unsigned UpwardWalkLimit = MaxCheckLimit;
1064     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1065   }
1066   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1067                                           const MemoryLocation &Loc) override {
1068     unsigned UpwardWalkLimit = MaxCheckLimit;
1069     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1070   }
1071 
1072   void invalidateInfo(MemoryAccess *MA) override {
1073     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1074       MUD->resetOptimized();
1075   }
1076 };
1077 
1078 } // end namespace llvm
1079 
1080 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1081                                     bool RenameAllUses) {
1082   // Pass through values to our successors
1083   for (const BasicBlock *S : successors(BB)) {
1084     auto It = PerBlockAccesses.find(S);
1085     // Rename the phi nodes in our successor block
1086     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1087       continue;
1088     AccessList *Accesses = It->second.get();
1089     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1090     if (RenameAllUses) {
1091       int PhiIndex = Phi->getBasicBlockIndex(BB);
1092       assert(PhiIndex != -1 && "Incomplete phi during partial rename");
1093       Phi->setIncomingValue(PhiIndex, IncomingVal);
1094     } else
1095       Phi->addIncoming(IncomingVal, BB);
1096   }
1097 }
1098 
1099 /// Rename a single basic block into MemorySSA form.
1100 /// Uses the standard SSA renaming algorithm.
1101 /// \returns The new incoming value.
1102 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1103                                      bool RenameAllUses) {
1104   auto It = PerBlockAccesses.find(BB);
1105   // Skip most processing if the list is empty.
1106   if (It != PerBlockAccesses.end()) {
1107     AccessList *Accesses = It->second.get();
1108     for (MemoryAccess &L : *Accesses) {
1109       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1110         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1111           MUD->setDefiningAccess(IncomingVal);
1112         if (isa<MemoryDef>(&L))
1113           IncomingVal = &L;
1114       } else {
1115         IncomingVal = &L;
1116       }
1117     }
1118   }
1119   return IncomingVal;
1120 }
1121 
1122 /// This is the standard SSA renaming algorithm.
1123 ///
1124 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1125 /// in phi nodes in our successors.
1126 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1127                            SmallPtrSetImpl<BasicBlock *> &Visited,
1128                            bool SkipVisited, bool RenameAllUses) {
1129   assert(Root && "Trying to rename accesses in an unreachable block");
1130 
1131   SmallVector<RenamePassData, 32> WorkStack;
1132   // Skip everything if we already renamed this block and we are skipping.
1133   // Note: You can't sink this into the if, because we need it to occur
1134   // regardless of whether we skip blocks or not.
1135   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1136   if (SkipVisited && AlreadyVisited)
1137     return;
1138 
1139   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1140   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1141   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1142 
1143   while (!WorkStack.empty()) {
1144     DomTreeNode *Node = WorkStack.back().DTN;
1145     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1146     IncomingVal = WorkStack.back().IncomingVal;
1147 
1148     if (ChildIt == Node->end()) {
1149       WorkStack.pop_back();
1150     } else {
1151       DomTreeNode *Child = *ChildIt;
1152       ++WorkStack.back().ChildIt;
1153       BasicBlock *BB = Child->getBlock();
1154       // Note: You can't sink this into the if, because we need it to occur
1155       // regardless of whether we skip blocks or not.
1156       AlreadyVisited = !Visited.insert(BB).second;
1157       if (SkipVisited && AlreadyVisited) {
1158         // We already visited this during our renaming, which can happen when
1159         // being asked to rename multiple blocks. Figure out the incoming val,
1160         // which is the last def.
1161         // Incoming value can only change if there is a block def, and in that
1162         // case, it's the last block def in the list.
1163         if (auto *BlockDefs = getWritableBlockDefs(BB))
1164           IncomingVal = &*BlockDefs->rbegin();
1165       } else
1166         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1167       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1168       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1169     }
1170   }
1171 }
1172 
1173 /// This handles unreachable block accesses by deleting phi nodes in
1174 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1175 /// being uses of the live on entry definition.
1176 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1177   assert(!DT->isReachableFromEntry(BB) &&
1178          "Reachable block found while handling unreachable blocks");
1179 
1180   // Make sure phi nodes in our reachable successors end up with a
1181   // LiveOnEntryDef for our incoming edge, even though our block is forward
1182   // unreachable.  We could just disconnect these blocks from the CFG fully,
1183   // but we do not right now.
1184   for (const BasicBlock *S : successors(BB)) {
1185     if (!DT->isReachableFromEntry(S))
1186       continue;
1187     auto It = PerBlockAccesses.find(S);
1188     // Rename the phi nodes in our successor block
1189     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1190       continue;
1191     AccessList *Accesses = It->second.get();
1192     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1193     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1194   }
1195 
1196   auto It = PerBlockAccesses.find(BB);
1197   if (It == PerBlockAccesses.end())
1198     return;
1199 
1200   auto &Accesses = It->second;
1201   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1202     auto Next = std::next(AI);
1203     // If we have a phi, just remove it. We are going to replace all
1204     // users with live on entry.
1205     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1206       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1207     else
1208       Accesses->erase(AI);
1209     AI = Next;
1210   }
1211 }
1212 
1213 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1214     : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1215       SkipWalker(nullptr), NextID(0) {
1216   // Build MemorySSA using a batch alias analysis. This reuses the internal
1217   // state that AA collects during an alias()/getModRefInfo() call. This is
1218   // safe because there are no CFG changes while building MemorySSA and can
1219   // significantly reduce the time spent by the compiler in AA, because we will
1220   // make queries about all the instructions in the Function.
1221   BatchAAResults BatchAA(*AA);
1222   buildMemorySSA(BatchAA);
1223   // Intentionally leave AA to nullptr while building so we don't accidently
1224   // use non-batch AliasAnalysis.
1225   this->AA = AA;
1226   // Also create the walker here.
1227   getWalker();
1228 }
1229 
1230 MemorySSA::~MemorySSA() {
1231   // Drop all our references
1232   for (const auto &Pair : PerBlockAccesses)
1233     for (MemoryAccess &MA : *Pair.second)
1234       MA.dropAllReferences();
1235 }
1236 
1237 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1238   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1239 
1240   if (Res.second)
1241     Res.first->second = llvm::make_unique<AccessList>();
1242   return Res.first->second.get();
1243 }
1244 
1245 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1246   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1247 
1248   if (Res.second)
1249     Res.first->second = llvm::make_unique<DefsList>();
1250   return Res.first->second.get();
1251 }
1252 
1253 namespace llvm {
1254 
1255 /// This class is a batch walker of all MemoryUse's in the program, and points
1256 /// their defining access at the thing that actually clobbers them.  Because it
1257 /// is a batch walker that touches everything, it does not operate like the
1258 /// other walkers.  This walker is basically performing a top-down SSA renaming
1259 /// pass, where the version stack is used as the cache.  This enables it to be
1260 /// significantly more time and memory efficient than using the regular walker,
1261 /// which is walking bottom-up.
1262 class MemorySSA::OptimizeUses {
1263 public:
1264   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1265                BatchAAResults *BAA, DominatorTree *DT)
1266       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1267 
1268   void optimizeUses();
1269 
1270 private:
1271   /// This represents where a given memorylocation is in the stack.
1272   struct MemlocStackInfo {
1273     // This essentially is keeping track of versions of the stack. Whenever
1274     // the stack changes due to pushes or pops, these versions increase.
1275     unsigned long StackEpoch;
1276     unsigned long PopEpoch;
1277     // This is the lower bound of places on the stack to check. It is equal to
1278     // the place the last stack walk ended.
1279     // Note: Correctness depends on this being initialized to 0, which densemap
1280     // does
1281     unsigned long LowerBound;
1282     const BasicBlock *LowerBoundBlock;
1283     // This is where the last walk for this memory location ended.
1284     unsigned long LastKill;
1285     bool LastKillValid;
1286     Optional<AliasResult> AR;
1287   };
1288 
1289   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1290                            SmallVectorImpl<MemoryAccess *> &,
1291                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1292 
1293   MemorySSA *MSSA;
1294   CachingWalker<BatchAAResults> *Walker;
1295   BatchAAResults *AA;
1296   DominatorTree *DT;
1297 };
1298 
1299 } // end namespace llvm
1300 
1301 /// Optimize the uses in a given block This is basically the SSA renaming
1302 /// algorithm, with one caveat: We are able to use a single stack for all
1303 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1304 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1305 /// going to be some position in that stack of possible ones.
1306 ///
1307 /// We track the stack positions that each MemoryLocation needs
1308 /// to check, and last ended at.  This is because we only want to check the
1309 /// things that changed since last time.  The same MemoryLocation should
1310 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1311 /// things like this, and if they start, we can modify MemoryLocOrCall to
1312 /// include relevant data)
1313 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1314     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1315     SmallVectorImpl<MemoryAccess *> &VersionStack,
1316     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1317 
1318   /// If no accesses, nothing to do.
1319   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1320   if (Accesses == nullptr)
1321     return;
1322 
1323   // Pop everything that doesn't dominate the current block off the stack,
1324   // increment the PopEpoch to account for this.
1325   while (true) {
1326     assert(
1327         !VersionStack.empty() &&
1328         "Version stack should have liveOnEntry sentinel dominating everything");
1329     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1330     if (DT->dominates(BackBlock, BB))
1331       break;
1332     while (VersionStack.back()->getBlock() == BackBlock)
1333       VersionStack.pop_back();
1334     ++PopEpoch;
1335   }
1336 
1337   for (MemoryAccess &MA : *Accesses) {
1338     auto *MU = dyn_cast<MemoryUse>(&MA);
1339     if (!MU) {
1340       VersionStack.push_back(&MA);
1341       ++StackEpoch;
1342       continue;
1343     }
1344 
1345     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1346       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1347       continue;
1348     }
1349 
1350     MemoryLocOrCall UseMLOC(MU);
1351     auto &LocInfo = LocStackInfo[UseMLOC];
1352     // If the pop epoch changed, it means we've removed stuff from top of
1353     // stack due to changing blocks. We may have to reset the lower bound or
1354     // last kill info.
1355     if (LocInfo.PopEpoch != PopEpoch) {
1356       LocInfo.PopEpoch = PopEpoch;
1357       LocInfo.StackEpoch = StackEpoch;
1358       // If the lower bound was in something that no longer dominates us, we
1359       // have to reset it.
1360       // We can't simply track stack size, because the stack may have had
1361       // pushes/pops in the meantime.
1362       // XXX: This is non-optimal, but only is slower cases with heavily
1363       // branching dominator trees.  To get the optimal number of queries would
1364       // be to make lowerbound and lastkill a per-loc stack, and pop it until
1365       // the top of that stack dominates us.  This does not seem worth it ATM.
1366       // A much cheaper optimization would be to always explore the deepest
1367       // branch of the dominator tree first. This will guarantee this resets on
1368       // the smallest set of blocks.
1369       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1370           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1371         // Reset the lower bound of things to check.
1372         // TODO: Some day we should be able to reset to last kill, rather than
1373         // 0.
1374         LocInfo.LowerBound = 0;
1375         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1376         LocInfo.LastKillValid = false;
1377       }
1378     } else if (LocInfo.StackEpoch != StackEpoch) {
1379       // If all that has changed is the StackEpoch, we only have to check the
1380       // new things on the stack, because we've checked everything before.  In
1381       // this case, the lower bound of things to check remains the same.
1382       LocInfo.PopEpoch = PopEpoch;
1383       LocInfo.StackEpoch = StackEpoch;
1384     }
1385     if (!LocInfo.LastKillValid) {
1386       LocInfo.LastKill = VersionStack.size() - 1;
1387       LocInfo.LastKillValid = true;
1388       LocInfo.AR = MayAlias;
1389     }
1390 
1391     // At this point, we should have corrected last kill and LowerBound to be
1392     // in bounds.
1393     assert(LocInfo.LowerBound < VersionStack.size() &&
1394            "Lower bound out of range");
1395     assert(LocInfo.LastKill < VersionStack.size() &&
1396            "Last kill info out of range");
1397     // In any case, the new upper bound is the top of the stack.
1398     unsigned long UpperBound = VersionStack.size() - 1;
1399 
1400     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1401       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1402                         << *(MU->getMemoryInst()) << ")"
1403                         << " because there are "
1404                         << UpperBound - LocInfo.LowerBound
1405                         << " stores to disambiguate\n");
1406       // Because we did not walk, LastKill is no longer valid, as this may
1407       // have been a kill.
1408       LocInfo.LastKillValid = false;
1409       continue;
1410     }
1411     bool FoundClobberResult = false;
1412     unsigned UpwardWalkLimit = MaxCheckLimit;
1413     while (UpperBound > LocInfo.LowerBound) {
1414       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1415         // For phis, use the walker, see where we ended up, go there
1416         MemoryAccess *Result =
1417             Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1418         // We are guaranteed to find it or something is wrong
1419         while (VersionStack[UpperBound] != Result) {
1420           assert(UpperBound != 0);
1421           --UpperBound;
1422         }
1423         FoundClobberResult = true;
1424         break;
1425       }
1426 
1427       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1428       // If the lifetime of the pointer ends at this instruction, it's live on
1429       // entry.
1430       if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1431         // Reset UpperBound to liveOnEntryDef's place in the stack
1432         UpperBound = 0;
1433         FoundClobberResult = true;
1434         LocInfo.AR = MustAlias;
1435         break;
1436       }
1437       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1438       if (CA.IsClobber) {
1439         FoundClobberResult = true;
1440         LocInfo.AR = CA.AR;
1441         break;
1442       }
1443       --UpperBound;
1444     }
1445 
1446     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1447 
1448     // At the end of this loop, UpperBound is either a clobber, or lower bound
1449     // PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1450     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1451       // We were last killed now by where we got to
1452       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1453         LocInfo.AR = None;
1454       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1455       LocInfo.LastKill = UpperBound;
1456     } else {
1457       // Otherwise, we checked all the new ones, and now we know we can get to
1458       // LastKill.
1459       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1460     }
1461     LocInfo.LowerBound = VersionStack.size() - 1;
1462     LocInfo.LowerBoundBlock = BB;
1463   }
1464 }
1465 
1466 /// Optimize uses to point to their actual clobbering definitions.
1467 void MemorySSA::OptimizeUses::optimizeUses() {
1468   SmallVector<MemoryAccess *, 16> VersionStack;
1469   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1470   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1471 
1472   unsigned long StackEpoch = 1;
1473   unsigned long PopEpoch = 1;
1474   // We perform a non-recursive top-down dominator tree walk.
1475   for (const auto *DomNode : depth_first(DT->getRootNode()))
1476     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1477                         LocStackInfo);
1478 }
1479 
1480 void MemorySSA::placePHINodes(
1481     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1482   // Determine where our MemoryPhi's should go
1483   ForwardIDFCalculator IDFs(*DT);
1484   IDFs.setDefiningBlocks(DefiningBlocks);
1485   SmallVector<BasicBlock *, 32> IDFBlocks;
1486   IDFs.calculate(IDFBlocks);
1487 
1488   // Now place MemoryPhi nodes.
1489   for (auto &BB : IDFBlocks)
1490     createMemoryPhi(BB);
1491 }
1492 
1493 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1494   // We create an access to represent "live on entry", for things like
1495   // arguments or users of globals, where the memory they use is defined before
1496   // the beginning of the function. We do not actually insert it into the IR.
1497   // We do not define a live on exit for the immediate uses, and thus our
1498   // semantics do *not* imply that something with no immediate uses can simply
1499   // be removed.
1500   BasicBlock &StartingPoint = F.getEntryBlock();
1501   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1502                                      &StartingPoint, NextID++));
1503 
1504   // We maintain lists of memory accesses per-block, trading memory for time. We
1505   // could just look up the memory access for every possible instruction in the
1506   // stream.
1507   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1508   // Go through each block, figure out where defs occur, and chain together all
1509   // the accesses.
1510   for (BasicBlock &B : F) {
1511     bool InsertIntoDef = false;
1512     AccessList *Accesses = nullptr;
1513     DefsList *Defs = nullptr;
1514     for (Instruction &I : B) {
1515       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1516       if (!MUD)
1517         continue;
1518 
1519       if (!Accesses)
1520         Accesses = getOrCreateAccessList(&B);
1521       Accesses->push_back(MUD);
1522       if (isa<MemoryDef>(MUD)) {
1523         InsertIntoDef = true;
1524         if (!Defs)
1525           Defs = getOrCreateDefsList(&B);
1526         Defs->push_back(*MUD);
1527       }
1528     }
1529     if (InsertIntoDef)
1530       DefiningBlocks.insert(&B);
1531   }
1532   placePHINodes(DefiningBlocks);
1533 
1534   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1535   // filled in with all blocks.
1536   SmallPtrSet<BasicBlock *, 16> Visited;
1537   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1538 
1539   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1540   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1541   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1542 
1543   // Mark the uses in unreachable blocks as live on entry, so that they go
1544   // somewhere.
1545   for (auto &BB : F)
1546     if (!Visited.count(&BB))
1547       markUnreachableAsLiveOnEntry(&BB);
1548 }
1549 
1550 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1551 
1552 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1553   if (Walker)
1554     return Walker.get();
1555 
1556   if (!WalkerBase)
1557     WalkerBase =
1558         llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1559 
1560   Walker =
1561       llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1562   return Walker.get();
1563 }
1564 
1565 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1566   if (SkipWalker)
1567     return SkipWalker.get();
1568 
1569   if (!WalkerBase)
1570     WalkerBase =
1571         llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1572 
1573   SkipWalker =
1574       llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1575   return SkipWalker.get();
1576  }
1577 
1578 
1579 // This is a helper function used by the creation routines. It places NewAccess
1580 // into the access and defs lists for a given basic block, at the given
1581 // insertion point.
1582 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1583                                         const BasicBlock *BB,
1584                                         InsertionPlace Point) {
1585   auto *Accesses = getOrCreateAccessList(BB);
1586   if (Point == Beginning) {
1587     // If it's a phi node, it goes first, otherwise, it goes after any phi
1588     // nodes.
1589     if (isa<MemoryPhi>(NewAccess)) {
1590       Accesses->push_front(NewAccess);
1591       auto *Defs = getOrCreateDefsList(BB);
1592       Defs->push_front(*NewAccess);
1593     } else {
1594       auto AI = find_if_not(
1595           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1596       Accesses->insert(AI, NewAccess);
1597       if (!isa<MemoryUse>(NewAccess)) {
1598         auto *Defs = getOrCreateDefsList(BB);
1599         auto DI = find_if_not(
1600             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1601         Defs->insert(DI, *NewAccess);
1602       }
1603     }
1604   } else {
1605     Accesses->push_back(NewAccess);
1606     if (!isa<MemoryUse>(NewAccess)) {
1607       auto *Defs = getOrCreateDefsList(BB);
1608       Defs->push_back(*NewAccess);
1609     }
1610   }
1611   BlockNumberingValid.erase(BB);
1612 }
1613 
1614 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1615                                       AccessList::iterator InsertPt) {
1616   auto *Accesses = getWritableBlockAccesses(BB);
1617   bool WasEnd = InsertPt == Accesses->end();
1618   Accesses->insert(AccessList::iterator(InsertPt), What);
1619   if (!isa<MemoryUse>(What)) {
1620     auto *Defs = getOrCreateDefsList(BB);
1621     // If we got asked to insert at the end, we have an easy job, just shove it
1622     // at the end. If we got asked to insert before an existing def, we also get
1623     // an iterator. If we got asked to insert before a use, we have to hunt for
1624     // the next def.
1625     if (WasEnd) {
1626       Defs->push_back(*What);
1627     } else if (isa<MemoryDef>(InsertPt)) {
1628       Defs->insert(InsertPt->getDefsIterator(), *What);
1629     } else {
1630       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1631         ++InsertPt;
1632       // Either we found a def, or we are inserting at the end
1633       if (InsertPt == Accesses->end())
1634         Defs->push_back(*What);
1635       else
1636         Defs->insert(InsertPt->getDefsIterator(), *What);
1637     }
1638   }
1639   BlockNumberingValid.erase(BB);
1640 }
1641 
1642 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1643   // Keep it in the lookup tables, remove from the lists
1644   removeFromLists(What, false);
1645 
1646   // Note that moving should implicitly invalidate the optimized state of a
1647   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1648   // MemoryDef.
1649   if (auto *MD = dyn_cast<MemoryDef>(What))
1650     MD->resetOptimized();
1651   What->setBlock(BB);
1652 }
1653 
1654 // Move What before Where in the IR.  The end result is that What will belong to
1655 // the right lists and have the right Block set, but will not otherwise be
1656 // correct. It will not have the right defining access, and if it is a def,
1657 // things below it will not properly be updated.
1658 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1659                        AccessList::iterator Where) {
1660   prepareForMoveTo(What, BB);
1661   insertIntoListsBefore(What, BB, Where);
1662 }
1663 
1664 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1665                        InsertionPlace Point) {
1666   if (isa<MemoryPhi>(What)) {
1667     assert(Point == Beginning &&
1668            "Can only move a Phi at the beginning of the block");
1669     // Update lookup table entry
1670     ValueToMemoryAccess.erase(What->getBlock());
1671     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1672     (void)Inserted;
1673     assert(Inserted && "Cannot move a Phi to a block that already has one");
1674   }
1675 
1676   prepareForMoveTo(What, BB);
1677   insertIntoListsForBlock(What, BB, Point);
1678 }
1679 
1680 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1681   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1682   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1683   // Phi's always are placed at the front of the block.
1684   insertIntoListsForBlock(Phi, BB, Beginning);
1685   ValueToMemoryAccess[BB] = Phi;
1686   return Phi;
1687 }
1688 
1689 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1690                                                MemoryAccess *Definition,
1691                                                const MemoryUseOrDef *Template,
1692                                                bool CreationMustSucceed) {
1693   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1694   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1695   if (CreationMustSucceed)
1696     assert(NewAccess != nullptr && "Tried to create a memory access for a "
1697                                    "non-memory touching instruction");
1698   if (NewAccess)
1699     NewAccess->setDefiningAccess(Definition);
1700   return NewAccess;
1701 }
1702 
1703 // Return true if the instruction has ordering constraints.
1704 // Note specifically that this only considers stores and loads
1705 // because others are still considered ModRef by getModRefInfo.
1706 static inline bool isOrdered(const Instruction *I) {
1707   if (auto *SI = dyn_cast<StoreInst>(I)) {
1708     if (!SI->isUnordered())
1709       return true;
1710   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1711     if (!LI->isUnordered())
1712       return true;
1713   }
1714   return false;
1715 }
1716 
1717 /// Helper function to create new memory accesses
1718 template <typename AliasAnalysisType>
1719 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1720                                            AliasAnalysisType *AAP,
1721                                            const MemoryUseOrDef *Template) {
1722   // The assume intrinsic has a control dependency which we model by claiming
1723   // that it writes arbitrarily. Ignore that fake memory dependency here.
1724   // FIXME: Replace this special casing with a more accurate modelling of
1725   // assume's control dependency.
1726   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1727     if (II->getIntrinsicID() == Intrinsic::assume)
1728       return nullptr;
1729 
1730   bool Def, Use;
1731   if (Template) {
1732     Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1733     Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1734 #if !defined(NDEBUG)
1735     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1736     bool DefCheck, UseCheck;
1737     DefCheck = isModSet(ModRef) || isOrdered(I);
1738     UseCheck = isRefSet(ModRef);
1739     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1740 #endif
1741   } else {
1742     // Find out what affect this instruction has on memory.
1743     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1744     // The isOrdered check is used to ensure that volatiles end up as defs
1745     // (atomics end up as ModRef right now anyway).  Until we separate the
1746     // ordering chain from the memory chain, this enables people to see at least
1747     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1748     // will still give an answer that bypasses other volatile loads.  TODO:
1749     // Separate memory aliasing and ordering into two different chains so that
1750     // we can precisely represent both "what memory will this read/write/is
1751     // clobbered by" and "what instructions can I move this past".
1752     Def = isModSet(ModRef) || isOrdered(I);
1753     Use = isRefSet(ModRef);
1754   }
1755 
1756   // It's possible for an instruction to not modify memory at all. During
1757   // construction, we ignore them.
1758   if (!Def && !Use)
1759     return nullptr;
1760 
1761   MemoryUseOrDef *MUD;
1762   if (Def)
1763     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1764   else
1765     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1766   ValueToMemoryAccess[I] = MUD;
1767   return MUD;
1768 }
1769 
1770 /// Returns true if \p Replacer dominates \p Replacee .
1771 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1772                              const MemoryAccess *Replacee) const {
1773   if (isa<MemoryUseOrDef>(Replacee))
1774     return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1775   const auto *MP = cast<MemoryPhi>(Replacee);
1776   // For a phi node, the use occurs in the predecessor block of the phi node.
1777   // Since we may occur multiple times in the phi node, we have to check each
1778   // operand to ensure Replacer dominates each operand where Replacee occurs.
1779   for (const Use &Arg : MP->operands()) {
1780     if (Arg.get() != Replacee &&
1781         !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1782       return false;
1783   }
1784   return true;
1785 }
1786 
1787 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1788 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1789   assert(MA->use_empty() &&
1790          "Trying to remove memory access that still has uses");
1791   BlockNumbering.erase(MA);
1792   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1793     MUD->setDefiningAccess(nullptr);
1794   // Invalidate our walker's cache if necessary
1795   if (!isa<MemoryUse>(MA))
1796     getWalker()->invalidateInfo(MA);
1797 
1798   Value *MemoryInst;
1799   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1800     MemoryInst = MUD->getMemoryInst();
1801   else
1802     MemoryInst = MA->getBlock();
1803 
1804   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1805   if (VMA->second == MA)
1806     ValueToMemoryAccess.erase(VMA);
1807 }
1808 
1809 /// Properly remove \p MA from all of MemorySSA's lists.
1810 ///
1811 /// Because of the way the intrusive list and use lists work, it is important to
1812 /// do removal in the right order.
1813 /// ShouldDelete defaults to true, and will cause the memory access to also be
1814 /// deleted, not just removed.
1815 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1816   BasicBlock *BB = MA->getBlock();
1817   // The access list owns the reference, so we erase it from the non-owning list
1818   // first.
1819   if (!isa<MemoryUse>(MA)) {
1820     auto DefsIt = PerBlockDefs.find(BB);
1821     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1822     Defs->remove(*MA);
1823     if (Defs->empty())
1824       PerBlockDefs.erase(DefsIt);
1825   }
1826 
1827   // The erase call here will delete it. If we don't want it deleted, we call
1828   // remove instead.
1829   auto AccessIt = PerBlockAccesses.find(BB);
1830   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1831   if (ShouldDelete)
1832     Accesses->erase(MA);
1833   else
1834     Accesses->remove(MA);
1835 
1836   if (Accesses->empty()) {
1837     PerBlockAccesses.erase(AccessIt);
1838     BlockNumberingValid.erase(BB);
1839   }
1840 }
1841 
1842 void MemorySSA::print(raw_ostream &OS) const {
1843   MemorySSAAnnotatedWriter Writer(this);
1844   F.print(OS, &Writer);
1845 }
1846 
1847 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1848 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1849 #endif
1850 
1851 void MemorySSA::verifyMemorySSA() const {
1852   verifyDefUses(F);
1853   verifyDomination(F);
1854   verifyOrdering(F);
1855   verifyDominationNumbers(F);
1856   verifyPrevDefInPhis(F);
1857   // Previously, the verification used to also verify that the clobberingAccess
1858   // cached by MemorySSA is the same as the clobberingAccess found at a later
1859   // query to AA. This does not hold true in general due to the current fragility
1860   // of BasicAA which has arbitrary caps on the things it analyzes before giving
1861   // up. As a result, transformations that are correct, will lead to BasicAA
1862   // returning different Alias answers before and after that transformation.
1863   // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
1864   // random, in the worst case we'd need to rebuild MemorySSA from scratch after
1865   // every transformation, which defeats the purpose of using it. For such an
1866   // example, see test4 added in D51960.
1867 }
1868 
1869 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1870 #ifndef NDEBUG
1871   for (const BasicBlock &BB : F) {
1872     if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1873       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1874         auto *Pred = Phi->getIncomingBlock(I);
1875         auto *IncAcc = Phi->getIncomingValue(I);
1876         // If Pred has no unreachable predecessors, get last def looking at
1877         // IDoms. If, while walkings IDoms, any of these has an unreachable
1878         // predecessor, then the expected incoming def is LoE.
1879         if (auto *DTNode = DT->getNode(Pred)) {
1880           while (DTNode) {
1881             if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1882               auto *LastAcc = &*(--DefList->end());
1883               assert(LastAcc == IncAcc &&
1884                      "Incorrect incoming access into phi.");
1885               break;
1886             }
1887             DTNode = DTNode->getIDom();
1888           }
1889           assert((DTNode || IncAcc == getLiveOnEntryDef()) &&
1890                  "Expected LoE inc");
1891         } else if (auto *DefList = getBlockDefs(Pred)) {
1892           // If Pred has unreachable predecessors, but has at least a Def, the
1893           // incoming access can be the last Def in Pred, or it could have been
1894           // optimized to LoE.
1895           auto *LastAcc = &*(--DefList->end());
1896           assert((LastAcc == IncAcc || IncAcc == getLiveOnEntryDef()) &&
1897                  "Incorrect incoming access into phi.");
1898         } else {
1899           // If Pred has unreachable predecessors and no Defs, incoming access
1900           // should be LoE.
1901           assert(IncAcc == getLiveOnEntryDef() && "Expected LoE inc");
1902         }
1903       }
1904     }
1905   }
1906 #endif
1907 }
1908 
1909 /// Verify that all of the blocks we believe to have valid domination numbers
1910 /// actually have valid domination numbers.
1911 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1912 #ifndef NDEBUG
1913   if (BlockNumberingValid.empty())
1914     return;
1915 
1916   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1917   for (const BasicBlock &BB : F) {
1918     if (!ValidBlocks.count(&BB))
1919       continue;
1920 
1921     ValidBlocks.erase(&BB);
1922 
1923     const AccessList *Accesses = getBlockAccesses(&BB);
1924     // It's correct to say an empty block has valid numbering.
1925     if (!Accesses)
1926       continue;
1927 
1928     // Block numbering starts at 1.
1929     unsigned long LastNumber = 0;
1930     for (const MemoryAccess &MA : *Accesses) {
1931       auto ThisNumberIter = BlockNumbering.find(&MA);
1932       assert(ThisNumberIter != BlockNumbering.end() &&
1933              "MemoryAccess has no domination number in a valid block!");
1934 
1935       unsigned long ThisNumber = ThisNumberIter->second;
1936       assert(ThisNumber > LastNumber &&
1937              "Domination numbers should be strictly increasing!");
1938       LastNumber = ThisNumber;
1939     }
1940   }
1941 
1942   assert(ValidBlocks.empty() &&
1943          "All valid BasicBlocks should exist in F -- dangling pointers?");
1944 #endif
1945 }
1946 
1947 /// Verify that the order and existence of MemoryAccesses matches the
1948 /// order and existence of memory affecting instructions.
1949 void MemorySSA::verifyOrdering(Function &F) const {
1950 #ifndef NDEBUG
1951   // Walk all the blocks, comparing what the lookups think and what the access
1952   // lists think, as well as the order in the blocks vs the order in the access
1953   // lists.
1954   SmallVector<MemoryAccess *, 32> ActualAccesses;
1955   SmallVector<MemoryAccess *, 32> ActualDefs;
1956   for (BasicBlock &B : F) {
1957     const AccessList *AL = getBlockAccesses(&B);
1958     const auto *DL = getBlockDefs(&B);
1959     MemoryAccess *Phi = getMemoryAccess(&B);
1960     if (Phi) {
1961       ActualAccesses.push_back(Phi);
1962       ActualDefs.push_back(Phi);
1963     }
1964 
1965     for (Instruction &I : B) {
1966       MemoryAccess *MA = getMemoryAccess(&I);
1967       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1968              "We have memory affecting instructions "
1969              "in this block but they are not in the "
1970              "access list or defs list");
1971       if (MA) {
1972         ActualAccesses.push_back(MA);
1973         if (isa<MemoryDef>(MA))
1974           ActualDefs.push_back(MA);
1975       }
1976     }
1977     // Either we hit the assert, really have no accesses, or we have both
1978     // accesses and an access list.
1979     // Same with defs.
1980     if (!AL && !DL)
1981       continue;
1982     assert(AL->size() == ActualAccesses.size() &&
1983            "We don't have the same number of accesses in the block as on the "
1984            "access list");
1985     assert((DL || ActualDefs.size() == 0) &&
1986            "Either we should have a defs list, or we should have no defs");
1987     assert((!DL || DL->size() == ActualDefs.size()) &&
1988            "We don't have the same number of defs in the block as on the "
1989            "def list");
1990     auto ALI = AL->begin();
1991     auto AAI = ActualAccesses.begin();
1992     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1993       assert(&*ALI == *AAI && "Not the same accesses in the same order");
1994       ++ALI;
1995       ++AAI;
1996     }
1997     ActualAccesses.clear();
1998     if (DL) {
1999       auto DLI = DL->begin();
2000       auto ADI = ActualDefs.begin();
2001       while (DLI != DL->end() && ADI != ActualDefs.end()) {
2002         assert(&*DLI == *ADI && "Not the same defs in the same order");
2003         ++DLI;
2004         ++ADI;
2005       }
2006     }
2007     ActualDefs.clear();
2008   }
2009 #endif
2010 }
2011 
2012 /// Verify the domination properties of MemorySSA by checking that each
2013 /// definition dominates all of its uses.
2014 void MemorySSA::verifyDomination(Function &F) const {
2015 #ifndef NDEBUG
2016   for (BasicBlock &B : F) {
2017     // Phi nodes are attached to basic blocks
2018     if (MemoryPhi *MP = getMemoryAccess(&B))
2019       for (const Use &U : MP->uses())
2020         assert(dominates(MP, U) && "Memory PHI does not dominate it's uses");
2021 
2022     for (Instruction &I : B) {
2023       MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
2024       if (!MD)
2025         continue;
2026 
2027       for (const Use &U : MD->uses())
2028         assert(dominates(MD, U) && "Memory Def does not dominate it's uses");
2029     }
2030   }
2031 #endif
2032 }
2033 
2034 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2035 /// appears in the use list of \p Def.
2036 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2037 #ifndef NDEBUG
2038   // The live on entry use may cause us to get a NULL def here
2039   if (!Def)
2040     assert(isLiveOnEntryDef(Use) &&
2041            "Null def but use not point to live on entry def");
2042   else
2043     assert(is_contained(Def->users(), Use) &&
2044            "Did not find use in def's use list");
2045 #endif
2046 }
2047 
2048 /// Verify the immediate use information, by walking all the memory
2049 /// accesses and verifying that, for each use, it appears in the
2050 /// appropriate def's use list
2051 void MemorySSA::verifyDefUses(Function &F) const {
2052 #ifndef NDEBUG
2053   for (BasicBlock &B : F) {
2054     // Phi nodes are attached to basic blocks
2055     if (MemoryPhi *Phi = getMemoryAccess(&B)) {
2056       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2057                                           pred_begin(&B), pred_end(&B))) &&
2058              "Incomplete MemoryPhi Node");
2059       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2060         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2061         assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
2062                    pred_end(&B) &&
2063                "Incoming phi block not a block predecessor");
2064       }
2065     }
2066 
2067     for (Instruction &I : B) {
2068       if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
2069         verifyUseInDefs(MA->getDefiningAccess(), MA);
2070       }
2071     }
2072   }
2073 #endif
2074 }
2075 
2076 /// Perform a local numbering on blocks so that instruction ordering can be
2077 /// determined in constant time.
2078 /// TODO: We currently just number in order.  If we numbered by N, we could
2079 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2080 /// log2(N) sequences of mixed before and after) without needing to invalidate
2081 /// the numbering.
2082 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2083   // The pre-increment ensures the numbers really start at 1.
2084   unsigned long CurrentNumber = 0;
2085   const AccessList *AL = getBlockAccesses(B);
2086   assert(AL != nullptr && "Asking to renumber an empty block");
2087   for (const auto &I : *AL)
2088     BlockNumbering[&I] = ++CurrentNumber;
2089   BlockNumberingValid.insert(B);
2090 }
2091 
2092 /// Determine, for two memory accesses in the same block,
2093 /// whether \p Dominator dominates \p Dominatee.
2094 /// \returns True if \p Dominator dominates \p Dominatee.
2095 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2096                                  const MemoryAccess *Dominatee) const {
2097   const BasicBlock *DominatorBlock = Dominator->getBlock();
2098 
2099   assert((DominatorBlock == Dominatee->getBlock()) &&
2100          "Asking for local domination when accesses are in different blocks!");
2101   // A node dominates itself.
2102   if (Dominatee == Dominator)
2103     return true;
2104 
2105   // When Dominatee is defined on function entry, it is not dominated by another
2106   // memory access.
2107   if (isLiveOnEntryDef(Dominatee))
2108     return false;
2109 
2110   // When Dominator is defined on function entry, it dominates the other memory
2111   // access.
2112   if (isLiveOnEntryDef(Dominator))
2113     return true;
2114 
2115   if (!BlockNumberingValid.count(DominatorBlock))
2116     renumberBlock(DominatorBlock);
2117 
2118   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2119   // All numbers start with 1
2120   assert(DominatorNum != 0 && "Block was not numbered properly");
2121   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2122   assert(DominateeNum != 0 && "Block was not numbered properly");
2123   return DominatorNum < DominateeNum;
2124 }
2125 
2126 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2127                           const MemoryAccess *Dominatee) const {
2128   if (Dominator == Dominatee)
2129     return true;
2130 
2131   if (isLiveOnEntryDef(Dominatee))
2132     return false;
2133 
2134   if (Dominator->getBlock() != Dominatee->getBlock())
2135     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2136   return locallyDominates(Dominator, Dominatee);
2137 }
2138 
2139 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2140                           const Use &Dominatee) const {
2141   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2142     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2143     // The def must dominate the incoming block of the phi.
2144     if (UseBB != Dominator->getBlock())
2145       return DT->dominates(Dominator->getBlock(), UseBB);
2146     // If the UseBB and the DefBB are the same, compare locally.
2147     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2148   }
2149   // If it's not a PHI node use, the normal dominates can already handle it.
2150   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2151 }
2152 
2153 const static char LiveOnEntryStr[] = "liveOnEntry";
2154 
2155 void MemoryAccess::print(raw_ostream &OS) const {
2156   switch (getValueID()) {
2157   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2158   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2159   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2160   }
2161   llvm_unreachable("invalid value id");
2162 }
2163 
2164 void MemoryDef::print(raw_ostream &OS) const {
2165   MemoryAccess *UO = getDefiningAccess();
2166 
2167   auto printID = [&OS](MemoryAccess *A) {
2168     if (A && A->getID())
2169       OS << A->getID();
2170     else
2171       OS << LiveOnEntryStr;
2172   };
2173 
2174   OS << getID() << " = MemoryDef(";
2175   printID(UO);
2176   OS << ")";
2177 
2178   if (isOptimized()) {
2179     OS << "->";
2180     printID(getOptimized());
2181 
2182     if (Optional<AliasResult> AR = getOptimizedAccessType())
2183       OS << " " << *AR;
2184   }
2185 }
2186 
2187 void MemoryPhi::print(raw_ostream &OS) const {
2188   bool First = true;
2189   OS << getID() << " = MemoryPhi(";
2190   for (const auto &Op : operands()) {
2191     BasicBlock *BB = getIncomingBlock(Op);
2192     MemoryAccess *MA = cast<MemoryAccess>(Op);
2193     if (!First)
2194       OS << ',';
2195     else
2196       First = false;
2197 
2198     OS << '{';
2199     if (BB->hasName())
2200       OS << BB->getName();
2201     else
2202       BB->printAsOperand(OS, false);
2203     OS << ',';
2204     if (unsigned ID = MA->getID())
2205       OS << ID;
2206     else
2207       OS << LiveOnEntryStr;
2208     OS << '}';
2209   }
2210   OS << ')';
2211 }
2212 
2213 void MemoryUse::print(raw_ostream &OS) const {
2214   MemoryAccess *UO = getDefiningAccess();
2215   OS << "MemoryUse(";
2216   if (UO && UO->getID())
2217     OS << UO->getID();
2218   else
2219     OS << LiveOnEntryStr;
2220   OS << ')';
2221 
2222   if (Optional<AliasResult> AR = getOptimizedAccessType())
2223     OS << " " << *AR;
2224 }
2225 
2226 void MemoryAccess::dump() const {
2227 // Cannot completely remove virtual function even in release mode.
2228 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2229   print(dbgs());
2230   dbgs() << "\n";
2231 #endif
2232 }
2233 
2234 char MemorySSAPrinterLegacyPass::ID = 0;
2235 
2236 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2237   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2238 }
2239 
2240 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2241   AU.setPreservesAll();
2242   AU.addRequired<MemorySSAWrapperPass>();
2243 }
2244 
2245 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2246   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2247   MSSA.print(dbgs());
2248   if (VerifyMemorySSA)
2249     MSSA.verifyMemorySSA();
2250   return false;
2251 }
2252 
2253 AnalysisKey MemorySSAAnalysis::Key;
2254 
2255 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2256                                                  FunctionAnalysisManager &AM) {
2257   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2258   auto &AA = AM.getResult<AAManager>(F);
2259   return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
2260 }
2261 
2262 bool MemorySSAAnalysis::Result::invalidate(
2263     Function &F, const PreservedAnalyses &PA,
2264     FunctionAnalysisManager::Invalidator &Inv) {
2265   auto PAC = PA.getChecker<MemorySSAAnalysis>();
2266   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2267          Inv.invalidate<AAManager>(F, PA) ||
2268          Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2269 }
2270 
2271 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2272                                             FunctionAnalysisManager &AM) {
2273   OS << "MemorySSA for function: " << F.getName() << "\n";
2274   AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2275 
2276   return PreservedAnalyses::all();
2277 }
2278 
2279 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2280                                              FunctionAnalysisManager &AM) {
2281   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2282 
2283   return PreservedAnalyses::all();
2284 }
2285 
2286 char MemorySSAWrapperPass::ID = 0;
2287 
2288 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2289   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2290 }
2291 
2292 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2293 
2294 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2295   AU.setPreservesAll();
2296   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2297   AU.addRequiredTransitive<AAResultsWrapperPass>();
2298 }
2299 
2300 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2301   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2302   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2303   MSSA.reset(new MemorySSA(F, &AA, &DT));
2304   return false;
2305 }
2306 
2307 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2308 
2309 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2310   MSSA->print(OS);
2311 }
2312 
2313 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2314 
2315 /// Walk the use-def chains starting at \p StartingAccess and find
2316 /// the MemoryAccess that actually clobbers Loc.
2317 ///
2318 /// \returns our clobbering memory access
2319 template <typename AliasAnalysisType>
2320 MemoryAccess *
2321 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2322     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2323     unsigned &UpwardWalkLimit) {
2324   if (isa<MemoryPhi>(StartingAccess))
2325     return StartingAccess;
2326 
2327   auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2328   if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2329     return StartingUseOrDef;
2330 
2331   Instruction *I = StartingUseOrDef->getMemoryInst();
2332 
2333   // Conservatively, fences are always clobbers, so don't perform the walk if we
2334   // hit a fence.
2335   if (!isa<CallBase>(I) && I->isFenceLike())
2336     return StartingUseOrDef;
2337 
2338   UpwardsMemoryQuery Q;
2339   Q.OriginalAccess = StartingUseOrDef;
2340   Q.StartingLoc = Loc;
2341   Q.Inst = I;
2342   Q.IsCall = false;
2343 
2344   // Unlike the other function, do not walk to the def of a def, because we are
2345   // handed something we already believe is the clobbering access.
2346   // We never set SkipSelf to true in Q in this method.
2347   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2348                                      ? StartingUseOrDef->getDefiningAccess()
2349                                      : StartingUseOrDef;
2350 
2351   MemoryAccess *Clobber =
2352       Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2353   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2354   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2355   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2356   LLVM_DEBUG(dbgs() << *Clobber << "\n");
2357   return Clobber;
2358 }
2359 
2360 template <typename AliasAnalysisType>
2361 MemoryAccess *
2362 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2363     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2364   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2365   // If this is a MemoryPhi, we can't do anything.
2366   if (!StartingAccess)
2367     return MA;
2368 
2369   bool IsOptimized = false;
2370 
2371   // If this is an already optimized use or def, return the optimized result.
2372   // Note: Currently, we store the optimized def result in a separate field,
2373   // since we can't use the defining access.
2374   if (StartingAccess->isOptimized()) {
2375     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2376       return StartingAccess->getOptimized();
2377     IsOptimized = true;
2378   }
2379 
2380   const Instruction *I = StartingAccess->getMemoryInst();
2381   // We can't sanely do anything with a fence, since they conservatively clobber
2382   // all memory, and have no locations to get pointers from to try to
2383   // disambiguate.
2384   if (!isa<CallBase>(I) && I->isFenceLike())
2385     return StartingAccess;
2386 
2387   UpwardsMemoryQuery Q(I, StartingAccess);
2388 
2389   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2390     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2391     StartingAccess->setOptimized(LiveOnEntry);
2392     StartingAccess->setOptimizedAccessType(None);
2393     return LiveOnEntry;
2394   }
2395 
2396   MemoryAccess *OptimizedAccess;
2397   if (!IsOptimized) {
2398     // Start with the thing we already think clobbers this location
2399     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2400 
2401     // At this point, DefiningAccess may be the live on entry def.
2402     // If it is, we will not get a better result.
2403     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2404       StartingAccess->setOptimized(DefiningAccess);
2405       StartingAccess->setOptimizedAccessType(None);
2406       return DefiningAccess;
2407     }
2408 
2409     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2410     StartingAccess->setOptimized(OptimizedAccess);
2411     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2412       StartingAccess->setOptimizedAccessType(None);
2413     else if (Q.AR == MustAlias)
2414       StartingAccess->setOptimizedAccessType(MustAlias);
2415   } else
2416     OptimizedAccess = StartingAccess->getOptimized();
2417 
2418   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2419   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2420   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2421   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2422 
2423   MemoryAccess *Result;
2424   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2425       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2426     assert(isa<MemoryDef>(Q.OriginalAccess));
2427     Q.SkipSelfAccess = true;
2428     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2429   } else
2430     Result = OptimizedAccess;
2431 
2432   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2433   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2434 
2435   return Result;
2436 }
2437 
2438 MemoryAccess *
2439 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2440   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2441     return Use->getDefiningAccess();
2442   return MA;
2443 }
2444 
2445 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2446     MemoryAccess *StartingAccess, const MemoryLocation &) {
2447   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2448     return Use->getDefiningAccess();
2449   return StartingAccess;
2450 }
2451 
2452 void MemoryPhi::deleteMe(DerivedUser *Self) {
2453   delete static_cast<MemoryPhi *>(Self);
2454 }
2455 
2456 void MemoryDef::deleteMe(DerivedUser *Self) {
2457   delete static_cast<MemoryDef *>(Self);
2458 }
2459 
2460 void MemoryUse::deleteMe(DerivedUser *Self) {
2461   delete static_cast<MemoryUse *>(Self);
2462 }
2463