1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/iterator.h"
25 #include "llvm/ADT/iterator_range.h"
26 #include "llvm/Analysis/AliasAnalysis.h"
27 #include "llvm/Analysis/IteratedDominanceFrontier.h"
28 #include "llvm/Analysis/MemoryLocation.h"
29 #include "llvm/Config/llvm-config.h"
30 #include "llvm/IR/AssemblyAnnotationWriter.h"
31 #include "llvm/IR/BasicBlock.h"
32 #include "llvm/IR/Dominators.h"
33 #include "llvm/IR/Function.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/Intrinsics.h"
38 #include "llvm/IR/LLVMContext.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/IR/Use.h"
41 #include "llvm/Pass.h"
42 #include "llvm/Support/AtomicOrdering.h"
43 #include "llvm/Support/Casting.h"
44 #include "llvm/Support/CommandLine.h"
45 #include "llvm/Support/Compiler.h"
46 #include "llvm/Support/Debug.h"
47 #include "llvm/Support/ErrorHandling.h"
48 #include "llvm/Support/FormattedStream.h"
49 #include "llvm/Support/raw_ostream.h"
50 #include <algorithm>
51 #include <cassert>
52 #include <iterator>
53 #include <memory>
54 #include <utility>
55 
56 using namespace llvm;
57 
58 #define DEBUG_TYPE "memoryssa"
59 
60 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
61                       true)
62 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
63 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
64 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
65                     true)
66 
67 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
68                       "Memory SSA Printer", false, false)
69 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
70 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
71                     "Memory SSA Printer", false, false)
72 
73 static cl::opt<unsigned> MaxCheckLimit(
74     "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
77 
78 // Always verify MemorySSA if expensive checking is enabled.
79 #ifdef EXPENSIVE_CHECKS
80 bool llvm::VerifyMemorySSA = true;
81 #else
82 bool llvm::VerifyMemorySSA = false;
83 #endif
84 static cl::opt<bool, true>
85     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
86                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
87 
88 namespace llvm {
89 
90 /// An assembly annotator class to print Memory SSA information in
91 /// comments.
92 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
93   friend class MemorySSA;
94 
95   const MemorySSA *MSSA;
96 
97 public:
98   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
99 
100   void emitBasicBlockStartAnnot(const BasicBlock *BB,
101                                 formatted_raw_ostream &OS) override {
102     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
103       OS << "; " << *MA << "\n";
104   }
105 
106   void emitInstructionAnnot(const Instruction *I,
107                             formatted_raw_ostream &OS) override {
108     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
109       OS << "; " << *MA << "\n";
110   }
111 };
112 
113 } // end namespace llvm
114 
115 namespace {
116 
117 /// Our current alias analysis API differentiates heavily between calls and
118 /// non-calls, and functions called on one usually assert on the other.
119 /// This class encapsulates the distinction to simplify other code that wants
120 /// "Memory affecting instructions and related data" to use as a key.
121 /// For example, this class is used as a densemap key in the use optimizer.
122 class MemoryLocOrCall {
123 public:
124   bool IsCall = false;
125 
126   MemoryLocOrCall(MemoryUseOrDef *MUD)
127       : MemoryLocOrCall(MUD->getMemoryInst()) {}
128   MemoryLocOrCall(const MemoryUseOrDef *MUD)
129       : MemoryLocOrCall(MUD->getMemoryInst()) {}
130 
131   MemoryLocOrCall(Instruction *Inst) {
132     if (auto *C = dyn_cast<CallBase>(Inst)) {
133       IsCall = true;
134       Call = C;
135     } else {
136       IsCall = false;
      // There is no such thing as a MemoryLocation for a fence inst, and it is
      // unique in that regard.
139       if (!isa<FenceInst>(Inst))
140         Loc = MemoryLocation::get(Inst);
141     }
142   }
143 
144   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
145 
146   const CallBase *getCall() const {
147     assert(IsCall);
148     return Call;
149   }
150 
151   MemoryLocation getLoc() const {
152     assert(!IsCall);
153     return Loc;
154   }
155 
156   bool operator==(const MemoryLocOrCall &Other) const {
157     if (IsCall != Other.IsCall)
158       return false;
159 
160     if (!IsCall)
161       return Loc == Other.Loc;
162 
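    // For calls, require the same callee and identical argument values; this
    // mirrors the hashing of calls in DenseMapInfo<MemoryLocOrCall> below.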
163     if (Call->getCalledValue() != Other.Call->getCalledValue())
164       return false;
165 
166     return Call->arg_size() == Other.Call->arg_size() &&
167            std::equal(Call->arg_begin(), Call->arg_end(),
168                       Other.Call->arg_begin());
169   }
170 
171 private:
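  // Exactly one union member is meaningful: Call when IsCall is true,
  // Loc otherwise (fences get neither, see the constructor above).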
172   union {
173     const CallBase *Call;
174     MemoryLocation Loc;
175   };
176 };
177 
178 } // end anonymous namespace
179 
180 namespace llvm {
181 
182 template <> struct DenseMapInfo<MemoryLocOrCall> {
183   static inline MemoryLocOrCall getEmptyKey() {
184     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
185   }
186 
187   static inline MemoryLocOrCall getTombstoneKey() {
188     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
189   }
190 
191   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
192     if (!MLOC.IsCall)
193       return hash_combine(
194           MLOC.IsCall,
195           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
196 
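    // For calls, hash the callee and every argument value so that two
    // MemoryLocOrCalls that compare equal also hash equal.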
197     hash_code hash =
198         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
199                                       MLOC.getCall()->getCalledValue()));
200 
201     for (const Value *Arg : MLOC.getCall()->args())
202       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
203     return hash;
204   }
205 
206   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
207     return LHS == RHS;
208   }
209 };
210 
211 } // end namespace llvm
212 
213 /// This does one-way checks to see if Use could theoretically be hoisted above
214 /// MayClobber. This will not check the other way around.
215 ///
216 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
217 /// MayClobber, with no potentially clobbering operations in between them.
218 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
219 static bool areLoadsReorderable(const LoadInst *Use,
220                                 const LoadInst *MayClobber) {
221   bool VolatileUse = Use->isVolatile();
222   bool VolatileClobber = MayClobber->isVolatile();
223   // Volatile operations may never be reordered with other volatile operations.
224   if (VolatileUse && VolatileClobber)
225     return false;
226   // Otherwise, volatile doesn't matter here. From the language reference:
227   // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'
229 
230   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
231   // is weaker, it can be moved above other loads. We just need to be sure that
232   // MayClobber isn't an acquire load, because loads can't be moved above
233   // acquire loads.
234   //
235   // Note that this explicitly *does* allow the free reordering of monotonic (or
236   // weaker) loads of the same address.
237   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
238   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
239                                                      AtomicOrdering::Acquire);
240   return !(SeqCstUse || MayClobberIsAcquire);
241 }
242 
243 namespace {
244 
245 struct ClobberAlias {
246   bool IsClobber;
247   Optional<AliasResult> AR;
248 };
249 
250 } // end anonymous namespace
251 
252 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
253 // ignored if IsClobber = false.
254 template <typename AliasAnalysisType>
255 static ClobberAlias
256 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
257                          const Instruction *UseInst, AliasAnalysisType &AA) {
258   Instruction *DefInst = MD->getMemoryInst();
259   assert(DefInst && "Defining instruction not actually an instruction");
260   const auto *UseCall = dyn_cast<CallBase>(UseInst);
261   Optional<AliasResult> AR;
262 
263   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
264     // These intrinsics will show up as affecting memory, but they are just
265     // markers, mostly.
266     //
267     // FIXME: We probably don't actually want MemorySSA to model these at all
268     // (including creating MemoryAccesses for them): we just end up inventing
269     // clobbers where they don't really exist at all. Please see D43269 for
270     // context.
271     switch (II->getIntrinsicID()) {
272     case Intrinsic::lifetime_start:
273       if (UseCall)
274         return {false, NoAlias};
275       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
276       return {AR != NoAlias, AR};
277     case Intrinsic::lifetime_end:
278     case Intrinsic::invariant_start:
279     case Intrinsic::invariant_end:
280     case Intrinsic::assume:
281       return {false, NoAlias};
282     default:
283       break;
284     }
285   }
286 
287   if (UseCall) {
288     ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
289     AR = isMustSet(I) ? MustAlias : MayAlias;
290     return {isModOrRefSet(I), AR};
291   }
292 
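  // A load never clobbers another load unless ordering/volatility constraints
  // forbid hoisting the use above the (potential) clobber.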
293   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
294     if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
295       return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
296 
297   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
298   AR = isMustSet(I) ? MustAlias : MayAlias;
299   return {isModSet(I), AR};
300 }
301 
302 template <typename AliasAnalysisType>
303 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
304                                              const MemoryUseOrDef *MU,
305                                              const MemoryLocOrCall &UseMLOC,
306                                              AliasAnalysisType &AA) {
307   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
308   // to exist while MemoryLocOrCall is pushed through places.
309   if (UseMLOC.IsCall)
310     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
311                                     AA);
312   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
313                                   AA);
314 }
315 
// Return true when MD may clobber MU, false otherwise.
317 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
318                                         AliasAnalysis &AA) {
319   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
320 }
321 
322 namespace {
323 
324 struct UpwardsMemoryQuery {
325   // True if our original query started off as a call
326   bool IsCall = false;
327   // The pointer location we started the query with. This will be empty if
328   // IsCall is true.
329   MemoryLocation StartingLoc;
330   // This is the instruction we were querying about.
331   const Instruction *Inst = nullptr;
332   // The MemoryAccess we actually got called with, used to test local domination
333   const MemoryAccess *OriginalAccess = nullptr;
334   Optional<AliasResult> AR = MayAlias;
335   bool SkipSelfAccess = false;
336 
337   UpwardsMemoryQuery() = default;
338 
339   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
340       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
341     if (!IsCall)
342       StartingLoc = MemoryLocation::get(Inst);
343   }
344 };
345 
346 } // end anonymous namespace
347 
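// Return true if MD is a lifetime_end intrinsic whose pointer argument
// must-aliases Loc, i.e. the object's lifetime at Loc provably ends here.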
348 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
349                            BatchAAResults &AA) {
350   Instruction *Inst = MD->getMemoryInst();
351   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
352     switch (II->getIntrinsicID()) {
353     case Intrinsic::lifetime_end:
354       return AA.alias(MemoryLocation(II->getArgOperand(1)), Loc) == MustAlias;
355     default:
356       return false;
357     }
358   }
359   return false;
360 }
361 
362 template <typename AliasAnalysisType>
363 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
364                                                    const Instruction *I) {
365   // If the memory can't be changed, then loads of the memory can't be
366   // clobbered.
367   return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
368                               AA.pointsToConstantMemory(MemoryLocation(
369                                   cast<LoadInst>(I)->getPointerOperand())));
370 }
371 
/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
374 ///
375 /// This is meant to be as simple and self-contained as possible. Because it
376 /// uses no cache, etc., it can be relatively expensive.
377 ///
378 /// \param Start     The MemoryAccess that we want to walk from.
379 /// \param ClobberAt A clobber for Start.
380 /// \param StartLoc  The MemoryLocation for Start.
381 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
382 /// \param Query     The UpwardsMemoryQuery we used for our search.
383 /// \param AA        The AliasAnalysis we used for our search.
384 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
385 
386 template <typename AliasAnalysisType>
387 LLVM_ATTRIBUTE_UNUSED static void
388 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
389                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
390                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
391                    bool AllowImpreciseClobber = false) {
392   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
393 
394   if (MSSA.isLiveOnEntryDef(Start)) {
395     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
396            "liveOnEntry must clobber itself");
397     return;
398   }
399 
400   bool FoundClobber = false;
401   DenseSet<ConstMemoryAccessPair> VisitedPhis;
402   SmallVector<ConstMemoryAccessPair, 8> Worklist;
403   Worklist.emplace_back(Start, StartLoc);
404   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
405   // is found, complain.
406   while (!Worklist.empty()) {
407     auto MAP = Worklist.pop_back_val();
408     // All we care about is that nothing from Start to ClobberAt clobbers Start.
409     // We learn nothing from revisiting nodes.
410     if (!VisitedPhis.insert(MAP).second)
411       continue;
412 
413     for (const auto *MA : def_chain(MAP.first)) {
414       if (MA == ClobberAt) {
415         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
416           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
417           // since it won't let us short-circuit.
418           //
419           // Also, note that this can't be hoisted out of the `Worklist` loop,
420           // since MD may only act as a clobber for 1 of N MemoryLocations.
421           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
422           if (!FoundClobber) {
423             ClobberAlias CA =
424                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
425             if (CA.IsClobber) {
426               FoundClobber = true;
427               // Not used: CA.AR;
428             }
429           }
430         }
431         break;
432       }
433 
434       // We should never hit liveOnEntry, unless it's the clobber.
435       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
436 
437       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
438         // If Start is a Def, skip self.
439         if (MD == Start)
440           continue;
441 
442         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
443                     .IsClobber &&
444                "Found clobber before reaching ClobberAt!");
445         continue;
446       }
447 
448       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
449         (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
452         continue;
453       }
454 
455       assert(isa<MemoryPhi>(MA));
456       Worklist.append(
457           upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
458           upward_defs_end());
459     }
460   }
461 
462   // If the verify is done following an optimization, it's possible that
463   // ClobberAt was a conservative clobbering, that we can now infer is not a
464   // true clobbering access. Don't fail the verify if that's the case.
465   // We do have accesses that claim they're optimized, but could be optimized
466   // further. Updating all these can be expensive, so allow it for now (FIXME).
467   if (AllowImpreciseClobber)
468     return;
469 
470   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
471   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
472   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
473          "ClobberAt never acted as a clobber");
474 }
475 
476 namespace {
477 
478 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
479 /// in one class.
480 template <class AliasAnalysisType> class ClobberWalker {
481   /// Save a few bytes by using unsigned instead of size_t.
482   using ListIndex = unsigned;
483 
484   /// Represents a span of contiguous MemoryDefs, potentially ending in a
485   /// MemoryPhi.
486   struct DefPath {
487     MemoryLocation Loc;
488     // Note that, because we always walk in reverse, Last will always dominate
489     // First. Also note that First and Last are inclusive.
490     MemoryAccess *First;
491     MemoryAccess *Last;
492     Optional<ListIndex> Previous;
493 
494     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
495             Optional<ListIndex> Previous)
496         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
497 
498     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
499             Optional<ListIndex> Previous)
500         : DefPath(Loc, Init, Init, Previous) {}
501   };
502 
503   const MemorySSA &MSSA;
504   AliasAnalysisType &AA;
505   DominatorTree &DT;
506   UpwardsMemoryQuery *Query;
507   unsigned *UpwardWalkLimit;
508 
509   // Phi optimization bookkeeping
510   SmallVector<DefPath, 32> Paths;
511   DenseSet<ConstMemoryAccessPair> VisitedPhis;
512 
513   /// Find the nearest def or phi that `From` can legally be optimized to.
514   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
515     assert(From->getNumOperands() && "Phi with no operands?");
516 
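    // Walk up the immediate-dominator chain; the first dominating block that
    // has any MemoryDefs provides the nearest legal target (its last def).
    // If no such block exists, we can optimize all the way to liveOnEntry.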
517     BasicBlock *BB = From->getBlock();
518     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
519     DomTreeNode *Node = DT.getNode(BB);
520     while ((Node = Node->getIDom())) {
521       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
522       if (Defs)
523         return &*Defs->rbegin();
524     }
525     return Result;
526   }
527 
528   /// Result of calling walkToPhiOrClobber.
529   struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked,
    /// or both. Includes alias info when a clobber is found.
532     MemoryAccess *Result;
533     bool IsKnownClobber;
534     Optional<AliasResult> AR;
535   };
536 
537   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
538   /// This will update Desc.Last as it walks. It will (optionally) also stop at
539   /// StopAt.
540   ///
541   /// This does not test for whether StopAt is a clobber
542   UpwardsWalkResult
543   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
544                      const MemoryAccess *SkipStopAt = nullptr) const {
545     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
546     assert(UpwardWalkLimit && "Need a valid walk limit");
547     bool LimitAlreadyReached = false;
548     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
549     // it to 1. This will not do any alias() calls. It either returns in the
550     // first iteration in the loop below, or is set back to 0 if all def chains
551     // are free of MemoryDefs.
552     if (!*UpwardWalkLimit) {
553       *UpwardWalkLimit = 1;
554       LimitAlreadyReached = true;
555     }
556 
557     for (MemoryAccess *Current : def_chain(Desc.Last)) {
558       Desc.Last = Current;
559       if (Current == StopAt || Current == SkipStopAt)
560         return {Current, false, MayAlias};
561 
562       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
563         if (MSSA.isLiveOnEntryDef(MD))
564           return {MD, true, MustAlias};
565 
566         if (!--*UpwardWalkLimit)
567           return {Current, true, MayAlias};
568 
569         ClobberAlias CA =
570             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
571         if (CA.IsClobber)
572           return {MD, true, CA.AR};
573       }
574     }
575 
576     if (LimitAlreadyReached)
577       *UpwardWalkLimit = 0;
578 
579     assert(isa<MemoryPhi>(Desc.Last) &&
580            "Ended at a non-clobber that's not a phi?");
581     return {Desc.Last, false, MayAlias};
582   }
583 
584   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
585                    ListIndex PriorNode) {
586     auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
587                                  upward_defs_end());
588     for (const MemoryAccessPair &P : UpwardDefs) {
589       PausedSearches.push_back(Paths.size());
590       Paths.emplace_back(P.second, P.first, PriorNode);
591     }
592   }
593 
594   /// Represents a search that terminated after finding a clobber. This clobber
595   /// may or may not be present in the path of defs from LastNode..SearchStart,
596   /// since it may have been retrieved from cache.
597   struct TerminatedPath {
598     MemoryAccess *Clobber;
599     ListIndex LastNode;
600   };
601 
602   /// Get an access that keeps us from optimizing to the given phi.
603   ///
604   /// PausedSearches is an array of indices into the Paths array. Its incoming
605   /// value is the indices of searches that stopped at the last phi optimization
606   /// target. It's left in an unspecified state.
607   ///
608   /// If this returns None, NewPaused is a vector of searches that terminated
609   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
610   Optional<TerminatedPath>
611   getBlockingAccess(const MemoryAccess *StopWhere,
612                     SmallVectorImpl<ListIndex> &PausedSearches,
613                     SmallVectorImpl<ListIndex> &NewPaused,
614                     SmallVectorImpl<TerminatedPath> &Terminated) {
615     assert(!PausedSearches.empty() && "No searches to continue?");
616 
617     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
618     // PausedSearches as our stack.
619     while (!PausedSearches.empty()) {
620       ListIndex PathIndex = PausedSearches.pop_back_val();
621       DefPath &Node = Paths[PathIndex];
622 
623       // If we've already visited this path with this MemoryLocation, we don't
624       // need to do so again.
625       //
626       // NOTE: That we just drop these paths on the ground makes caching
627       // behavior sporadic. e.g. given a diamond:
628       //  A
629       // B C
630       //  D
631       //
632       // ...If we walk D, B, A, C, we'll only cache the result of phi
633       // optimization for A, B, and D; C will be skipped because it dies here.
634       // This arguably isn't the worst thing ever, since:
635       //   - We generally query things in a top-down order, so if we got below D
636       //     without needing cache entries for {C, MemLoc}, then chances are
637       //     that those cache entries would end up ultimately unused.
638       //   - We still cache things for A, so C only needs to walk up a bit.
639       // If this behavior becomes problematic, we can fix without a ton of extra
640       // work.
641       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
642         continue;
643 
644       const MemoryAccess *SkipStopWhere = nullptr;
645       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
646         assert(isa<MemoryDef>(Query->OriginalAccess));
647         SkipStopWhere = Query->OriginalAccess;
648       }
649 
650       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
651                                                  /*StopAt=*/StopWhere,
652                                                  /*SkipStopAt=*/SkipStopWhere);
653       if (Res.IsKnownClobber) {
654         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
655 
656         // If this wasn't a cache hit, we hit a clobber when walking. That's a
657         // failure.
658         TerminatedPath Term{Res.Result, PathIndex};
659         if (!MSSA.dominates(Res.Result, StopWhere))
660           return Term;
661 
662         // Otherwise, it's a valid thing to potentially optimize to.
663         Terminated.push_back(Term);
664         continue;
665       }
666 
667       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
668         // We've hit our target. Save this path off for if we want to continue
669         // walking. If we are in the mode of skipping the OriginalAccess, and
670         // we've reached back to the OriginalAccess, do not save path, we've
671         // just looped back to self.
672         if (Res.Result != SkipStopWhere)
673           NewPaused.push_back(PathIndex);
674         continue;
675       }
676 
677       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
678       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
679     }
680 
681     return None;
682   }
683 
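  /// Iterator over a chain of DefPaths, following each node's Previous index
  /// back toward the path that started the search.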
684   template <typename T, typename Walker>
685   struct generic_def_path_iterator
686       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
687                                     std::forward_iterator_tag, T *> {
688     generic_def_path_iterator() {}
689     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
690 
691     T &operator*() const { return curNode(); }
692 
693     generic_def_path_iterator &operator++() {
694       N = curNode().Previous;
695       return *this;
696     }
697 
698     bool operator==(const generic_def_path_iterator &O) const {
699       if (N.hasValue() != O.N.hasValue())
700         return false;
701       return !N.hasValue() || *N == *O.N;
702     }
703 
704   private:
705     T &curNode() const { return W->Paths[*N]; }
706 
707     Walker *W = nullptr;
708     Optional<ListIndex> N = None;
709   };
710 
711   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
712   using const_def_path_iterator =
713       generic_def_path_iterator<const DefPath, const ClobberWalker>;
714 
715   iterator_range<def_path_iterator> def_path(ListIndex From) {
716     return make_range(def_path_iterator(this, From), def_path_iterator());
717   }
718 
719   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
720     return make_range(const_def_path_iterator(this, From),
721                       const_def_path_iterator());
722   }
723 
724   struct OptznResult {
725     /// The path that contains our result.
726     TerminatedPath PrimaryClobber;
727     /// The paths that we can legally cache back from, but that aren't
728     /// necessarily the result of the Phi optimization.
729     SmallVector<TerminatedPath, 4> OtherClobbers;
730   };
731 
732   ListIndex defPathIndex(const DefPath &N) const {
733     // The assert looks nicer if we don't need to do &N
734     const DefPath *NP = &N;
735     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
736            "Out of bounds DefPath!");
737     return NP - &Paths.front();
738   }
739 
740   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
741   /// that act as legal clobbers. Note that this won't return *all* clobbers.
742   ///
743   /// Phi optimization algorithm tl;dr:
744   ///   - Find the earliest def/phi, A, we can optimize to
745   ///   - Find if all paths from the starting memory access ultimately reach A
746   ///     - If not, optimization isn't possible.
747   ///     - Otherwise, walk from A to another clobber or phi, A'.
748   ///       - If A' is a def, we're done.
749   ///       - If A' is a phi, try to optimize it.
750   ///
751   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
752   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
753   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
754                              const MemoryLocation &Loc) {
755     assert(Paths.empty() && VisitedPhis.empty() &&
756            "Reset the optimization state.");
757 
758     Paths.emplace_back(Loc, Start, Phi, None);
759     // Stores how many "valid" optimization nodes we had prior to calling
760     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
761     auto PriorPathsSize = Paths.size();
762 
763     SmallVector<ListIndex, 16> PausedSearches;
764     SmallVector<ListIndex, 8> NewPaused;
765     SmallVector<TerminatedPath, 4> TerminatedPaths;
766 
767     addSearches(Phi, PausedSearches, 0);
768 
769     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
770     // Paths.
771     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
772       assert(!Paths.empty() && "Need a path to move");
773       auto Dom = Paths.begin();
774       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
775         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
776           Dom = I;
777       auto Last = Paths.end() - 1;
778       if (Last != Dom)
779         std::iter_swap(Last, Dom);
780     };
781 
782     MemoryPhi *Current = Phi;
783     while (true) {
784       assert(!MSSA.isLiveOnEntryDef(Current) &&
785              "liveOnEntry wasn't treated as a clobber?");
786 
787       const auto *Target = getWalkTarget(Current);
788       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
789       // optimization for the prior phi.
790       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
791         return MSSA.dominates(P.Clobber, Target);
792       }));
793 
794       // FIXME: This is broken, because the Blocker may be reported to be
795       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
796       // For the moment, this is fine, since we do nothing with blocker info.
797       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
798               Target, PausedSearches, NewPaused, TerminatedPaths)) {
799 
800         // Find the node we started at. We can't search based on N->Last, since
801         // we may have gone around a loop with a different MemoryLocation.
802         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
803           return defPathIndex(N) < PriorPathsSize;
804         });
805         assert(Iter != def_path_iterator());
806 
807         DefPath &CurNode = *Iter;
808         assert(CurNode.Last == Current);
809 
810         // Two things:
811         // A. We can't reliably cache all of NewPaused back. Consider a case
812         //    where we have two paths in NewPaused; one of which can't optimize
813         //    above this phi, whereas the other can. If we cache the second path
814         //    back, we'll end up with suboptimal cache entries. We can handle
815         //    cases like this a bit better when we either try to find all
816         //    clobbers that block phi optimization, or when our cache starts
817         //    supporting unfinished searches.
818         // B. We can't reliably cache TerminatedPaths back here without doing
819         //    extra checks; consider a case like:
820         //       T
821         //      / \
822         //     D   C
823         //      \ /
824         //       S
825         //    Where T is our target, C is a node with a clobber on it, D is a
826         //    diamond (with a clobber *only* on the left or right node, N), and
827         //    S is our start. Say we walk to D, through the node opposite N
828         //    (read: ignoring the clobber), and see a cache entry in the top
829         //    node of D. That cache entry gets put into TerminatedPaths. We then
830         //    walk up to C (N is later in our worklist), find the clobber, and
831         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
832         //    the bottom part of D to the cached clobber, ignoring the clobber
833         //    in N. Again, this problem goes away if we start tracking all
834         //    blockers for a given phi optimization.
835         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
836         return {Result, {}};
837       }
838 
839       // If there's nothing left to search, then all paths led to valid clobbers
840       // that we got from our cache; pick the nearest to the start, and allow
841       // the rest to be cached back.
842       if (NewPaused.empty()) {
843         MoveDominatedPathToEnd(TerminatedPaths);
844         TerminatedPath Result = TerminatedPaths.pop_back_val();
845         return {Result, std::move(TerminatedPaths)};
846       }
847 
848       MemoryAccess *DefChainEnd = nullptr;
849       SmallVector<TerminatedPath, 4> Clobbers;
850       for (ListIndex Paused : NewPaused) {
851         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
852         if (WR.IsKnownClobber)
853           Clobbers.push_back({WR.Result, Paused});
854         else
855           // Micro-opt: If we hit the end of the chain, save it.
856           DefChainEnd = WR.Result;
857       }
858 
859       if (!TerminatedPaths.empty()) {
860         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
861         // do it now.
862         if (!DefChainEnd)
863           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
864             DefChainEnd = MA;
865 
866         // If any of the terminated paths don't dominate the phi we'll try to
867         // optimize, we need to figure out what they are and quit.
868         const BasicBlock *ChainBB = DefChainEnd->getBlock();
869         for (const TerminatedPath &TP : TerminatedPaths) {
870           // Because we know that DefChainEnd is as "high" as we can go, we
871           // don't need local dominance checks; BB dominance is sufficient.
872           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
873             Clobbers.push_back(TP);
874         }
875       }
876 
877       // If we have clobbers in the def chain, find the one closest to Current
878       // and quit.
879       if (!Clobbers.empty()) {
880         MoveDominatedPathToEnd(Clobbers);
881         TerminatedPath Result = Clobbers.pop_back_val();
882         return {Result, std::move(Clobbers)};
883       }
884 
885       assert(all_of(NewPaused,
886                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
887 
888       // Because liveOnEntry is a clobber, this must be a phi.
889       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
890 
891       PriorPathsSize = Paths.size();
892       PausedSearches.clear();
893       for (ListIndex I : NewPaused)
894         addSearches(DefChainPhi, PausedSearches, I);
895       NewPaused.clear();
896 
897       Current = DefChainPhi;
898     }
899   }
900 
901   void verifyOptResult(const OptznResult &R) const {
902     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
903       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
904     }));
905   }
906 
907   void resetPhiOptznState() {
908     Paths.clear();
909     VisitedPhis.clear();
910   }
911 
912 public:
913   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
914       : MSSA(MSSA), AA(AA), DT(DT) {}
915 
916   AliasAnalysisType *getAA() { return &AA; }
917   /// Finds the nearest clobber for the given query, optimizing phis if
918   /// possible.
919   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
920                             unsigned &UpWalkLimit) {
921     Query = &Q;
922     UpwardWalkLimit = &UpWalkLimit;
923     // Starting limit must be > 0.
924     if (!UpWalkLimit)
925       UpWalkLimit++;
926 
927     MemoryAccess *Current = Start;
928     // This walker pretends uses don't exist. If we're handed one, silently grab
929     // its def. (This has the nice side-effect of ensuring we never cache uses)
930     if (auto *MU = dyn_cast<MemoryUse>(Start))
931       Current = MU->getDefiningAccess();
932 
933     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
934     // Fast path for the overly-common case (no crazy phi optimization
935     // necessary)
936     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
937     MemoryAccess *Result;
938     if (WalkResult.IsKnownClobber) {
939       Result = WalkResult.Result;
940       Q.AR = WalkResult.AR;
941     } else {
942       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
943                                           Current, Q.StartingLoc);
944       verifyOptResult(OptRes);
945       resetPhiOptznState();
946       Result = OptRes.PrimaryClobber.Clobber;
947     }
948 
949 #ifdef EXPENSIVE_CHECKS
950     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
951       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
952 #endif
953     return Result;
954   }
955 };
956 
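/// Per-node state for the iterative dominator-tree walk in renamePass: the
/// dominator tree node, the next child to visit, and the MemoryAccess that
/// flows into the node.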
957 struct RenamePassData {
958   DomTreeNode *DTN;
959   DomTreeNode::const_iterator ChildIt;
960   MemoryAccess *IncomingVal;
961 
962   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
963                  MemoryAccess *M)
964       : DTN(D), ChildIt(It), IncomingVal(M) {}
965 
966   void swap(RenamePassData &RHS) {
967     std::swap(DTN, RHS.DTN);
968     std::swap(ChildIt, RHS.ChildIt);
969     std::swap(IncomingVal, RHS.IncomingVal);
970   }
971 };
972 
973 } // end anonymous namespace
974 
975 namespace llvm {
976 
977 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
978   ClobberWalker<AliasAnalysisType> Walker;
979   MemorySSA *MSSA;
980 
981 public:
982   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
983       : Walker(*M, *A, *D), MSSA(M) {}
984 
985   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
986                                               const MemoryLocation &,
987                                               unsigned &);
  // The third argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
994   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
995 };
996 
997 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
998 /// longer does caching on its own, but the name has been retained for the
999 /// moment.
1000 template <class AliasAnalysisType>
1001 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1002   ClobberWalkerBase<AliasAnalysisType> *Walker;
1003 
1004 public:
1005   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1006       : MemorySSAWalker(M), Walker(W) {}
1007   ~CachingWalker() override = default;
1008 
1009   using MemorySSAWalker::getClobberingMemoryAccess;
1010 
1011   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1012     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1013   }
1014   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1015                                           const MemoryLocation &Loc,
1016                                           unsigned &UWL) {
1017     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1018   }
1019 
1020   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1021     unsigned UpwardWalkLimit = MaxCheckLimit;
1022     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1023   }
1024   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1025                                           const MemoryLocation &Loc) override {
1026     unsigned UpwardWalkLimit = MaxCheckLimit;
1027     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1028   }
1029 
1030   void invalidateInfo(MemoryAccess *MA) override {
1031     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1032       MUD->resetOptimized();
1033   }
1034 };
1035 
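/// A walker identical to CachingWalker, except that plain queries pass
/// SkipSelf = true to the base walker, so a query on a MemoryDef searches for
/// a clobber past the def itself.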
1036 template <class AliasAnalysisType>
1037 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1038   ClobberWalkerBase<AliasAnalysisType> *Walker;
1039 
1040 public:
1041   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1042       : MemorySSAWalker(M), Walker(W) {}
1043   ~SkipSelfWalker() override = default;
1044 
1045   using MemorySSAWalker::getClobberingMemoryAccess;
1046 
1047   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1048     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1049   }
1050   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1051                                           const MemoryLocation &Loc,
1052                                           unsigned &UWL) {
1053     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1054   }
1055 
1056   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1057     unsigned UpwardWalkLimit = MaxCheckLimit;
1058     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1059   }
1060   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1061                                           const MemoryLocation &Loc) override {
1062     unsigned UpwardWalkLimit = MaxCheckLimit;
1063     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1064   }
1065 
1066   void invalidateInfo(MemoryAccess *MA) override {
1067     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1068       MUD->resetOptimized();
1069   }
1070 };
1071 
1072 } // end namespace llvm
1073 
1074 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1075                                     bool RenameAllUses) {
1076   // Pass through values to our successors
1077   for (const BasicBlock *S : successors(BB)) {
1078     auto It = PerBlockAccesses.find(S);
1079     // Rename the phi nodes in our successor block
1080     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1081       continue;
1082     AccessList *Accesses = It->second.get();
1083     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1084     if (RenameAllUses) {
1085       int PhiIndex = Phi->getBasicBlockIndex(BB);
1086       assert(PhiIndex != -1 && "Incomplete phi during partial rename");
1087       Phi->setIncomingValue(PhiIndex, IncomingVal);
1088     } else
1089       Phi->addIncoming(IncomingVal, BB);
1090   }
1091 }
1092 
1093 /// Rename a single basic block into MemorySSA form.
1094 /// Uses the standard SSA renaming algorithm.
1095 /// \returns The new incoming value.
1096 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1097                                      bool RenameAllUses) {
1098   auto It = PerBlockAccesses.find(BB);
1099   // Skip most processing if the list is empty.
1100   if (It != PerBlockAccesses.end()) {
1101     AccessList *Accesses = It->second.get();
1102     for (MemoryAccess &L : *Accesses) {
1103       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1104         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1105           MUD->setDefiningAccess(IncomingVal);
1106         if (isa<MemoryDef>(&L))
1107           IncomingVal = &L;
1108       } else {
1109         IncomingVal = &L;
1110       }
1111     }
1112   }
1113   return IncomingVal;
1114 }
1115 
1116 /// This is the standard SSA renaming algorithm.
1117 ///
1118 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1119 /// in phi nodes in our successors.
1120 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1121                            SmallPtrSetImpl<BasicBlock *> &Visited,
1122                            bool SkipVisited, bool RenameAllUses) {
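  // We walk the dominator tree iteratively with an explicit work stack of
  // (node, next child, incoming value) entries rather than recursing.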
1123   SmallVector<RenamePassData, 32> WorkStack;
1124   // Skip everything if we already renamed this block and we are skipping.
1125   // Note: You can't sink this into the if, because we need it to occur
1126   // regardless of whether we skip blocks or not.
1127   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1128   if (SkipVisited && AlreadyVisited)
1129     return;
1130 
1131   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1132   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1133   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1134 
1135   while (!WorkStack.empty()) {
1136     DomTreeNode *Node = WorkStack.back().DTN;
1137     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1138     IncomingVal = WorkStack.back().IncomingVal;
1139 
1140     if (ChildIt == Node->end()) {
1141       WorkStack.pop_back();
1142     } else {
1143       DomTreeNode *Child = *ChildIt;
1144       ++WorkStack.back().ChildIt;
1145       BasicBlock *BB = Child->getBlock();
1146       // Note: You can't sink this into the if, because we need it to occur
1147       // regardless of whether we skip blocks or not.
1148       AlreadyVisited = !Visited.insert(BB).second;
1149       if (SkipVisited && AlreadyVisited) {
1150         // We already visited this during our renaming, which can happen when
1151         // being asked to rename multiple blocks. Figure out the incoming val,
1152         // which is the last def.
1153         // Incoming value can only change if there is a block def, and in that
1154         // case, it's the last block def in the list.
1155         if (auto *BlockDefs = getWritableBlockDefs(BB))
1156           IncomingVal = &*BlockDefs->rbegin();
1157       } else
1158         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1159       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1160       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1161     }
1162   }
1163 }
1164 
/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccesses as
/// being uses of the live on entry definition.
1168 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1169   assert(!DT->isReachableFromEntry(BB) &&
1170          "Reachable block found while handling unreachable blocks");
1171 
1172   // Make sure phi nodes in our reachable successors end up with a
1173   // LiveOnEntryDef for our incoming edge, even though our block is forward
1174   // unreachable.  We could just disconnect these blocks from the CFG fully,
1175   // but we do not right now.
1176   for (const BasicBlock *S : successors(BB)) {
1177     if (!DT->isReachableFromEntry(S))
1178       continue;
1179     auto It = PerBlockAccesses.find(S);
1180     // Rename the phi nodes in our successor block
1181     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1182       continue;
1183     AccessList *Accesses = It->second.get();
1184     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1185     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1186   }
1187 
1188   auto It = PerBlockAccesses.find(BB);
1189   if (It == PerBlockAccesses.end())
1190     return;
1191 
1192   auto &Accesses = It->second;
1193   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1194     auto Next = std::next(AI);
1195     // If we have a phi, just remove it. We are going to replace all
1196     // users with live on entry.
1197     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1198       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1199     else
1200       Accesses->erase(AI);
1201     AI = Next;
1202   }
1203 }
1204 
1205 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1206     : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1207       SkipWalker(nullptr), NextID(0) {
1208   // Build MemorySSA using a batch alias analysis. This reuses the internal
1209   // state that AA collects during an alias()/getModRefInfo() call. This is
1210   // safe because there are no CFG changes while building MemorySSA and can
1211   // significantly reduce the time spent by the compiler in AA, because we will
1212   // make queries about all the instructions in the Function.
1213   BatchAAResults BatchAA(*AA);
1214   buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
1217   this->AA = AA;
1218   // Also create the walker here.
1219   getWalker();
1220 }
1221 
1222 MemorySSA::~MemorySSA() {
1223   // Drop all our references
1224   for (const auto &Pair : PerBlockAccesses)
1225     for (MemoryAccess &MA : *Pair.second)
1226       MA.dropAllReferences();
1227 }
1228 
1229 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1230   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1231 
1232   if (Res.second)
1233     Res.first->second = llvm::make_unique<AccessList>();
1234   return Res.first->second.get();
1235 }
1236 
1237 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1238   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1239 
1240   if (Res.second)
1241     Res.first->second = llvm::make_unique<DefsList>();
1242   return Res.first->second.get();
1243 }
1244 
1245 namespace llvm {
1246 
1247 /// This class is a batch walker of all MemoryUse's in the program, and points
1248 /// their defining access at the thing that actually clobbers them.  Because it
1249 /// is a batch walker that touches everything, it does not operate like the
1250 /// other walkers.  This walker is basically performing a top-down SSA renaming
1251 /// pass, where the version stack is used as the cache.  This enables it to be
1252 /// significantly more time and memory efficient than using the regular walker,
1253 /// which is walking bottom-up.
1254 class MemorySSA::OptimizeUses {
1255 public:
1256   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1257                BatchAAResults *BAA, DominatorTree *DT)
1258       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1259 
1260   void optimizeUses();
1261 
1262 private:
1263   /// This represents where a given memorylocation is in the stack.
1264   struct MemlocStackInfo {
1265     // This essentially is keeping track of versions of the stack. Whenever
1266     // the stack changes due to pushes or pops, these versions increase.
1267     unsigned long StackEpoch;
1268     unsigned long PopEpoch;
1269     // This is the lower bound of places on the stack to check. It is equal to
1270     // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which
    // DenseMap does.
1273     unsigned long LowerBound;
1274     const BasicBlock *LowerBoundBlock;
1275     // This is where the last walk for this memory location ended.
1276     unsigned long LastKill;
1277     bool LastKillValid;
1278     Optional<AliasResult> AR;
1279   };
1280 
1281   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1282                            SmallVectorImpl<MemoryAccess *> &,
1283                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1284 
1285   MemorySSA *MSSA;
1286   CachingWalker<BatchAAResults> *Walker;
1287   BatchAAResults *AA;
1288   DominatorTree *DT;
1289 };
1290 
1291 } // end namespace llvm
1292 
/// Optimize the uses in a given block. This is basically the SSA renaming
1294 /// algorithm, with one caveat: We are able to use a single stack for all
1295 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1296 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1297 /// going to be some position in that stack of possible ones.
1298 ///
1299 /// We track the stack positions that each MemoryLocation needs
1300 /// to check, and last ended at.  This is because we only want to check the
/// things that changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness
/// or similar properties; if it starts to, we can extend MemoryLocOrCall to
/// include the relevant data).
1305 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1306     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1307     SmallVectorImpl<MemoryAccess *> &VersionStack,
1308     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1309 
  // If no accesses, nothing to do.
1311   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1312   if (Accesses == nullptr)
1313     return;
1314 
1315   // Pop everything that doesn't dominate the current block off the stack,
1316   // increment the PopEpoch to account for this.
1317   while (true) {
1318     assert(
1319         !VersionStack.empty() &&
1320         "Version stack should have liveOnEntry sentinel dominating everything");
1321     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1322     if (DT->dominates(BackBlock, BB))
1323       break;
1324     while (VersionStack.back()->getBlock() == BackBlock)
1325       VersionStack.pop_back();
1326     ++PopEpoch;
1327   }
1328 
1329   for (MemoryAccess &MA : *Accesses) {
1330     auto *MU = dyn_cast<MemoryUse>(&MA);
1331     if (!MU) {
1332       VersionStack.push_back(&MA);
1333       ++StackEpoch;
1334       continue;
1335     }
1336 
1337     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1338       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1339       continue;
1340     }
1341 
1342     MemoryLocOrCall UseMLOC(MU);
1343     auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound
    // or last kill info.
1347     if (LocInfo.PopEpoch != PopEpoch) {
1348       LocInfo.PopEpoch = PopEpoch;
1349       LocInfo.StackEpoch = StackEpoch;
1350       // If the lower bound was in something that no longer dominates us, we
1351       // have to reset it.
1352       // We can't simply track stack size, because the stack may have had
1353       // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  Getting the optimal number of queries
      // would require making LowerBound and LastKill a per-loc stack, and
      // popping it until the top of that stack dominates us.  This does not
      // seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first.  This guarantees the reset happens
      // on the smallest set of blocks.
1361       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1362           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1363         // Reset the lower bound of things to check.
1364         // TODO: Some day we should be able to reset to last kill, rather than
1365         // 0.
1366         LocInfo.LowerBound = 0;
1367         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1368         LocInfo.LastKillValid = false;
1369       }
1370     } else if (LocInfo.StackEpoch != StackEpoch) {
1371       // If all that has changed is the StackEpoch, we only have to check the
1372       // new things on the stack, because we've checked everything before.  In
1373       // this case, the lower bound of things to check remains the same.
1374       LocInfo.PopEpoch = PopEpoch;
1375       LocInfo.StackEpoch = StackEpoch;
1376     }
1377     if (!LocInfo.LastKillValid) {
1378       LocInfo.LastKill = VersionStack.size() - 1;
1379       LocInfo.LastKillValid = true;
1380       LocInfo.AR = MayAlias;
1381     }
1382 
    // At this point, we should have corrected LastKill and LowerBound to be
    // in bounds.
1385     assert(LocInfo.LowerBound < VersionStack.size() &&
1386            "Lower bound out of range");
1387     assert(LocInfo.LastKill < VersionStack.size() &&
1388            "Last kill info out of range");
1389     // In any case, the new upper bound is the top of the stack.
1390     unsigned long UpperBound = VersionStack.size() - 1;
1391 
1392     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1393       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1394                         << *(MU->getMemoryInst()) << ")"
1395                         << " because there are "
1396                         << UpperBound - LocInfo.LowerBound
1397                         << " stores to disambiguate\n");
1398       // Because we did not walk, LastKill is no longer valid, as this may
1399       // have been a kill.
1400       LocInfo.LastKillValid = false;
1401       continue;
1402     }
1403     bool FoundClobberResult = false;
1404     unsigned UpwardWalkLimit = MaxCheckLimit;
1405     while (UpperBound > LocInfo.LowerBound) {
1406       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker to see where we ended up, and go there.
1408         MemoryAccess *Result =
1409             Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1410         // We are guaranteed to find it or something is wrong
1411         while (VersionStack[UpperBound] != Result) {
1412           assert(UpperBound != 0);
1413           --UpperBound;
1414         }
1415         FoundClobberResult = true;
1416         break;
1417       }
1418 
1419       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1420       // If the lifetime of the pointer ends at this instruction, it's live on
1421       // entry.
1422       if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1423         // Reset UpperBound to liveOnEntryDef's place in the stack
1424         UpperBound = 0;
1425         FoundClobberResult = true;
1426         LocInfo.AR = MustAlias;
1427         break;
1428       }
1429       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1430       if (CA.IsClobber) {
1431         FoundClobberResult = true;
1432         LocInfo.AR = CA.AR;
1433         break;
1434       }
1435       --UpperBound;
1436     }
1437 
1438     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1439 
    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound.  PHI walking may cause it to be < LowerBound, and in fact,
    // < LastKill.
1442     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // The new last kill is wherever the walk got to.
1444       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1445         LocInfo.AR = None;
1446       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1447       LocInfo.LastKill = UpperBound;
1448     } else {
1449       // Otherwise, we checked all the new ones, and now we know we can get to
1450       // LastKill.
1451       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1452     }
1453     LocInfo.LowerBound = VersionStack.size() - 1;
1454     LocInfo.LowerBoundBlock = BB;
1455   }
1456 }
1457 
1458 /// Optimize uses to point to their actual clobbering definitions.
1459 void MemorySSA::OptimizeUses::optimizeUses() {
1460   SmallVector<MemoryAccess *, 16> VersionStack;
1461   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1462   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1463 
1464   unsigned long StackEpoch = 1;
1465   unsigned long PopEpoch = 1;
1466   // We perform a non-recursive top-down dominator tree walk.
1467   for (const auto *DomNode : depth_first(DT->getRootNode()))
1468     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1469                         LocStackInfo);
1470 }
1471 
1472 void MemorySSA::placePHINodes(
1473     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
1475   ForwardIDFCalculator IDFs(*DT);
1476   IDFs.setDefiningBlocks(DefiningBlocks);
1477   SmallVector<BasicBlock *, 32> IDFBlocks;
1478   IDFs.calculate(IDFBlocks);
1479 
1480   // Now place MemoryPhi nodes.
1481   for (auto &BB : IDFBlocks)
1482     createMemoryPhi(BB);
1483 }
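
// Illustrative example (hypothetical CFG, not taken from a test): for a
// diamond
//   entry -> then / else -> merge
// where only 'then' contains a store, DefiningBlocks = {then}, and the
// iterated dominance frontier places a single MemoryPhi in 'merge', merging
// the MemoryDef from 'then' with liveOnEntry along the 'else' edge.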
1484 
1485 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1486   // We create an access to represent "live on entry", for things like
1487   // arguments or users of globals, where the memory they use is defined before
1488   // the beginning of the function. We do not actually insert it into the IR.
1489   // We do not define a live on exit for the immediate uses, and thus our
1490   // semantics do *not* imply that something with no immediate uses can simply
1491   // be removed.
1492   BasicBlock &StartingPoint = F.getEntryBlock();
1493   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1494                                      &StartingPoint, NextID++));
1495 
  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream on demand.
1499   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1500   // Go through each block, figure out where defs occur, and chain together all
1501   // the accesses.
1502   for (BasicBlock &B : F) {
1503     bool InsertIntoDef = false;
1504     AccessList *Accesses = nullptr;
1505     DefsList *Defs = nullptr;
1506     for (Instruction &I : B) {
1507       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1508       if (!MUD)
1509         continue;
1510 
1511       if (!Accesses)
1512         Accesses = getOrCreateAccessList(&B);
1513       Accesses->push_back(MUD);
1514       if (isa<MemoryDef>(MUD)) {
1515         InsertIntoDef = true;
1516         if (!Defs)
1517           Defs = getOrCreateDefsList(&B);
1518         Defs->push_back(*MUD);
1519       }
1520     }
1521     if (InsertIntoDef)
1522       DefiningBlocks.insert(&B);
1523   }
1524   placePHINodes(DefiningBlocks);
1525 
1526   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1527   // filled in with all blocks.
1528   SmallPtrSet<BasicBlock *, 16> Visited;
1529   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1530 
1531   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1532   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1533   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1534 
1535   // Mark the uses in unreachable blocks as live on entry, so that they go
1536   // somewhere.
1537   for (auto &BB : F)
1538     if (!Visited.count(&BB))
1539       markUnreachableAsLiveOnEntry(&BB);
1540 }
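
// For intuition, the printed annotation for a tiny function might look like
// the following (illustrative only; IDs depend on construction order):
//   entry:
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//     ; MemoryUse(1)
//     %v = load i32, i32* %p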
1541 
1542 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1543 
1544 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1545   if (Walker)
1546     return Walker.get();
1547 
1548   if (!WalkerBase)
1549     WalkerBase =
1550         llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1551 
1552   Walker =
1553       llvm::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1554   return Walker.get();
1555 }
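
// A minimal usage sketch (assumes an already-built MemorySSA &MSSA and a
// memory-accessing Instruction *I for which an access exists):
//   MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
//   MemoryAccess *Clobber = MSSA.getWalker()->getClobberingMemoryAccess(MA);
//   if (MSSA.isLiveOnEntryDef(Clobber)) {
//     // Nothing in the function clobbers I's location before I executes.
//   }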
1556 
1557 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1558   if (SkipWalker)
1559     return SkipWalker.get();
1560 
1561   if (!WalkerBase)
1562     WalkerBase =
1563         llvm::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1564 
1565   SkipWalker =
1566       llvm::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1567   return SkipWalker.get();
}

1571 // This is a helper function used by the creation routines. It places NewAccess
1572 // into the access and defs lists for a given basic block, at the given
1573 // insertion point.
1574 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1575                                         const BasicBlock *BB,
1576                                         InsertionPlace Point) {
1577   auto *Accesses = getOrCreateAccessList(BB);
1578   if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
1581     if (isa<MemoryPhi>(NewAccess)) {
1582       Accesses->push_front(NewAccess);
1583       auto *Defs = getOrCreateDefsList(BB);
1584       Defs->push_front(*NewAccess);
1585     } else {
1586       auto AI = find_if_not(
1587           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1588       Accesses->insert(AI, NewAccess);
1589       if (!isa<MemoryUse>(NewAccess)) {
1590         auto *Defs = getOrCreateDefsList(BB);
1591         auto DI = find_if_not(
1592             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1593         Defs->insert(DI, *NewAccess);
1594       }
1595     }
1596   } else {
1597     Accesses->push_back(NewAccess);
1598     if (!isa<MemoryUse>(NewAccess)) {
1599       auto *Defs = getOrCreateDefsList(BB);
1600       Defs->push_back(*NewAccess);
1601     }
1602   }
1603   BlockNumberingValid.erase(BB);
1604 }
1605 
1606 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1607                                       AccessList::iterator InsertPt) {
1608   auto *Accesses = getWritableBlockAccesses(BB);
1609   bool WasEnd = InsertPt == Accesses->end();
1610   Accesses->insert(AccessList::iterator(InsertPt), What);
1611   if (!isa<MemoryUse>(What)) {
1612     auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job: just shove
    // it at the end. If we got asked to insert before an existing def, we
    // also get an iterator. If we got asked to insert before a use, we have
    // to hunt for the next def.
1617     if (WasEnd) {
1618       Defs->push_back(*What);
1619     } else if (isa<MemoryDef>(InsertPt)) {
1620       Defs->insert(InsertPt->getDefsIterator(), *What);
1621     } else {
1622       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1623         ++InsertPt;
1624       // Either we found a def, or we are inserting at the end
1625       if (InsertPt == Accesses->end())
1626         Defs->push_back(*What);
1627       else
1628         Defs->insert(InsertPt->getDefsIterator(), *What);
1629     }
1630   }
1631   BlockNumberingValid.erase(BB);
1632 }
1633 
1634 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables; remove it from the lists.
1636   removeFromLists(What, false);
1637 
1638   // Note that moving should implicitly invalidate the optimized state of a
1639   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1640   // MemoryDef.
1641   if (auto *MD = dyn_cast<MemoryDef>(What))
1642     MD->resetOptimized();
1643   What->setBlock(BB);
1644 }
1645 
1646 // Move What before Where in the IR.  The end result is that What will belong to
1647 // the right lists and have the right Block set, but will not otherwise be
1648 // correct. It will not have the right defining access, and if it is a def,
1649 // things below it will not properly be updated.
1650 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1651                        AccessList::iterator Where) {
1652   prepareForMoveTo(What, BB);
1653   insertIntoListsBefore(What, BB, Where);
1654 }
1655 
1656 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1657                        InsertionPlace Point) {
1658   if (isa<MemoryPhi>(What)) {
1659     assert(Point == Beginning &&
1660            "Can only move a Phi at the beginning of the block");
1661     // Update lookup table entry
1662     ValueToMemoryAccess.erase(What->getBlock());
1663     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1664     (void)Inserted;
1665     assert(Inserted && "Cannot move a Phi to a block that already has one");
1666   }
1667 
1668   prepareForMoveTo(What, BB);
1669   insertIntoListsForBlock(What, BB, Point);
1670 }
1671 
1672 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1673   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1674   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
1676   insertIntoListsForBlock(Phi, BB, Beginning);
1677   ValueToMemoryAccess[BB] = Phi;
1678   return Phi;
1679 }
1680 
1681 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1682                                                MemoryAccess *Definition,
1683                                                const MemoryUseOrDef *Template) {
1684   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1685   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1686   assert(
1687       NewAccess != nullptr &&
1688       "Tried to create a memory access for a non-memory touching instruction");
1689   NewAccess->setDefiningAccess(Definition);
1690   return NewAccess;
1691 }
1692 
1693 // Return true if the instruction has ordering constraints.
1694 // Note specifically that this only considers stores and loads
1695 // because others are still considered ModRef by getModRefInfo.
1696 static inline bool isOrdered(const Instruction *I) {
1697   if (auto *SI = dyn_cast<StoreInst>(I)) {
1698     if (!SI->isUnordered())
1699       return true;
1700   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1701     if (!LI->isUnordered())
1702       return true;
1703   }
1704   return false;
1705 }
1706 
1707 /// Helper function to create new memory accesses
1708 template <typename AliasAnalysisType>
1709 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1710                                            AliasAnalysisType *AAP,
1711                                            const MemoryUseOrDef *Template) {
1712   // The assume intrinsic has a control dependency which we model by claiming
1713   // that it writes arbitrarily. Ignore that fake memory dependency here.
1714   // FIXME: Replace this special casing with a more accurate modelling of
1715   // assume's control dependency.
1716   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1717     if (II->getIntrinsicID() == Intrinsic::assume)
1718       return nullptr;
1719 
1720   bool Def, Use;
1721   if (Template) {
1722     Def = dyn_cast_or_null<MemoryDef>(Template) != nullptr;
1723     Use = dyn_cast_or_null<MemoryUse>(Template) != nullptr;
1724 #if !defined(NDEBUG)
1725     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1726     bool DefCheck, UseCheck;
1727     DefCheck = isModSet(ModRef) || isOrdered(I);
1728     UseCheck = isRefSet(ModRef);
1729     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1730 #endif
1731   } else {
    // Find out what effect this instruction has on memory.
1733     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1734     // The isOrdered check is used to ensure that volatiles end up as defs
1735     // (atomics end up as ModRef right now anyway).  Until we separate the
1736     // ordering chain from the memory chain, this enables people to see at least
    // some relative ordering of volatiles.  Note that getClobberingMemoryAccess
1738     // will still give an answer that bypasses other volatile loads.  TODO:
1739     // Separate memory aliasing and ordering into two different chains so that
1740     // we can precisely represent both "what memory will this read/write/is
1741     // clobbered by" and "what instructions can I move this past".
1742     Def = isModSet(ModRef) || isOrdered(I);
1743     Use = isRefSet(ModRef);
1744   }
1745 
  // It's possible for an instruction to not access memory at all. During
  // construction, we ignore such instructions.
1748   if (!Def && !Use)
1749     return nullptr;
1750 
1751   MemoryUseOrDef *MUD;
1752   if (Def)
1753     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1754   else
1755     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1756   ValueToMemoryAccess[I] = MUD;
1757   return MUD;
1758 }
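
// For intuition (illustrative, not exhaustive): a plain load becomes a
// MemoryUse, a store or a call that may write memory becomes a MemoryDef,
// and a volatile or atomic load also becomes a MemoryDef because of the
// isOrdered check above, even though it only reads memory.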
1759 
/// Returns true if \p Replacer dominates \p Replacee.
1761 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1762                              const MemoryAccess *Replacee) const {
1763   if (isa<MemoryUseOrDef>(Replacee))
1764     return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1765   const auto *MP = cast<MemoryPhi>(Replacee);
1766   // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may occur multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
1769   for (const Use &Arg : MP->operands()) {
1770     if (Arg.get() != Replacee &&
1771         !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1772       return false;
1773   }
1774   return true;
1775 }
1776 
1777 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1778 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1779   assert(MA->use_empty() &&
1780          "Trying to remove memory access that still has uses");
1781   BlockNumbering.erase(MA);
1782   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1783     MUD->setDefiningAccess(nullptr);
1784   // Invalidate our walker's cache if necessary
1785   if (!isa<MemoryUse>(MA))
1786     getWalker()->invalidateInfo(MA);
1787 
1788   Value *MemoryInst;
1789   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1790     MemoryInst = MUD->getMemoryInst();
1791   else
1792     MemoryInst = MA->getBlock();
1793 
1794   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1795   if (VMA->second == MA)
1796     ValueToMemoryAccess.erase(VMA);
1797 }
1798 
1799 /// Properly remove \p MA from all of MemorySSA's lists.
1800 ///
1801 /// Because of the way the intrusive list and use lists work, it is important to
1802 /// do removal in the right order.
1803 /// ShouldDelete defaults to true, and will cause the memory access to also be
1804 /// deleted, not just removed.
1805 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1806   BasicBlock *BB = MA->getBlock();
1807   // The access list owns the reference, so we erase it from the non-owning list
1808   // first.
1809   if (!isa<MemoryUse>(MA)) {
1810     auto DefsIt = PerBlockDefs.find(BB);
1811     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1812     Defs->remove(*MA);
1813     if (Defs->empty())
1814       PerBlockDefs.erase(DefsIt);
1815   }
1816 
1817   // The erase call here will delete it. If we don't want it deleted, we call
1818   // remove instead.
1819   auto AccessIt = PerBlockAccesses.find(BB);
1820   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1821   if (ShouldDelete)
1822     Accesses->erase(MA);
1823   else
1824     Accesses->remove(MA);
1825 
1826   if (Accesses->empty()) {
1827     PerBlockAccesses.erase(AccessIt);
1828     BlockNumberingValid.erase(BB);
1829   }
1830 }
1831 
1832 void MemorySSA::print(raw_ostream &OS) const {
1833   MemorySSAAnnotatedWriter Writer(this);
1834   F.print(OS, &Writer);
1835 }
1836 
1837 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1838 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1839 #endif
1840 
1841 void MemorySSA::verifyMemorySSA() const {
1842   verifyDefUses(F);
1843   verifyDomination(F);
1844   verifyOrdering(F);
1845   verifyDominationNumbers(F);
  // Previously, the verification also checked that the clobbering access
  // cached by MemorySSA is the same as the clobbering access found by a later
  // query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, transformations that are correct will lead
  // to BasicAA returning different alias answers before and after the
  // transformation. Invalidating MemorySSA is not an option, because the
  // results from BasicAA can be so unpredictable that, in the worst case, we
  // would need to rebuild MemorySSA from scratch after every transformation,
  // which defeats the purpose of using it. For an example, see test4 added in
  // D51960.
1856 }
1857 
1858 /// Verify that all of the blocks we believe to have valid domination numbers
1859 /// actually have valid domination numbers.
1860 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1861 #ifndef NDEBUG
1862   if (BlockNumberingValid.empty())
1863     return;
1864 
1865   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1866   for (const BasicBlock &BB : F) {
1867     if (!ValidBlocks.count(&BB))
1868       continue;
1869 
1870     ValidBlocks.erase(&BB);
1871 
1872     const AccessList *Accesses = getBlockAccesses(&BB);
1873     // It's correct to say an empty block has valid numbering.
1874     if (!Accesses)
1875       continue;
1876 
1877     // Block numbering starts at 1.
1878     unsigned long LastNumber = 0;
1879     for (const MemoryAccess &MA : *Accesses) {
1880       auto ThisNumberIter = BlockNumbering.find(&MA);
1881       assert(ThisNumberIter != BlockNumbering.end() &&
1882              "MemoryAccess has no domination number in a valid block!");
1883 
1884       unsigned long ThisNumber = ThisNumberIter->second;
1885       assert(ThisNumber > LastNumber &&
1886              "Domination numbers should be strictly increasing!");
1887       LastNumber = ThisNumber;
1888     }
1889   }
1890 
1891   assert(ValidBlocks.empty() &&
1892          "All valid BasicBlocks should exist in F -- dangling pointers?");
1893 #endif
1894 }
1895 
1896 /// Verify that the order and existence of MemoryAccesses matches the
1897 /// order and existence of memory affecting instructions.
1898 void MemorySSA::verifyOrdering(Function &F) const {
1899 #ifndef NDEBUG
1900   // Walk all the blocks, comparing what the lookups think and what the access
1901   // lists think, as well as the order in the blocks vs the order in the access
1902   // lists.
1903   SmallVector<MemoryAccess *, 32> ActualAccesses;
1904   SmallVector<MemoryAccess *, 32> ActualDefs;
1905   for (BasicBlock &B : F) {
1906     const AccessList *AL = getBlockAccesses(&B);
1907     const auto *DL = getBlockDefs(&B);
1908     MemoryAccess *Phi = getMemoryAccess(&B);
1909     if (Phi) {
1910       ActualAccesses.push_back(Phi);
1911       ActualDefs.push_back(Phi);
1912     }
1913 
1914     for (Instruction &I : B) {
1915       MemoryAccess *MA = getMemoryAccess(&I);
1916       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1917              "We have memory affecting instructions "
1918              "in this block but they are not in the "
1919              "access list or defs list");
1920       if (MA) {
1921         ActualAccesses.push_back(MA);
1922         if (isa<MemoryDef>(MA))
1923           ActualDefs.push_back(MA);
1924       }
1925     }
    // Either we hit the assert, we really have no accesses, or we have both
    // accesses and an access list.  The same holds for defs.
1929     if (!AL && !DL)
1930       continue;
1931     assert(AL->size() == ActualAccesses.size() &&
1932            "We don't have the same number of accesses in the block as on the "
1933            "access list");
1934     assert((DL || ActualDefs.size() == 0) &&
1935            "Either we should have a defs list, or we should have no defs");
1936     assert((!DL || DL->size() == ActualDefs.size()) &&
1937            "We don't have the same number of defs in the block as on the "
1938            "def list");
1939     auto ALI = AL->begin();
1940     auto AAI = ActualAccesses.begin();
1941     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1942       assert(&*ALI == *AAI && "Not the same accesses in the same order");
1943       ++ALI;
1944       ++AAI;
1945     }
1946     ActualAccesses.clear();
1947     if (DL) {
1948       auto DLI = DL->begin();
1949       auto ADI = ActualDefs.begin();
1950       while (DLI != DL->end() && ADI != ActualDefs.end()) {
1951         assert(&*DLI == *ADI && "Not the same defs in the same order");
1952         ++DLI;
1953         ++ADI;
1954       }
1955     }
1956     ActualDefs.clear();
1957   }
1958 #endif
1959 }
1960 
1961 /// Verify the domination properties of MemorySSA by checking that each
1962 /// definition dominates all of its uses.
1963 void MemorySSA::verifyDomination(Function &F) const {
1964 #ifndef NDEBUG
1965   for (BasicBlock &B : F) {
1966     // Phi nodes are attached to basic blocks
1967     if (MemoryPhi *MP = getMemoryAccess(&B))
1968       for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1970 
1971     for (Instruction &I : B) {
1972       MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1973       if (!MD)
1974         continue;
1975 
1976       for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1978     }
1979   }
1980 #endif
1981 }
1982 
1983 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
1984 /// appears in the use list of \p Def.
1985 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1986 #ifndef NDEBUG
1987   // The live on entry use may cause us to get a NULL def here
1988   if (!Def)
1989     assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
1991   else
1992     assert(is_contained(Def->users(), Use) &&
1993            "Did not find use in def's use list");
1994 #endif
1995 }
1996 
1997 /// Verify the immediate use information, by walking all the memory
1998 /// accesses and verifying that, for each use, it appears in the
1999 /// appropriate def's use list
2000 void MemorySSA::verifyDefUses(Function &F) const {
2001 #ifndef NDEBUG
2002   for (BasicBlock &B : F) {
2003     // Phi nodes are attached to basic blocks
2004     if (MemoryPhi *Phi = getMemoryAccess(&B)) {
2005       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2006                                           pred_begin(&B), pred_end(&B))) &&
2007              "Incomplete MemoryPhi Node");
2008       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2009         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2010         assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
2011                    pred_end(&B) &&
2012                "Incoming phi block not a block predecessor");
2013       }
2014     }
2015 
2016     for (Instruction &I : B) {
2017       if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
2018         verifyUseInDefs(MA->getDefiningAccess(), MA);
2019       }
2020     }
2021   }
2022 #endif
2023 }
2024 
2025 /// Perform a local numbering on blocks so that instruction ordering can be
2026 /// determined in constant time.
2027 /// TODO: We currently just number in order.  If we numbered by N, we could
2028 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2029 /// log2(N) sequences of mixed before and after) without needing to invalidate
2030 /// the numbering.
2031 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2032   // The pre-increment ensures the numbers really start at 1.
2033   unsigned long CurrentNumber = 0;
2034   const AccessList *AL = getBlockAccesses(B);
2035   assert(AL != nullptr && "Asking to renumber an empty block");
2036   for (const auto &I : *AL)
2037     BlockNumbering[&I] = ++CurrentNumber;
2038   BlockNumberingValid.insert(B);
2039 }
2040 
2041 /// Determine, for two memory accesses in the same block,
2042 /// whether \p Dominator dominates \p Dominatee.
2043 /// \returns True if \p Dominator dominates \p Dominatee.
2044 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2045                                  const MemoryAccess *Dominatee) const {
2046   const BasicBlock *DominatorBlock = Dominator->getBlock();
2047 
2048   assert((DominatorBlock == Dominatee->getBlock()) &&
2049          "Asking for local domination when accesses are in different blocks!");
2050   // A node dominates itself.
2051   if (Dominatee == Dominator)
2052     return true;
2053 
2054   // When Dominatee is defined on function entry, it is not dominated by another
2055   // memory access.
2056   if (isLiveOnEntryDef(Dominatee))
2057     return false;
2058 
2059   // When Dominator is defined on function entry, it dominates the other memory
2060   // access.
2061   if (isLiveOnEntryDef(Dominator))
2062     return true;
2063 
2064   if (!BlockNumberingValid.count(DominatorBlock))
2065     renumberBlock(DominatorBlock);
2066 
2067   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2068   // All numbers start with 1
2069   assert(DominatorNum != 0 && "Block was not numbered properly");
2070   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2071   assert(DominateeNum != 0 && "Block was not numbered properly");
2072   return DominatorNum < DominateeNum;
2073 }
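
// For example (illustrative): in a block whose access list is
//   1 = MemoryDef(liveOnEntry)
//   MemoryUse(1)
// the def receives local number 1 and the use number 2, so the def locally
// dominates the use.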
2074 
2075 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2076                           const MemoryAccess *Dominatee) const {
2077   if (Dominator == Dominatee)
2078     return true;
2079 
2080   if (isLiveOnEntryDef(Dominatee))
2081     return false;
2082 
2083   if (Dominator->getBlock() != Dominatee->getBlock())
2084     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2085   return locallyDominates(Dominator, Dominatee);
2086 }
2087 
2088 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2089                           const Use &Dominatee) const {
2090   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2091     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2092     // The def must dominate the incoming block of the phi.
2093     if (UseBB != Dominator->getBlock())
2094       return DT->dominates(Dominator->getBlock(), UseBB);
2095     // If the UseBB and the DefBB are the same, compare locally.
2096     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2097   }
2098   // If it's not a PHI node use, the normal dominates can already handle it.
2099   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2100 }
2101 
2102 const static char LiveOnEntryStr[] = "liveOnEntry";
2103 
2104 void MemoryAccess::print(raw_ostream &OS) const {
2105   switch (getValueID()) {
2106   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2107   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2108   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2109   }
2110   llvm_unreachable("invalid value id");
2111 }
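
// The printers below produce forms roughly like the following (illustrative):
//   2 = MemoryDef(1)->liveOnEntry
//   3 = MemoryPhi({entry,liveOnEntry},{if.then,2})
//   MemoryUse(2) MustAlias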
2112 
2113 void MemoryDef::print(raw_ostream &OS) const {
2114   MemoryAccess *UO = getDefiningAccess();
2115 
2116   auto printID = [&OS](MemoryAccess *A) {
2117     if (A && A->getID())
2118       OS << A->getID();
2119     else
2120       OS << LiveOnEntryStr;
2121   };
2122 
2123   OS << getID() << " = MemoryDef(";
2124   printID(UO);
2125   OS << ")";
2126 
2127   if (isOptimized()) {
2128     OS << "->";
2129     printID(getOptimized());
2130 
2131     if (Optional<AliasResult> AR = getOptimizedAccessType())
2132       OS << " " << *AR;
2133   }
2134 }
2135 
2136 void MemoryPhi::print(raw_ostream &OS) const {
2137   bool First = true;
2138   OS << getID() << " = MemoryPhi(";
2139   for (const auto &Op : operands()) {
2140     BasicBlock *BB = getIncomingBlock(Op);
2141     MemoryAccess *MA = cast<MemoryAccess>(Op);
2142     if (!First)
2143       OS << ',';
2144     else
2145       First = false;
2146 
2147     OS << '{';
2148     if (BB->hasName())
2149       OS << BB->getName();
2150     else
2151       BB->printAsOperand(OS, false);
2152     OS << ',';
2153     if (unsigned ID = MA->getID())
2154       OS << ID;
2155     else
2156       OS << LiveOnEntryStr;
2157     OS << '}';
2158   }
2159   OS << ')';
2160 }
2161 
2162 void MemoryUse::print(raw_ostream &OS) const {
2163   MemoryAccess *UO = getDefiningAccess();
2164   OS << "MemoryUse(";
2165   if (UO && UO->getID())
2166     OS << UO->getID();
2167   else
2168     OS << LiveOnEntryStr;
2169   OS << ')';
2170 
2171   if (Optional<AliasResult> AR = getOptimizedAccessType())
2172     OS << " " << *AR;
2173 }
2174 
2175 void MemoryAccess::dump() const {
2176 // Cannot completely remove virtual function even in release mode.
2177 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2178   print(dbgs());
2179   dbgs() << "\n";
2180 #endif
2181 }
2182 
2183 char MemorySSAPrinterLegacyPass::ID = 0;
2184 
2185 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2186   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2187 }
2188 
2189 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2190   AU.setPreservesAll();
2191   AU.addRequired<MemorySSAWrapperPass>();
2192 }
2193 
2194 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2195   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2196   MSSA.print(dbgs());
2197   if (VerifyMemorySSA)
2198     MSSA.verifyMemorySSA();
2199   return false;
2200 }
2201 
2202 AnalysisKey MemorySSAAnalysis::Key;
2203 
2204 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2205                                                  FunctionAnalysisManager &AM) {
2206   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2207   auto &AA = AM.getResult<AAManager>(F);
2208   return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
2209 }
2210 
2211 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2212                                             FunctionAnalysisManager &AM) {
2213   OS << "MemorySSA for function: " << F.getName() << "\n";
2214   AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2215 
2216   return PreservedAnalyses::all();
2217 }
2218 
2219 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2220                                              FunctionAnalysisManager &AM) {
2221   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2222 
2223   return PreservedAnalyses::all();
2224 }
2225 
2226 char MemorySSAWrapperPass::ID = 0;
2227 
2228 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2229   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2230 }
2231 
2232 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2233 
2234 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2235   AU.setPreservesAll();
2236   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2237   AU.addRequiredTransitive<AAResultsWrapperPass>();
2238 }
2239 
2240 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2241   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2242   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2243   MSSA.reset(new MemorySSA(F, &AA, &DT));
2244   return false;
2245 }
2246 
2247 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2248 
2249 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2250   MSSA->print(OS);
2251 }
2252 
2253 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2254 
2255 /// Walk the use-def chains starting at \p StartingAccess and find
2256 /// the MemoryAccess that actually clobbers Loc.
2257 ///
2258 /// \returns our clobbering memory access
2259 template <typename AliasAnalysisType>
2260 MemoryAccess *
2261 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2262     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2263     unsigned &UpwardWalkLimit) {
2264   if (isa<MemoryPhi>(StartingAccess))
2265     return StartingAccess;
2266 
2267   auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2268   if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2269     return StartingUseOrDef;
2270 
2271   Instruction *I = StartingUseOrDef->getMemoryInst();
2272 
2273   // Conservatively, fences are always clobbers, so don't perform the walk if we
2274   // hit a fence.
2275   if (!isa<CallBase>(I) && I->isFenceLike())
2276     return StartingUseOrDef;
2277 
2278   UpwardsMemoryQuery Q;
2279   Q.OriginalAccess = StartingUseOrDef;
2280   Q.StartingLoc = Loc;
2281   Q.Inst = I;
2282   Q.IsCall = false;
2283 
2284   // Unlike the other function, do not walk to the def of a def, because we are
2285   // handed something we already believe is the clobbering access.
2286   // We never set SkipSelf to true in Q in this method.
2287   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2288                                      ? StartingUseOrDef->getDefiningAccess()
2289                                      : StartingUseOrDef;
2290 
2291   MemoryAccess *Clobber =
2292       Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2293   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2294   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2295   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2296   LLVM_DEBUG(dbgs() << *Clobber << "\n");
2297   return Clobber;
2298 }
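
// A hedged usage sketch for the location-based query above (assumes a walker
// W, a MemoryAccess *MA, and a MemoryLocation Loc of interest):
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA, Loc);
// Unlike the instruction-based overload below, the result is not cached on MA.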
2299 
2300 template <typename AliasAnalysisType>
2301 MemoryAccess *
2302 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2303     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2304   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2305   // If this is a MemoryPhi, we can't do anything.
2306   if (!StartingAccess)
2307     return MA;
2308 
2309   bool IsOptimized = false;
2310 
2311   // If this is an already optimized use or def, return the optimized result.
2312   // Note: Currently, we store the optimized def result in a separate field,
2313   // since we can't use the defining access.
2314   if (StartingAccess->isOptimized()) {
2315     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2316       return StartingAccess->getOptimized();
2317     IsOptimized = true;
2318   }
2319 
2320   const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with fences, since they conservatively clobber
  // all memory and have no locations to get pointers from to try to
  // disambiguate.
2324   if (!isa<CallBase>(I) && I->isFenceLike())
2325     return StartingAccess;
2326 
2327   UpwardsMemoryQuery Q(I, StartingAccess);
2328 
2329   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2330     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2331     StartingAccess->setOptimized(LiveOnEntry);
2332     StartingAccess->setOptimizedAccessType(None);
2333     return LiveOnEntry;
2334   }
2335 
2336   MemoryAccess *OptimizedAccess;
2337   if (!IsOptimized) {
2338     // Start with the thing we already think clobbers this location
2339     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2340 
2341     // At this point, DefiningAccess may be the live on entry def.
2342     // If it is, we will not get a better result.
2343     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2344       StartingAccess->setOptimized(DefiningAccess);
2345       StartingAccess->setOptimizedAccessType(None);
2346       return DefiningAccess;
2347     }
2348 
2349     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2350     StartingAccess->setOptimized(OptimizedAccess);
2351     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2352       StartingAccess->setOptimizedAccessType(None);
2353     else if (Q.AR == MustAlias)
2354       StartingAccess->setOptimizedAccessType(MustAlias);
2355   } else
2356     OptimizedAccess = StartingAccess->getOptimized();
2357 
2358   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2359   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2360   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2361   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2362 
2363   MemoryAccess *Result;
2364   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2365       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2366     assert(isa<MemoryDef>(Q.OriginalAccess));
2367     Q.SkipSelfAccess = true;
2368     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2369   } else
2370     Result = OptimizedAccess;
2371 
2372   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2373   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2374 
2375   return Result;
2376 }
2377 
2378 MemoryAccess *
2379 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2380   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2381     return Use->getDefiningAccess();
2382   return MA;
2383 }
2384 
2385 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2386     MemoryAccess *StartingAccess, const MemoryLocation &) {
2387   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2388     return Use->getDefiningAccess();
2389   return StartingAccess;
2390 }
2391 
2392 void MemoryPhi::deleteMe(DerivedUser *Self) {
2393   delete static_cast<MemoryPhi *>(Self);
2394 }
2395 
2396 void MemoryDef::deleteMe(DerivedUser *Self) {
2397   delete static_cast<MemoryDef *>(Self);
2398 }
2399 
2400 void MemoryUse::deleteMe(DerivedUser *Self) {
2401   delete static_cast<MemoryUse *>(Self);
2402 }
2403