1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the MemorySSA class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/Analysis/MemorySSA.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/DenseSet.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/Hashing.h"
20 #include "llvm/ADT/None.h"
21 #include "llvm/ADT/Optional.h"
22 #include "llvm/ADT/STLExtras.h"
23 #include "llvm/ADT/SmallPtrSet.h"
24 #include "llvm/ADT/SmallVector.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/IteratedDominanceFrontier.h"
29 #include "llvm/Analysis/MemoryLocation.h"
30 #include "llvm/Config/llvm-config.h"
31 #include "llvm/IR/AssemblyAnnotationWriter.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/CallSite.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/Use.h"
43 #include "llvm/Pass.h"
44 #include "llvm/Support/AtomicOrdering.h"
45 #include "llvm/Support/Casting.h"
46 #include "llvm/Support/CommandLine.h"
47 #include "llvm/Support/Compiler.h"
48 #include "llvm/Support/Debug.h"
49 #include "llvm/Support/ErrorHandling.h"
50 #include "llvm/Support/FormattedStream.h"
51 #include "llvm/Support/raw_ostream.h"
52 #include <algorithm>
53 #include <cassert>
54 #include <iterator>
55 #include <memory>
56 #include <utility>
57 
58 using namespace llvm;
59 
60 #define DEBUG_TYPE "memoryssa"
61 
62 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
63                       true)
64 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
65 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
66 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
67                     true)
68 
69 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
70                       "Memory SSA Printer", false, false)
71 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
72 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
73                     "Memory SSA Printer", false, false)
74 
75 static cl::opt<unsigned> MaxCheckLimit(
76     "memssa-check-limit", cl::Hidden, cl::init(100),
77     cl::desc("The maximum number of stores/phis MemorySSA "
78              "will consider trying to walk past (default = 100)"));
79 
80 // Always verify MemorySSA if expensive checking is enabled.
81 #ifdef EXPENSIVE_CHECKS
82 bool llvm::VerifyMemorySSA = true;
83 #else
84 bool llvm::VerifyMemorySSA = false;
85 #endif
86 static cl::opt<bool, true>
87     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
88                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
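// For example, the printer pass and the verifier registered above can be
// exercised together with something like (a usage sketch):
//   opt -print-memoryssa -verify-memoryssa -disable-output foo.ll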
89 
90 namespace llvm {
91 
92 /// An assembly annotator class to print Memory SSA information in
93 /// comments.
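/// For a store and a dependent load, the emitted annotations look roughly
/// like this (a sketch; the access numbering depends on the function):
///
///   ; 1 = MemoryDef(liveOnEntry)
///     store i8 0, i8* %p
///   ; MemoryUse(1)
///     %v = load i8, i8* %p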
94 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
95   friend class MemorySSA;
96 
97   const MemorySSA *MSSA;
98 
99 public:
100   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
101 
102   void emitBasicBlockStartAnnot(const BasicBlock *BB,
103                                 formatted_raw_ostream &OS) override {
104     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
105       OS << "; " << *MA << "\n";
106   }
107 
108   void emitInstructionAnnot(const Instruction *I,
109                             formatted_raw_ostream &OS) override {
110     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
111       OS << "; " << *MA << "\n";
112   }
113 };
114 
115 } // end namespace llvm
116 
117 namespace {
118 
119 /// Our current alias analysis API differentiates heavily between calls and
120 /// non-calls, and functions called on one usually assert on the other.
121 /// This class encapsulates the distinction to simplify other code that wants
122 /// "Memory affecting instructions and related data" to use as a key.
123 /// For example, this class is used as a densemap key in the use optimizer.
124 class MemoryLocOrCall {
125 public:
126   bool IsCall = false;
127 
128   MemoryLocOrCall() = default;
129   MemoryLocOrCall(MemoryUseOrDef *MUD)
130       : MemoryLocOrCall(MUD->getMemoryInst()) {}
131   MemoryLocOrCall(const MemoryUseOrDef *MUD)
132       : MemoryLocOrCall(MUD->getMemoryInst()) {}
133 
134   MemoryLocOrCall(Instruction *Inst) {
135     if (ImmutableCallSite(Inst)) {
136       IsCall = true;
137       CS = ImmutableCallSite(Inst);
138     } else {
139       IsCall = false;
140       // There is no such thing as a MemoryLocation for a fence inst, and it is
141       // unique in that regard.
142       if (!isa<FenceInst>(Inst))
143         Loc = MemoryLocation::get(Inst);
144     }
145   }
146 
147   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
148 
149   ImmutableCallSite getCS() const {
150     assert(IsCall);
151     return CS;
152   }
153 
154   MemoryLocation getLoc() const {
155     assert(!IsCall);
156     return Loc;
157   }
158 
159   bool operator==(const MemoryLocOrCall &Other) const {
160     if (IsCall != Other.IsCall)
161       return false;
162 
163     if (!IsCall)
164       return Loc == Other.Loc;
165 
166     if (CS.getCalledValue() != Other.CS.getCalledValue())
167       return false;
168 
169     return CS.arg_size() == Other.CS.arg_size() &&
170            std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
171   }
172 
173 private:
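  // Discriminated by IsCall: exactly one of these members is meaningful at any
  // time.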
174   union {
175     ImmutableCallSite CS;
176     MemoryLocation Loc;
177   };
178 };
179 
180 } // end anonymous namespace
181 
182 namespace llvm {
183 
184 template <> struct DenseMapInfo<MemoryLocOrCall> {
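  // The empty and tombstone keys reuse MemoryLocation's sentinel values; for
  // both, IsCall is left as false.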
185   static inline MemoryLocOrCall getEmptyKey() {
186     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
187   }
188 
189   static inline MemoryLocOrCall getTombstoneKey() {
190     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
191   }
192 
193   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
194     if (!MLOC.IsCall)
195       return hash_combine(
196           MLOC.IsCall,
197           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
198 
199     hash_code hash =
200         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
201                                       MLOC.getCS().getCalledValue()));
202 
203     for (const Value *Arg : MLOC.getCS().args())
204       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
205     return hash;
206   }
207 
208   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
209     return LHS == RHS;
210   }
211 };
212 
213 } // end namespace llvm
214 
215 /// This does one-way checks to see if Use could theoretically be hoisted above
216 /// MayClobber. This will not check the other way around.
217 ///
218 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
219 /// MayClobber, with no potentially clobbering operations in between them.
220 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
221 static bool areLoadsReorderable(const LoadInst *Use,
222                                 const LoadInst *MayClobber) {
223   bool VolatileUse = Use->isVolatile();
224   bool VolatileClobber = MayClobber->isVolatile();
225   // Volatile operations may never be reordered with other volatile operations.
226   if (VolatileUse && VolatileClobber)
227     return false;
228   // Otherwise, volatile doesn't matter here. From the language reference:
229   // 'optimizers may change the order of volatile operations relative to
230   // non-volatile operations.'
231 
232   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
233   // is weaker, it can be moved above other loads. We just need to be sure that
234   // MayClobber isn't an acquire load, because loads can't be moved above
235   // acquire loads.
236   //
237   // Note that this explicitly *does* allow the free reordering of monotonic (or
238   // weaker) loads of the same address.
239   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
240   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
241                                                      AtomicOrdering::Acquire);
242   return !(SeqCstUse || MayClobberIsAcquire);
243 }
244 
245 namespace {
246 
247 struct ClobberAlias {
248   bool IsClobber;
249   Optional<AliasResult> AR;
250 };
251 
252 } // end anonymous namespace
253 
254 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
255 // ignored if IsClobber = false.
256 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
257                                              const MemoryLocation &UseLoc,
258                                              const Instruction *UseInst,
259                                              AliasAnalysis &AA) {
260   Instruction *DefInst = MD->getMemoryInst();
261   assert(DefInst && "Defining instruction not actually an instruction");
262   ImmutableCallSite UseCS(UseInst);
263   Optional<AliasResult> AR;
264 
265   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
266     // These intrinsics will show up as affecting memory, but they are just
267     // markers, mostly.
268     //
269     // FIXME: We probably don't actually want MemorySSA to model these at all
270     // (including creating MemoryAccesses for them): we just end up inventing
271     // clobbers where they don't really exist at all. Please see D43269 for
272     // context.
273     switch (II->getIntrinsicID()) {
274     case Intrinsic::lifetime_start:
275       if (UseCS)
276         return {false, NoAlias};
277       AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
278       return {AR != NoAlias, AR};
279     case Intrinsic::lifetime_end:
280     case Intrinsic::invariant_start:
281     case Intrinsic::invariant_end:
282     case Intrinsic::assume:
283       return {false, NoAlias};
284     default:
285       break;
286     }
287   }
288 
289   if (UseCS) {
290     ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
291     AR = isMustSet(I) ? MustAlias : MayAlias;
292     return {isModOrRefSet(I), AR};
293   }
294 
295   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
296     if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
297       return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};
298 
299   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
300   AR = isMustSet(I) ? MustAlias : MayAlias;
301   return {isModSet(I), AR};
302 }
303 
304 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
305                                              const MemoryUseOrDef *MU,
306                                              const MemoryLocOrCall &UseMLOC,
307                                              AliasAnalysis &AA) {
308   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
309   // to exist while MemoryLocOrCall is pushed through places.
310   if (UseMLOC.IsCall)
311     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
312                                     AA);
313   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
314                                   AA);
315 }
316 
317 // Return true when MD may alias MU, and false otherwise.
318 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
319                                         AliasAnalysis &AA) {
320   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
321 }
322 
323 namespace {
324 
325 struct UpwardsMemoryQuery {
326   // True if our original query started off as a call
327   bool IsCall = false;
328   // The pointer location we started the query with. This will be empty if
329   // IsCall is true.
330   MemoryLocation StartingLoc;
331   // This is the instruction we were querying about.
332   const Instruction *Inst = nullptr;
333   // The MemoryAccess we actually got called with, used to test local domination
334   const MemoryAccess *OriginalAccess = nullptr;
335   Optional<AliasResult> AR = MayAlias;
336 
337   UpwardsMemoryQuery() = default;
338 
339   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
340       : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
341     if (!IsCall)
342       StartingLoc = MemoryLocation::get(Inst);
343   }
344 };
345 
346 } // end anonymous namespace
347 
348 static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
349                            AliasAnalysis &AA) {
350   Instruction *Inst = MD->getMemoryInst();
351   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
352     switch (II->getIntrinsicID()) {
353     case Intrinsic::lifetime_end:
354       return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
355     default:
356       return false;
357     }
358   }
359   return false;
360 }
361 
362 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
363                                                    const Instruction *I) {
364   // If the memory can't be changed, then loads of the memory can't be
365   // clobbered.
366   return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
367                               AA.pointsToConstantMemory(cast<LoadInst>(I)->
368                                                           getPointerOperand()));
369 }
370 
371 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
372 /// in between `Start` and `ClobberAt` can clobber `Start`.
373 ///
374 /// This is meant to be as simple and self-contained as possible. Because it
375 /// uses no cache, etc., it can be relatively expensive.
376 ///
377 /// \param Start     The MemoryAccess that we want to walk from.
378 /// \param ClobberAt A clobber for Start.
379 /// \param StartLoc  The MemoryLocation for Start.
380 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
381 /// \param Query     The UpwardsMemoryQuery we used for our search.
382 /// \param AA        The AliasAnalysis we used for our search.
383 static void LLVM_ATTRIBUTE_UNUSED
384 checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
385                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
386                    const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
387   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
388 
389   if (MSSA.isLiveOnEntryDef(Start)) {
390     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
391            "liveOnEntry must clobber itself");
392     return;
393   }
394 
395   bool FoundClobber = false;
396   DenseSet<MemoryAccessPair> VisitedPhis;
397   SmallVector<MemoryAccessPair, 8> Worklist;
398   Worklist.emplace_back(Start, StartLoc);
399   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
400   // is found, complain.
401   while (!Worklist.empty()) {
402     MemoryAccessPair MAP = Worklist.pop_back_val();
403     // All we care about is that nothing from Start to ClobberAt clobbers Start.
404     // We learn nothing from revisiting nodes.
405     if (!VisitedPhis.insert(MAP).second)
406       continue;
407 
408     for (MemoryAccess *MA : def_chain(MAP.first)) {
409       if (MA == ClobberAt) {
410         if (auto *MD = dyn_cast<MemoryDef>(MA)) {
411           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
412           // since it won't let us short-circuit.
413           //
414           // Also, note that this can't be hoisted out of the `Worklist` loop,
415           // since MD may only act as a clobber for 1 of N MemoryLocations.
416           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
417           if (!FoundClobber) {
418             ClobberAlias CA =
419                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
420             if (CA.IsClobber) {
421               FoundClobber = true;
422               // Not used: CA.AR;
423             }
424           }
425         }
426         break;
427       }
428 
429       // We should never hit liveOnEntry, unless it's the clobber.
430       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
431 
432       if (auto *MD = dyn_cast<MemoryDef>(MA)) {
433         (void)MD;
434         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
435                     .IsClobber &&
436                "Found clobber before reaching ClobberAt!");
437         continue;
438       }
439 
440       assert(isa<MemoryPhi>(MA));
441       Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
442     }
443   }
444 
445   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
446   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
447   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
448          "ClobberAt never acted as a clobber");
449 }
450 
451 namespace {
452 
453 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
454 /// in one class.
455 class ClobberWalker {
456   /// Save a few bytes by using unsigned instead of size_t.
457   using ListIndex = unsigned;
458 
459   /// Represents a span of contiguous MemoryDefs, potentially ending in a
460   /// MemoryPhi.
461   struct DefPath {
462     MemoryLocation Loc;
463     // Note that, because we always walk in reverse, Last will always dominate
464     // First. Also note that First and Last are inclusive.
465     MemoryAccess *First;
466     MemoryAccess *Last;
467     Optional<ListIndex> Previous;
468 
469     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
470             Optional<ListIndex> Previous)
471         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
472 
473     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
474             Optional<ListIndex> Previous)
475         : DefPath(Loc, Init, Init, Previous) {}
476   };
477 
478   const MemorySSA &MSSA;
479   AliasAnalysis &AA;
480   DominatorTree &DT;
481   UpwardsMemoryQuery *Query;
482 
483   // Phi optimization bookkeeping
484   SmallVector<DefPath, 32> Paths;
485   DenseSet<ConstMemoryAccessPair> VisitedPhis;
486 
487   /// Find the nearest def or phi that `From` can legally be optimized to.
488   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
489     assert(From->getNumOperands() && "Phi with no operands?");
490 
491     BasicBlock *BB = From->getBlock();
492     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
493     DomTreeNode *Node = DT.getNode(BB);
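    // Walk up the immediate-dominator chain; the first dominating block that
    // has any MemoryDefs gives us the nearest legal target (its last def).
    // If no such block exists, the target is liveOnEntry.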
494     while ((Node = Node->getIDom())) {
495       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
496       if (Defs)
497         return &*Defs->rbegin();
498     }
499     return Result;
500   }
501 
502   /// Result of calling walkToPhiOrClobber.
503   struct UpwardsWalkResult {
504     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
505     /// both. Includes the alias info (AR) when a clobber is found.
506     MemoryAccess *Result;
507     bool IsKnownClobber;
508     Optional<AliasResult> AR;
509   };
510 
511   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
512   /// This will update Desc.Last as it walks. It will (optionally) also stop at
513   /// StopAt.
514   ///
515   /// This does not test for whether StopAt is a clobber.
516   UpwardsWalkResult
517   walkToPhiOrClobber(DefPath &Desc,
518                      const MemoryAccess *StopAt = nullptr) const {
519     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
520 
521     for (MemoryAccess *Current : def_chain(Desc.Last)) {
522       Desc.Last = Current;
523       if (Current == StopAt)
524         return {Current, false, MayAlias};
525 
526       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
527         if (MSSA.isLiveOnEntryDef(MD))
528           return {MD, true, MustAlias};
529         ClobberAlias CA =
530             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
531         if (CA.IsClobber)
532           return {MD, true, CA.AR};
533       }
534     }
535 
536     assert(isa<MemoryPhi>(Desc.Last) &&
537            "Ended at a non-clobber that's not a phi?");
538     return {Desc.Last, false, MayAlias};
539   }
540 
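  /// Fan a paused search out across all of Phi's incoming (access, location)
  /// pairs: record a new DefPath for each one, link it back to PriorNode, and
  /// queue its index in PausedSearches.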
541   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
542                    ListIndex PriorNode) {
543     auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
544                                  upward_defs_end());
545     for (const MemoryAccessPair &P : UpwardDefs) {
546       PausedSearches.push_back(Paths.size());
547       Paths.emplace_back(P.second, P.first, PriorNode);
548     }
549   }
550 
551   /// Represents a search that terminated after finding a clobber. This clobber
552   /// may or may not be present in the path of defs from LastNode..SearchStart,
553   /// since it may have been retrieved from cache.
554   struct TerminatedPath {
555     MemoryAccess *Clobber;
556     ListIndex LastNode;
557   };
558 
559   /// Get an access that keeps us from optimizing to the given phi.
560   ///
561   /// PausedSearches is an array of indices into the Paths array. Its incoming
562   /// values are the indices of searches that stopped at the last phi
563   /// optimization target. It is left in an unspecified state.
564   ///
565   /// If this returns None, NewPaused is a vector of searches that terminated
566   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
567   Optional<TerminatedPath>
568   getBlockingAccess(const MemoryAccess *StopWhere,
569                     SmallVectorImpl<ListIndex> &PausedSearches,
570                     SmallVectorImpl<ListIndex> &NewPaused,
571                     SmallVectorImpl<TerminatedPath> &Terminated) {
572     assert(!PausedSearches.empty() && "No searches to continue?");
573 
574     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
575     // PausedSearches as our stack.
576     while (!PausedSearches.empty()) {
577       ListIndex PathIndex = PausedSearches.pop_back_val();
578       DefPath &Node = Paths[PathIndex];
579 
580       // If we've already visited this path with this MemoryLocation, we don't
581       // need to do so again.
582       //
583       // NOTE: That we just drop these paths on the ground makes caching
584       // behavior sporadic. e.g. given a diamond:
585       //  A
586       // B C
587       //  D
588       //
589       // ...If we walk D, B, A, C, we'll only cache the result of phi
590       // optimization for A, B, and D; C will be skipped because it dies here.
591       // This arguably isn't the worst thing ever, since:
592       //   - We generally query things in a top-down order, so if we got below D
593       //     without needing cache entries for {C, MemLoc}, then chances are
594       //     that those cache entries would end up ultimately unused.
595       //   - We still cache things for A, so C only needs to walk up a bit.
596       // If this behavior becomes problematic, we can fix without a ton of extra
597       // work.
598       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
599         continue;
600 
601       UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
602       if (Res.IsKnownClobber) {
603         assert(Res.Result != StopWhere);
604         // If this wasn't a cache hit, we hit a clobber when walking. That's a
605         // failure.
606         TerminatedPath Term{Res.Result, PathIndex};
607         if (!MSSA.dominates(Res.Result, StopWhere))
608           return Term;
609 
610         // Otherwise, it's a valid thing to potentially optimize to.
611         Terminated.push_back(Term);
612         continue;
613       }
614 
615       if (Res.Result == StopWhere) {
616         // We've hit our target. Save this path off for if we want to continue
617         // walking.
618         NewPaused.push_back(PathIndex);
619         continue;
620       }
621 
622       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
623       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
624     }
625 
626     return None;
627   }
628 
629   template <typename T, typename Walker>
630   struct generic_def_path_iterator
631       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
632                                     std::forward_iterator_tag, T *> {
633     generic_def_path_iterator() = default;
634     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
635 
636     T &operator*() const { return curNode(); }
637 
638     generic_def_path_iterator &operator++() {
639       N = curNode().Previous;
640       return *this;
641     }
642 
643     bool operator==(const generic_def_path_iterator &O) const {
644       if (N.hasValue() != O.N.hasValue())
645         return false;
646       return !N.hasValue() || *N == *O.N;
647     }
648 
649   private:
650     T &curNode() const { return W->Paths[*N]; }
651 
652     Walker *W = nullptr;
653     Optional<ListIndex> N = None;
654   };
655 
656   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
657   using const_def_path_iterator =
658       generic_def_path_iterator<const DefPath, const ClobberWalker>;
659 
660   iterator_range<def_path_iterator> def_path(ListIndex From) {
661     return make_range(def_path_iterator(this, From), def_path_iterator());
662   }
663 
664   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
665     return make_range(const_def_path_iterator(this, From),
666                       const_def_path_iterator());
667   }
668 
669   struct OptznResult {
670     /// The path that contains our result.
671     TerminatedPath PrimaryClobber;
672     /// The paths that we can legally cache back from, but that aren't
673     /// necessarily the result of the Phi optimization.
674     SmallVector<TerminatedPath, 4> OtherClobbers;
675   };
676 
677   ListIndex defPathIndex(const DefPath &N) const {
678     // The assert looks nicer if we don't need to do &N
679     const DefPath *NP = &N;
680     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
681            "Out of bounds DefPath!");
682     return NP - &Paths.front();
683   }
684 
685   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
686   /// that act as legal clobbers. Note that this won't return *all* clobbers.
687   ///
688   /// Phi optimization algorithm tl;dr:
689   ///   - Find the earliest def/phi, A, we can optimize to
690   ///   - Find if all paths from the starting memory access ultimately reach A
691   ///     - If not, optimization isn't possible.
692   ///     - Otherwise, walk from A to another clobber or phi, A'.
693   ///       - If A' is a def, we're done.
694   ///       - If A' is a phi, try to optimize it.
695   ///
696   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
697   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
698   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
699                              const MemoryLocation &Loc) {
700     assert(Paths.empty() && VisitedPhis.empty() &&
701            "Reset the optimization state.");
702 
703     Paths.emplace_back(Loc, Start, Phi, None);
704     // Stores how many "valid" optimization nodes we had prior to calling
705     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
706     auto PriorPathsSize = Paths.size();
707 
708     SmallVector<ListIndex, 16> PausedSearches;
709     SmallVector<ListIndex, 8> NewPaused;
710     SmallVector<TerminatedPath, 4> TerminatedPaths;
711 
712     addSearches(Phi, PausedSearches, 0);
713 
714     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
715     // Paths.
716     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
717       assert(!Paths.empty() && "Need a path to move");
718       auto Dom = Paths.begin();
719       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
720         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
721           Dom = I;
722       auto Last = Paths.end() - 1;
723       if (Last != Dom)
724         std::iter_swap(Last, Dom);
725     };
726 
727     MemoryPhi *Current = Phi;
728     while (true) {
729       assert(!MSSA.isLiveOnEntryDef(Current) &&
730              "liveOnEntry wasn't treated as a clobber?");
731 
732       const auto *Target = getWalkTarget(Current);
733       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
734       // optimization for the prior phi.
735       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
736         return MSSA.dominates(P.Clobber, Target);
737       }));
738 
739       // FIXME: This is broken, because the Blocker may be reported to be
740       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
741       // For the moment, this is fine, since we do nothing with blocker info.
742       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
743               Target, PausedSearches, NewPaused, TerminatedPaths)) {
744 
745         // Find the node we started at. We can't search based on N->Last, since
746         // we may have gone around a loop with a different MemoryLocation.
747         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
748           return defPathIndex(N) < PriorPathsSize;
749         });
750         assert(Iter != def_path_iterator());
751 
752         DefPath &CurNode = *Iter;
753         assert(CurNode.Last == Current);
754 
755         // Two things:
756         // A. We can't reliably cache all of NewPaused back. Consider a case
757         //    where we have two paths in NewPaused; one of which can't optimize
758         //    above this phi, whereas the other can. If we cache the second path
759         //    back, we'll end up with suboptimal cache entries. We can handle
760         //    cases like this a bit better when we either try to find all
761         //    clobbers that block phi optimization, or when our cache starts
762         //    supporting unfinished searches.
763         // B. We can't reliably cache TerminatedPaths back here without doing
764         //    extra checks; consider a case like:
765         //       T
766         //      / \
767         //     D   C
768         //      \ /
769         //       S
770         //    Where T is our target, C is a node with a clobber on it, D is a
771         //    diamond (with a clobber *only* on the left or right node, N), and
772         //    S is our start. Say we walk to D, through the node opposite N
773         //    (read: ignoring the clobber), and see a cache entry in the top
774         //    node of D. That cache entry gets put into TerminatedPaths. We then
775         //    walk up to C (N is later in our worklist), find the clobber, and
776         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
777         //    the bottom part of D to the cached clobber, ignoring the clobber
778         //    in N. Again, this problem goes away if we start tracking all
779         //    blockers for a given phi optimization.
780         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
781         return {Result, {}};
782       }
783 
784       // If there's nothing left to search, then all paths led to valid clobbers
785       // that we got from our cache; pick the nearest to the start, and allow
786       // the rest to be cached back.
787       if (NewPaused.empty()) {
788         MoveDominatedPathToEnd(TerminatedPaths);
789         TerminatedPath Result = TerminatedPaths.pop_back_val();
790         return {Result, std::move(TerminatedPaths)};
791       }
792 
793       MemoryAccess *DefChainEnd = nullptr;
794       SmallVector<TerminatedPath, 4> Clobbers;
795       for (ListIndex Paused : NewPaused) {
796         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
797         if (WR.IsKnownClobber)
798           Clobbers.push_back({WR.Result, Paused});
799         else
800           // Micro-opt: If we hit the end of the chain, save it.
801           DefChainEnd = WR.Result;
802       }
803 
804       if (!TerminatedPaths.empty()) {
805         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
806         // do it now.
807         if (!DefChainEnd)
808           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
809             DefChainEnd = MA;
810 
811         // If any of the terminated paths don't dominate the phi we'll try to
812         // optimize, we need to figure out what they are and quit.
813         const BasicBlock *ChainBB = DefChainEnd->getBlock();
814         for (const TerminatedPath &TP : TerminatedPaths) {
815           // Because we know that DefChainEnd is as "high" as we can go, we
816           // don't need local dominance checks; BB dominance is sufficient.
817           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
818             Clobbers.push_back(TP);
819         }
820       }
821 
822       // If we have clobbers in the def chain, find the one closest to Current
823       // and quit.
824       if (!Clobbers.empty()) {
825         MoveDominatedPathToEnd(Clobbers);
826         TerminatedPath Result = Clobbers.pop_back_val();
827         return {Result, std::move(Clobbers)};
828       }
829 
830       assert(all_of(NewPaused,
831                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
832 
833       // Because liveOnEntry is a clobber, this must be a phi.
834       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
835 
836       PriorPathsSize = Paths.size();
837       PausedSearches.clear();
838       for (ListIndex I : NewPaused)
839         addSearches(DefChainPhi, PausedSearches, I);
840       NewPaused.clear();
841 
842       Current = DefChainPhi;
843     }
844   }
845 
846   void verifyOptResult(const OptznResult &R) const {
847     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
848       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
849     }));
850   }
851 
852   void resetPhiOptznState() {
853     Paths.clear();
854     VisitedPhis.clear();
855   }
856 
857 public:
858   ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
859       : MSSA(MSSA), AA(AA), DT(DT) {}
860 
861   /// Finds the nearest clobber for the given query, optimizing phis if
862   /// possible.
863   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
864     Query = &Q;
865 
866     MemoryAccess *Current = Start;
867     // This walker pretends uses don't exist. If we're handed one, silently grab
868     // its def. (This has the nice side-effect of ensuring we never cache uses)
869     if (auto *MU = dyn_cast<MemoryUse>(Start))
870       Current = MU->getDefiningAccess();
871 
872     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
873     // Fast path for the overly-common case (no crazy phi optimization
874     // necessary)
875     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
876     MemoryAccess *Result;
877     if (WalkResult.IsKnownClobber) {
878       Result = WalkResult.Result;
879       Q.AR = WalkResult.AR;
880     } else {
881       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
882                                           Current, Q.StartingLoc);
883       verifyOptResult(OptRes);
884       resetPhiOptznState();
885       Result = OptRes.PrimaryClobber.Clobber;
886     }
887 
888 #ifdef EXPENSIVE_CHECKS
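    // With expensive checks enabled, cross-check the result against the slow,
    // self-contained checkClobberSanity walk defined above.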
889     checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
890 #endif
891     return Result;
892   }
893 
894   void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
895 };
896 
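/// A stack frame for the iterative dominator-tree walk in MemorySSA's
/// renamePass: the dominator-tree node being visited, the next child to
/// descend into, and the MemoryAccess that is live-in at this point.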
897 struct RenamePassData {
898   DomTreeNode *DTN;
899   DomTreeNode::const_iterator ChildIt;
900   MemoryAccess *IncomingVal;
901 
902   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
903                  MemoryAccess *M)
904       : DTN(D), ChildIt(It), IncomingVal(M) {}
905 
906   void swap(RenamePassData &RHS) {
907     std::swap(DTN, RHS.DTN);
908     std::swap(ChildIt, RHS.ChildIt);
909     std::swap(IncomingVal, RHS.IncomingVal);
910   }
911 };
912 
913 } // end anonymous namespace
914 
915 namespace llvm {
916 
917 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
918 /// longer does caching on its own, but the name has been retained for the
919 /// moment.
920 class MemorySSA::CachingWalker final : public MemorySSAWalker {
921   ClobberWalker Walker;
922 
923   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);
924 
925 public:
926   CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
927   ~CachingWalker() override = default;
928 
929   using MemorySSAWalker::getClobberingMemoryAccess;
930 
931   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
932   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
933                                           const MemoryLocation &) override;
934   void invalidateInfo(MemoryAccess *) override;
935 
936   void verify(const MemorySSA *MSSA) override {
937     MemorySSAWalker::verify(MSSA);
938     Walker.verify(MSSA);
939   }
940 };
941 
942 } // end namespace llvm
943 
944 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
945                                     bool RenameAllUses) {
946   // Pass through values to our successors
947   for (const BasicBlock *S : successors(BB)) {
948     auto It = PerBlockAccesses.find(S);
949     // Rename the phi nodes in our successor block
950     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
951       continue;
952     AccessList *Accesses = It->second.get();
953     auto *Phi = cast<MemoryPhi>(&Accesses->front());
954     if (RenameAllUses) {
955       int PhiIndex = Phi->getBasicBlockIndex(BB);
956       assert(PhiIndex != -1 && "Incomplete phi during partial rename");
957       Phi->setIncomingValue(PhiIndex, IncomingVal);
958     } else
959       Phi->addIncoming(IncomingVal, BB);
960   }
961 }
962 
963 /// Rename a single basic block into MemorySSA form.
964 /// Uses the standard SSA renaming algorithm.
965 /// \returns The new incoming value.
966 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
967                                      bool RenameAllUses) {
968   auto It = PerBlockAccesses.find(BB);
969   // Skip most processing if the list is empty.
970   if (It != PerBlockAccesses.end()) {
971     AccessList *Accesses = It->second.get();
972     for (MemoryAccess &L : *Accesses) {
973       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
974         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
975           MUD->setDefiningAccess(IncomingVal);
976         if (isa<MemoryDef>(&L))
977           IncomingVal = &L;
978       } else {
979         IncomingVal = &L;
980       }
981     }
982   }
983   return IncomingVal;
984 }
985 
986 /// This is the standard SSA renaming algorithm.
987 ///
988 /// We walk the dominator tree in preorder, renaming accesses, and then filling
989 /// in phi nodes in our successors.
990 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
991                            SmallPtrSetImpl<BasicBlock *> &Visited,
992                            bool SkipVisited, bool RenameAllUses) {
993   SmallVector<RenamePassData, 32> WorkStack;
994   // Skip everything if we already renamed this block and we are skipping.
995   // Note: You can't sink this into the if, because we need it to occur
996   // regardless of whether we skip blocks or not.
997   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
998   if (SkipVisited && AlreadyVisited)
999     return;
1000 
1001   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1002   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1003   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1004 
1005   while (!WorkStack.empty()) {
1006     DomTreeNode *Node = WorkStack.back().DTN;
1007     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1008     IncomingVal = WorkStack.back().IncomingVal;
1009 
1010     if (ChildIt == Node->end()) {
1011       WorkStack.pop_back();
1012     } else {
1013       DomTreeNode *Child = *ChildIt;
1014       ++WorkStack.back().ChildIt;
1015       BasicBlock *BB = Child->getBlock();
1016       // Note: You can't sink this into the if, because we need it to occur
1017       // regardless of whether we skip blocks or not.
1018       AlreadyVisited = !Visited.insert(BB).second;
1019       if (SkipVisited && AlreadyVisited) {
1020         // We already visited this during our renaming, which can happen when
1021         // being asked to rename multiple blocks. Figure out the incoming val,
1022         // which is the last def.
1023         // Incoming value can only change if there is a block def, and in that
1024         // case, it's the last block def in the list.
1025         if (auto *BlockDefs = getWritableBlockDefs(BB))
1026           IncomingVal = &*BlockDefs->rbegin();
1027       } else
1028         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1029       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1030       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1031     }
1032   }
1033 }
1034 
1035 /// This handles unreachable block accesses by deleting phi nodes in
1036 /// unreachable blocks, and marking all other unreachable MemoryAccesses as
1037 /// being uses of the live on entry definition.
1038 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1039   assert(!DT->isReachableFromEntry(BB) &&
1040          "Reachable block found while handling unreachable blocks");
1041 
1042   // Make sure phi nodes in our reachable successors end up with a
1043   // LiveOnEntryDef for our incoming edge, even though our block is forward
1044   // unreachable.  We could just disconnect these blocks from the CFG fully,
1045   // but we do not right now.
1046   for (const BasicBlock *S : successors(BB)) {
1047     if (!DT->isReachableFromEntry(S))
1048       continue;
1049     auto It = PerBlockAccesses.find(S);
1050     // Rename the phi nodes in our successor block
1051     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1052       continue;
1053     AccessList *Accesses = It->second.get();
1054     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1055     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1056   }
1057 
1058   auto It = PerBlockAccesses.find(BB);
1059   if (It == PerBlockAccesses.end())
1060     return;
1061 
1062   auto &Accesses = It->second;
1063   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1064     auto Next = std::next(AI);
1065     // If we have a phi, just remove it. We are going to replace all
1066     // users with live on entry.
1067     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1068       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1069     else
1070       Accesses->erase(AI);
1071     AI = Next;
1072   }
1073 }
1074 
1075 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1076     : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1077       NextID(0) {
1078   buildMemorySSA();
1079 }
1080 
1081 MemorySSA::~MemorySSA() {
1082   // Drop all our references
1083   for (const auto &Pair : PerBlockAccesses)
1084     for (MemoryAccess &MA : *Pair.second)
1085       MA.dropAllReferences();
1086 }
1087 
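// Return the access list for BB, lazily creating it on first use.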
1088 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1089   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1090 
1091   if (Res.second)
1092     Res.first->second = llvm::make_unique<AccessList>();
1093   return Res.first->second.get();
1094 }
1095 
1096 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1097   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1098 
1099   if (Res.second)
1100     Res.first->second = llvm::make_unique<DefsList>();
1101   return Res.first->second.get();
1102 }
1103 
1104 namespace llvm {
1105 
1106 /// This class is a batch walker of all MemoryUse's in the program, and points
1107 /// their defining access at the thing that actually clobbers them.  Because it
1108 /// is a batch walker that touches everything, it does not operate like the
1109 /// other walkers.  This walker is basically performing a top-down SSA renaming
1110 /// pass, where the version stack is used as the cache.  This enables it to be
1111 /// significantly more time and memory efficient than using the regular walker,
1112 /// which is walking bottom-up.
1113 class MemorySSA::OptimizeUses {
1114 public:
1115   OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
1116                DominatorTree *DT)
1117       : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
1118     Walker = MSSA->getWalker();
1119   }
1120 
1121   void optimizeUses();
1122 
1123 private:
1124   /// This represents where a given MemoryLocation is in the stack.
1125   struct MemlocStackInfo {
1126     // This essentially is keeping track of versions of the stack. Whenever
1127     // the stack changes due to pushes or pops, these versions increase.
1128     unsigned long StackEpoch;
1129     unsigned long PopEpoch;
1130     // This is the lower bound of places on the stack to check. It is equal to
1131     // the place the last stack walk ended.
1132     // Note: Correctness depends on this being initialized to 0, which DenseMap
1133     // does by default.
1134     unsigned long LowerBound;
1135     const BasicBlock *LowerBoundBlock;
1136     // This is where the last walk for this memory location ended.
1137     unsigned long LastKill;
1138     bool LastKillValid;
1139     Optional<AliasResult> AR;
1140   };
1141 
1142   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1143                            SmallVectorImpl<MemoryAccess *> &,
1144                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1145 
1146   MemorySSA *MSSA;
1147   MemorySSAWalker *Walker;
1148   AliasAnalysis *AA;
1149   DominatorTree *DT;
1150 };
1151 
1152 } // end namespace llvm
1153 
1154 /// Optimize the uses in a given block. This is basically the SSA renaming
1155 /// algorithm, with one caveat: We are able to use a single stack for all
1156 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1157 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1158 /// going to be some position in that stack of possible ones.
1159 ///
1160 /// For each MemoryLocation, we track the stack positions it still needs to
1161 /// check, and where its last walk ended.  This is because we only want to
1162 /// check the things that changed since last time.  The same MemoryLocation
1163 /// should get clobbered by the same store (getModRefInfo does not use
1164 /// invariantness or similar properties; if it ever does, we can extend
1165 /// MemoryLocOrCall to carry the relevant data).
1166 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1167     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1168     SmallVectorImpl<MemoryAccess *> &VersionStack,
1169     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1170 
1171   // If no accesses, nothing to do.
1172   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1173   if (Accesses == nullptr)
1174     return;
1175 
1176   // Pop everything that doesn't dominate the current block off the stack,
1177   // increment the PopEpoch to account for this.
1178   while (true) {
1179     assert(
1180         !VersionStack.empty() &&
1181         "Version stack should have liveOnEntry sentinel dominating everything");
1182     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1183     if (DT->dominates(BackBlock, BB))
1184       break;
1185     while (VersionStack.back()->getBlock() == BackBlock)
1186       VersionStack.pop_back();
1187     ++PopEpoch;
1188   }
1189 
1190   for (MemoryAccess &MA : *Accesses) {
1191     auto *MU = dyn_cast<MemoryUse>(&MA);
1192     if (!MU) {
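      // Defs and phis go onto the version stack; later uses are resolved
      // against whatever is on the stack at that point.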
1193       VersionStack.push_back(&MA);
1194       ++StackEpoch;
1195       continue;
1196     }
1197 
1198     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1199       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1200       continue;
1201     }
1202 
1203     MemoryLocOrCall UseMLOC(MU);
1204     auto &LocInfo = LocStackInfo[UseMLOC];
1205     // If the pop epoch changed, it means we've removed stuff from top of
1206     // stack due to changing blocks. We may have to reset the lower bound or
1207     // last kill info.
1208     if (LocInfo.PopEpoch != PopEpoch) {
1209       LocInfo.PopEpoch = PopEpoch;
1210       LocInfo.StackEpoch = StackEpoch;
1211       // If the lower bound was in something that no longer dominates us, we
1212       // have to reset it.
1213       // We can't simply track stack size, because the stack may have had
1214       // pushes/pops in the meantime.
1215       // XXX: This is non-optimal, but is only slower in cases with heavily
1216       // branching dominator trees.  To get the optimal number of queries, we
1217       // would make LowerBound and LastKill a per-loc stack, and pop it until
1218       // the top of that stack dominates us.  This does not seem worth it ATM.
1219       // A much cheaper optimization would be to always explore the deepest
1220       // branch of the dominator tree first. This will guarantee this resets on
1221       // the smallest set of blocks.
1222       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1223           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1224         // Reset the lower bound of things to check.
1225         // TODO: Some day we should be able to reset to last kill, rather than
1226         // 0.
1227         LocInfo.LowerBound = 0;
1228         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1229         LocInfo.LastKillValid = false;
1230       }
1231     } else if (LocInfo.StackEpoch != StackEpoch) {
1232       // If all that has changed is the StackEpoch, we only have to check the
1233       // new things on the stack, because we've checked everything before.  In
1234       // this case, the lower bound of things to check remains the same.
1235       LocInfo.PopEpoch = PopEpoch;
1236       LocInfo.StackEpoch = StackEpoch;
1237     }
1238     if (!LocInfo.LastKillValid) {
1239       LocInfo.LastKill = VersionStack.size() - 1;
1240       LocInfo.LastKillValid = true;
1241       LocInfo.AR = MayAlias;
1242     }
1243 
1244     // At this point, we should have corrected last kill and LowerBound to be
1245     // in bounds.
1246     assert(LocInfo.LowerBound < VersionStack.size() &&
1247            "Lower bound out of range");
1248     assert(LocInfo.LastKill < VersionStack.size() &&
1249            "Last kill info out of range");
1250     // In any case, the new upper bound is the top of the stack.
1251     unsigned long UpperBound = VersionStack.size() - 1;
1252 
1253     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1254       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1255                         << *(MU->getMemoryInst()) << ")"
1256                         << " because there are "
1257                         << UpperBound - LocInfo.LowerBound
1258                         << " stores to disambiguate\n");
1259       // Because we did not walk, LastKill is no longer valid, as this may
1260       // have been a kill.
1261       LocInfo.LastKillValid = false;
1262       continue;
1263     }
1264     bool FoundClobberResult = false;
1265     while (UpperBound > LocInfo.LowerBound) {
1266       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1267         // For phis, use the walker, see where we ended up, go there
1268         Instruction *UseInst = MU->getMemoryInst();
1269         MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
1270         // We are guaranteed to find it or something is wrong
1271         while (VersionStack[UpperBound] != Result) {
1272           assert(UpperBound != 0);
1273           --UpperBound;
1274         }
1275         FoundClobberResult = true;
1276         break;
1277       }
1278 
1279       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1280       // If the lifetime of the pointer ends at this instruction, it's live on
1281       // entry.
1282       if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
1283         // Reset UpperBound to liveOnEntryDef's place in the stack
1284         UpperBound = 0;
1285         FoundClobberResult = true;
1286         LocInfo.AR = MustAlias;
1287         break;
1288       }
1289       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1290       if (CA.IsClobber) {
1291         FoundClobberResult = true;
1292         LocInfo.AR = CA.AR;
1293         break;
1294       }
1295       --UpperBound;
1296     }
1297 
1298     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1299 
1300     // At the end of this loop, UpperBound is either a clobber or the lower
1301     // bound. PHI walking may cause it to be < LowerBound, and even < LastKill.
1302     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1303       // We were last killed now by where we got to
1304       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1305         LocInfo.AR = None;
1306       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1307       LocInfo.LastKill = UpperBound;
1308     } else {
1309       // Otherwise, we checked all the new ones, and now we know we can get to
1310       // LastKill.
1311       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1312     }
1313     LocInfo.LowerBound = VersionStack.size() - 1;
1314     LocInfo.LowerBoundBlock = BB;
1315   }
1316 }
1317 
1318 /// Optimize uses to point to their actual clobbering definitions.
1319 void MemorySSA::OptimizeUses::optimizeUses() {
1320   SmallVector<MemoryAccess *, 16> VersionStack;
1321   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1322   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1323 
1324   unsigned long StackEpoch = 1;
1325   unsigned long PopEpoch = 1;
1326   // We perform a non-recursive top-down dominator tree walk.
1327   for (const auto *DomNode : depth_first(DT->getRootNode()))
1328     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1329                         LocStackInfo);
1330 }
1331 
1332 void MemorySSA::placePHINodes(
1333     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1334   // Determine where our MemoryPhi's should go
1335   ForwardIDFCalculator IDFs(*DT);
1336   IDFs.setDefiningBlocks(DefiningBlocks);
1337   SmallVector<BasicBlock *, 32> IDFBlocks;
1338   IDFs.calculate(IDFBlocks);
1339 
1340   // Now place MemoryPhi nodes.
1341   for (auto &BB : IDFBlocks)
1342     createMemoryPhi(BB);
1343 }
1344 
1345 void MemorySSA::buildMemorySSA() {
1346   // We create an access to represent "live on entry", for things like
1347   // arguments or users of globals, where the memory they use is defined before
1348   // the beginning of the function. We do not actually insert it into the IR.
1349   // We do not define a live on exit for the immediate uses, and thus our
1350   // semantics do *not* imply that something with no immediate uses can simply
1351   // be removed.
1352   BasicBlock &StartingPoint = F.getEntryBlock();
1353   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1354                                      &StartingPoint, NextID++));
1355 
1356   // We maintain lists of memory accesses per-block, trading memory for time. We
1357   // could just look up the memory access for every possible instruction in the
1358   // stream.
1359   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1360   // Go through each block, figure out where defs occur, and chain together all
1361   // the accesses.
1362   for (BasicBlock &B : F) {
1363     bool InsertIntoDef = false;
1364     AccessList *Accesses = nullptr;
1365     DefsList *Defs = nullptr;
1366     for (Instruction &I : B) {
1367       MemoryUseOrDef *MUD = createNewAccess(&I);
1368       if (!MUD)
1369         continue;
1370 
1371       if (!Accesses)
1372         Accesses = getOrCreateAccessList(&B);
1373       Accesses->push_back(MUD);
1374       if (isa<MemoryDef>(MUD)) {
1375         InsertIntoDef = true;
1376         if (!Defs)
1377           Defs = getOrCreateDefsList(&B);
1378         Defs->push_back(*MUD);
1379       }
1380     }
1381     if (InsertIntoDef)
1382       DefiningBlocks.insert(&B);
1383   }
1384   placePHINodes(DefiningBlocks);
1385 
1386   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1387   // filled in with all blocks.
1388   SmallPtrSet<BasicBlock *, 16> Visited;
1389   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1390 
1391   CachingWalker *Walker = getWalkerImpl();
1392 
1393   OptimizeUses(this, Walker, AA, DT).optimizeUses();
1394 
1395   // Mark the uses in unreachable blocks as live on entry, so that they go
1396   // somewhere.
1397   for (auto &BB : F)
1398     if (!Visited.count(&BB))
1399       markUnreachableAsLiveOnEntry(&BB);
1400 }
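
// For orientation only: once construction and use optimization finish, the
// annotated printout (see MemorySSA::print below) interleaves accesses with
// their instructions. A hypothetical two-instruction block would be rendered
// roughly as:
//
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//   ; MemoryUse(1)
//     %v = load i32, i32* %p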
1401 
1402 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1403 
1404 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1405   if (Walker)
1406     return Walker.get();
1407 
1408   Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1409   return Walker.get();
1410 }
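
// Minimal client-side sketch (assumes a MemorySSA &MSSA and an Instruction &I
// that touches memory; not part of this file): the walker answers "which
// access actually clobbers this one?".
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *MA = MSSA.getMemoryAccess(&I);
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
//   if (MSSA.isLiveOnEntryDef(Clobber)) {
//     // Nothing inside the function clobbers I's location.
//   }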
1411 
1412 // This is a helper function used by the creation routines. It places NewAccess
1413 // into the access and defs lists for a given basic block, at the given
1414 // insertion point.
1415 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1416                                         const BasicBlock *BB,
1417                                         InsertionPlace Point) {
1418   auto *Accesses = getOrCreateAccessList(BB);
1419   if (Point == Beginning) {
1420     // If it's a phi node, it goes first; otherwise, it goes after any phi
1421     // nodes.
1422     if (isa<MemoryPhi>(NewAccess)) {
1423       Accesses->push_front(NewAccess);
1424       auto *Defs = getOrCreateDefsList(BB);
1425       Defs->push_front(*NewAccess);
1426     } else {
1427       auto AI = find_if_not(
1428           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1429       Accesses->insert(AI, NewAccess);
1430       if (!isa<MemoryUse>(NewAccess)) {
1431         auto *Defs = getOrCreateDefsList(BB);
1432         auto DI = find_if_not(
1433             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1434         Defs->insert(DI, *NewAccess);
1435       }
1436     }
1437   } else {
1438     Accesses->push_back(NewAccess);
1439     if (!isa<MemoryUse>(NewAccess)) {
1440       auto *Defs = getOrCreateDefsList(BB);
1441       Defs->push_back(*NewAccess);
1442     }
1443   }
1444   BlockNumberingValid.erase(BB);
1445 }
1446 
1447 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1448                                       AccessList::iterator InsertPt) {
1449   auto *Accesses = getWritableBlockAccesses(BB);
1450   bool WasEnd = InsertPt == Accesses->end();
1451   Accesses->insert(AccessList::iterator(InsertPt), What);
1452   if (!isa<MemoryUse>(What)) {
1453     auto *Defs = getOrCreateDefsList(BB);
1454     // If we got asked to insert at the end, we have an easy job: just shove it
1455     // at the end. If we got asked to insert before an existing def, we also get
1456     // an iterator. If we got asked to insert before a use, we have to hunt for
1457     // the next def.
1458     if (WasEnd) {
1459       Defs->push_back(*What);
1460     } else if (isa<MemoryDef>(InsertPt)) {
1461       Defs->insert(InsertPt->getDefsIterator(), *What);
1462     } else {
1463       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1464         ++InsertPt;
1465       // Either we found a def, or we are inserting at the end
1466       if (InsertPt == Accesses->end())
1467         Defs->push_back(*What);
1468       else
1469         Defs->insert(InsertPt->getDefsIterator(), *What);
1470     }
1471   }
1472   BlockNumberingValid.erase(BB);
1473 }
1474 
1475 // Move What before Where in the IR.  The end result is that What will belong to
1476 // the right lists and have the right Block set, but will not otherwise be
1477 // correct. It will not have the right defining access, and if it is a def,
1478 // things below it will not properly be updated.
1479 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1480                        AccessList::iterator Where) {
1481   // Keep it in the lookup tables; remove it from the lists.
1482   removeFromLists(What, false);
1483   What->setBlock(BB);
1484   insertIntoListsBefore(What, BB, Where);
1485 }
1486 
1487 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1488                        InsertionPlace Point) {
1489   if (isa<MemoryPhi>(What)) {
1490     assert(Point == Beginning &&
1491            "Can only move a Phi at the beginning of the block");
1492     // Update lookup table entry
1493     ValueToMemoryAccess.erase(What->getBlock());
1494     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1495     (void)Inserted;
1496     assert(Inserted && "Cannot move a Phi to a block that already has one");
1497   }
1498 
1499   removeFromLists(What, false);
1500   What->setBlock(BB);
1501   insertIntoListsForBlock(What, BB, Point);
1502 }
1503 
1504 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1505   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1506   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1507   // Phis are always placed at the front of the block.
1508   insertIntoListsForBlock(Phi, BB, Beginning);
1509   ValueToMemoryAccess[BB] = Phi;
1510   return Phi;
1511 }
1512 
1513 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1514                                                MemoryAccess *Definition) {
1515   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1516   MemoryUseOrDef *NewAccess = createNewAccess(I);
1517   assert(
1518       NewAccess != nullptr &&
1519       "Tried to create a memory access for a non-memory touching instruction");
1520   NewAccess->setDefiningAccess(Definition);
1521   return NewAccess;
1522 }
1523 
1524 // Return true if the instruction has ordering constraints.
1525 // Note specifically that this only considers stores and loads
1526 // because others are still considered ModRef by getModRefInfo.
1527 static inline bool isOrdered(const Instruction *I) {
1528   if (auto *SI = dyn_cast<StoreInst>(I)) {
1529     if (!SI->isUnordered())
1530       return true;
1531   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1532     if (!LI->isUnordered())
1533       return true;
1534   }
1535   return false;
1536 }
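
// Example of why this matters (illustrative IR): a volatile load is not
// "unordered", so isOrdered() returns true and createNewAccess() below models
// it as a MemoryDef even though it only reads memory, keeping volatile
// accesses ordered with respect to each other:
//
//   %v = load volatile i32, i32* %p   ; modeled as N = MemoryDef(...)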
1537 
1538 /// Helper function to create new memory accesses
1539 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
1540   // The assume intrinsic has a control dependency which we model by claiming
1541   // that it writes arbitrarily. Ignore that fake memory dependency here.
1542   // FIXME: Replace this special casing with a more accurate modelling of
1543   // assume's control dependency.
1544   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1545     if (II->getIntrinsicID() == Intrinsic::assume)
1546       return nullptr;
1547 
1548   // Find out what effect this instruction has on memory.
1549   ModRefInfo ModRef = AA->getModRefInfo(I, None);
1550   // The isOrdered check is used to ensure that volatiles end up as defs
1551   // (atomics end up as ModRef right now anyway).  Until we separate the
1552   // ordering chain from the memory chain, this enables people to see at least
1553   // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1554   // will still give an answer that bypasses other volatile loads.  TODO:
1555   // Separate memory aliasing and ordering into two different chains so that we
1556   // can precisely represent both "what memory will this read/write/is clobbered
1557   // by" and "what instructions can I move this past".
1558   bool Def = isModSet(ModRef) || isOrdered(I);
1559   bool Use = isRefSet(ModRef);
1560 
1561   // It's possible for an instruction to not access memory at all. During
1562   // construction, we ignore such instructions.
1563   if (!Def && !Use)
1564     return nullptr;
1565 
1566   MemoryUseOrDef *MUD;
1567   if (Def)
1568     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1569   else
1570     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1571   ValueToMemoryAccess[I] = MUD;
1572   return MUD;
1573 }
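
// Rough classification examples (illustrative IR, not exercised here):
//
//   store i32 1, i32* %p        --> MemoryDef
//   %v = load i32, i32* %p      --> MemoryUse
//   %a = add i32 %v, 1          --> no access (createNewAccess returns nullptr)
//   call to a readonly function --> MemoryUse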
1574 
1575 /// Returns true if \p Replacer dominates \p Replacee.
1576 bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
1577                              const MemoryAccess *Replacee) const {
1578   if (isa<MemoryUseOrDef>(Replacee))
1579     return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
1580   const auto *MP = cast<MemoryPhi>(Replacee);
1581   // For a phi node, the use occurs in the predecessor block of the phi node.
1582   // Since Replacee may occur multiple times in the phi node, we have to check
1583   // each operand to ensure Replacer dominates every operand where Replacee occurs.
1584   for (const Use &Arg : MP->operands()) {
1585     if (Arg.get() != Replacee &&
1586         !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
1587       return false;
1588   }
1589   return true;
1590 }
1591 
1592 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1593 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1594   assert(MA->use_empty() &&
1595          "Trying to remove memory access that still has uses");
1596   BlockNumbering.erase(MA);
1597   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1598     MUD->setDefiningAccess(nullptr);
1599   // Invalidate our walker's cache if necessary
1600   if (!isa<MemoryUse>(MA))
1601     Walker->invalidateInfo(MA);
1602 
1603   Value *MemoryInst;
1604   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1605     MemoryInst = MUD->getMemoryInst();
1606   else
1607     MemoryInst = MA->getBlock();
1608 
1609   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1610   if (VMA->second == MA)
1611     ValueToMemoryAccess.erase(VMA);
1612 }
1613 
1614 /// Properly remove \p MA from all of MemorySSA's lists.
1615 ///
1616 /// Because of the way the intrusive list and use lists work, it is important to
1617 /// do removal in the right order.
1618 /// ShouldDelete defaults to true, and will cause the memory access to also be
1619 /// deleted, not just removed.
1620 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1621   BasicBlock *BB = MA->getBlock();
1622   // The access list owns the reference, so we erase it from the non-owning list
1623   // first.
1624   if (!isa<MemoryUse>(MA)) {
1625     auto DefsIt = PerBlockDefs.find(BB);
1626     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1627     Defs->remove(*MA);
1628     if (Defs->empty())
1629       PerBlockDefs.erase(DefsIt);
1630   }
1631 
1632   // The erase call here will delete it. If we don't want it deleted, we call
1633   // remove instead.
1634   auto AccessIt = PerBlockAccesses.find(BB);
1635   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1636   if (ShouldDelete)
1637     Accesses->erase(MA);
1638   else
1639     Accesses->remove(MA);
1640 
1641   if (Accesses->empty()) {
1642     PerBlockAccesses.erase(AccessIt);
1643     BlockNumberingValid.erase(BB);
1644   }
1645 }
1646 
1647 void MemorySSA::print(raw_ostream &OS) const {
1648   MemorySSAAnnotatedWriter Writer(this);
1649   F.print(OS, &Writer);
1650 }
1651 
1652 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1653 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1654 #endif
1655 
1656 void MemorySSA::verifyMemorySSA() const {
1657   verifyDefUses(F);
1658   verifyDomination(F);
1659   verifyOrdering(F);
1660   verifyDominationNumbers(F);
1661   Walker->verify(this);
1662 }
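
// A common client pattern (sketch; MSSA here stands for whatever MemorySSA
// handle the caller holds) is to re-verify after mutating the analysis:
//
//   if (VerifyMemorySSA)
//     MSSA->verifyMemorySSA();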
1663 
1664 /// Verify that all of the blocks we believe to have valid domination numbers
1665 /// actually have valid domination numbers.
1666 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1667 #ifndef NDEBUG
1668   if (BlockNumberingValid.empty())
1669     return;
1670 
1671   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1672   for (const BasicBlock &BB : F) {
1673     if (!ValidBlocks.count(&BB))
1674       continue;
1675 
1676     ValidBlocks.erase(&BB);
1677 
1678     const AccessList *Accesses = getBlockAccesses(&BB);
1679     // It's correct to say an empty block has valid numbering.
1680     if (!Accesses)
1681       continue;
1682 
1683     // Block numbering starts at 1.
1684     unsigned long LastNumber = 0;
1685     for (const MemoryAccess &MA : *Accesses) {
1686       auto ThisNumberIter = BlockNumbering.find(&MA);
1687       assert(ThisNumberIter != BlockNumbering.end() &&
1688              "MemoryAccess has no domination number in a valid block!");
1689 
1690       unsigned long ThisNumber = ThisNumberIter->second;
1691       assert(ThisNumber > LastNumber &&
1692              "Domination numbers should be strictly increasing!");
1693       LastNumber = ThisNumber;
1694     }
1695   }
1696 
1697   assert(ValidBlocks.empty() &&
1698          "All valid BasicBlocks should exist in F -- dangling pointers?");
1699 #endif
1700 }
1701 
1702 /// Verify that the order and existence of MemoryAccesses matches the
1703 /// order and existence of memory affecting instructions.
1704 void MemorySSA::verifyOrdering(Function &F) const {
1705   // Walk all the blocks, comparing what the lookups think and what the access
1706   // lists think, as well as the order in the blocks vs the order in the access
1707   // lists.
1708   SmallVector<MemoryAccess *, 32> ActualAccesses;
1709   SmallVector<MemoryAccess *, 32> ActualDefs;
1710   for (BasicBlock &B : F) {
1711     const AccessList *AL = getBlockAccesses(&B);
1712     const auto *DL = getBlockDefs(&B);
1713     MemoryAccess *Phi = getMemoryAccess(&B);
1714     if (Phi) {
1715       ActualAccesses.push_back(Phi);
1716       ActualDefs.push_back(Phi);
1717     }
1718 
1719     for (Instruction &I : B) {
1720       MemoryAccess *MA = getMemoryAccess(&I);
1721       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1722              "We have memory affecting instructions "
1723              "in this block but they are not in the "
1724              "access list or defs list");
1725       if (MA) {
1726         ActualAccesses.push_back(MA);
1727         if (isa<MemoryDef>(MA))
1728           ActualDefs.push_back(MA);
1729       }
1730     }
1731     // Either we hit the assert above, we really have no accesses, or we have
1732     // both accesses and an access list.
1733     // Same with defs.
1734     if (!AL && !DL)
1735       continue;
1736     assert(AL->size() == ActualAccesses.size() &&
1737            "We don't have the same number of accesses in the block as on the "
1738            "access list");
1739     assert((DL || ActualDefs.size() == 0) &&
1740            "Either we should have a defs list, or we should have no defs");
1741     assert((!DL || DL->size() == ActualDefs.size()) &&
1742            "We don't have the same number of defs in the block as on the "
1743            "def list");
1744     auto ALI = AL->begin();
1745     auto AAI = ActualAccesses.begin();
1746     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
1747       assert(&*ALI == *AAI && "Not the same accesses in the same order");
1748       ++ALI;
1749       ++AAI;
1750     }
1751     ActualAccesses.clear();
1752     if (DL) {
1753       auto DLI = DL->begin();
1754       auto ADI = ActualDefs.begin();
1755       while (DLI != DL->end() && ADI != ActualDefs.end()) {
1756         assert(&*DLI == *ADI && "Not the same defs in the same order");
1757         ++DLI;
1758         ++ADI;
1759       }
1760     }
1761     ActualDefs.clear();
1762   }
1763 }
1764 
1765 /// Verify the domination properties of MemorySSA by checking that each
1766 /// definition dominates all of its uses.
1767 void MemorySSA::verifyDomination(Function &F) const {
1768 #ifndef NDEBUG
1769   for (BasicBlock &B : F) {
1770     // Phi nodes are attached to basic blocks
1771     if (MemoryPhi *MP = getMemoryAccess(&B))
1772       for (const Use &U : MP->uses())
1773         assert(dominates(MP, U) && "Memory PHI does not dominate its uses");
1774 
1775     for (Instruction &I : B) {
1776       MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
1777       if (!MD)
1778         continue;
1779 
1780       for (const Use &U : MD->uses())
1781         assert(dominates(MD, U) && "Memory Def does not dominate its uses");
1782     }
1783   }
1784 #endif
1785 }
1786 
1787 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
1788 /// appears in the use list of \p Def.
1789 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
1790 #ifndef NDEBUG
1791   // The live on entry use may cause us to get a NULL def here
1792   if (!Def)
1793     assert(isLiveOnEntryDef(Use) &&
1794            "Null def but use not point to live on entry def");
1795   else
1796     assert(is_contained(Def->users(), Use) &&
1797            "Did not find use in def's use list");
1798 #endif
1799 }
1800 
1801 /// Verify the immediate use information, by walking all the memory
1802 /// accesses and verifying that, for each use, it appears in the
1803 /// appropriate def's use list
1804 void MemorySSA::verifyDefUses(Function &F) const {
1805   for (BasicBlock &B : F) {
1806     // Phi nodes are attached to basic blocks
1807     if (MemoryPhi *Phi = getMemoryAccess(&B)) {
1808       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1809                                           pred_begin(&B), pred_end(&B))) &&
1810              "Incomplete MemoryPhi Node");
1811       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1812         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1813         assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
1814                    pred_end(&B) &&
1815                "Incoming phi block not a block predecessor");
1816       }
1817     }
1818 
1819     for (Instruction &I : B) {
1820       if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
1821         verifyUseInDefs(MA->getDefiningAccess(), MA);
1822       }
1823     }
1824   }
1825 }
1826 
1827 /// Perform a local numbering on blocks so that instruction ordering can be
1828 /// determined in constant time.
1829 /// TODO: We currently just number in order.  If we numbered by N, we could
1830 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
1831 /// log2(N) sequences of mixed before and after) without needing to invalidate
1832 /// the numbering.
1833 void MemorySSA::renumberBlock(const BasicBlock *B) const {
1834   // The pre-increment ensures the numbers really start at 1.
1835   unsigned long CurrentNumber = 0;
1836   const AccessList *AL = getBlockAccesses(B);
1837   assert(AL != nullptr && "Asking to renumber an empty block");
1838   for (const auto &I : *AL)
1839     BlockNumbering[&I] = ++CurrentNumber;
1840   BlockNumberingValid.insert(B);
1841 }
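
// Illustration (hypothetical block): after renumberBlock, accesses are
// numbered 1, 2, 3, ... in list order, so locallyDominates below reduces to a
// plain integer comparison:
//
//   ; 1 = MemoryDef(liveOnEntry)    BlockNumbering == 1
//   ; MemoryUse(1)                  BlockNumbering == 2
//   ; 2 = MemoryDef(1)              BlockNumbering == 3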
1842 
1843 /// Determine, for two memory accesses in the same block,
1844 /// whether \p Dominator dominates \p Dominatee.
1845 /// \returns True if \p Dominator dominates \p Dominatee.
1846 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
1847                                  const MemoryAccess *Dominatee) const {
1848   const BasicBlock *DominatorBlock = Dominator->getBlock();
1849 
1850   assert((DominatorBlock == Dominatee->getBlock()) &&
1851          "Asking for local domination when accesses are in different blocks!");
1852   // A node dominates itself.
1853   if (Dominatee == Dominator)
1854     return true;
1855 
1856   // When Dominatee is defined on function entry, it is not dominated by another
1857   // memory access.
1858   if (isLiveOnEntryDef(Dominatee))
1859     return false;
1860 
1861   // When Dominator is defined on function entry, it dominates the other memory
1862   // access.
1863   if (isLiveOnEntryDef(Dominator))
1864     return true;
1865 
1866   if (!BlockNumberingValid.count(DominatorBlock))
1867     renumberBlock(DominatorBlock);
1868 
1869   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
1870   // All numbers start with 1
1871   assert(DominatorNum != 0 && "Block was not numbered properly");
1872   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
1873   assert(DominateeNum != 0 && "Block was not numbered properly");
1874   return DominatorNum < DominateeNum;
1875 }
1876 
1877 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1878                           const MemoryAccess *Dominatee) const {
1879   if (Dominator == Dominatee)
1880     return true;
1881 
1882   if (isLiveOnEntryDef(Dominatee))
1883     return false;
1884 
1885   if (Dominator->getBlock() != Dominatee->getBlock())
1886     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
1887   return locallyDominates(Dominator, Dominatee);
1888 }
1889 
1890 bool MemorySSA::dominates(const MemoryAccess *Dominator,
1891                           const Use &Dominatee) const {
1892   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
1893     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
1894     // The def must dominate the incoming block of the phi.
1895     if (UseBB != Dominator->getBlock())
1896       return DT->dominates(Dominator->getBlock(), UseBB);
1897     // If the UseBB and the DefBB are the same, compare locally.
1898     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
1899   }
1900   // If it's not a PHI node use, the normal dominates can already handle it.
1901   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
1902 }
1903 
1904 const static char LiveOnEntryStr[] = "liveOnEntry";
1905 
1906 void MemoryAccess::print(raw_ostream &OS) const {
1907   switch (getValueID()) {
1908   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
1909   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
1910   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
1911   }
1912   llvm_unreachable("invalid value id");
1913 }
1914 
1915 void MemoryDef::print(raw_ostream &OS) const {
1916   MemoryAccess *UO = getDefiningAccess();
1917 
1918   auto printID = [&OS](MemoryAccess *A) {
1919     if (A && A->getID())
1920       OS << A->getID();
1921     else
1922       OS << LiveOnEntryStr;
1923   };
1924 
1925   OS << getID() << " = MemoryDef(";
1926   printID(UO);
1927   OS << ")";
1928 
1929   if (isOptimized()) {
1930     OS << "->";
1931     printID(getOptimized());
1932 
1933     if (Optional<AliasResult> AR = getOptimizedAccessType())
1934       OS << " " << *AR;
1935   }
1936 }
1937 
1938 void MemoryPhi::print(raw_ostream &OS) const {
1939   bool First = true;
1940   OS << getID() << " = MemoryPhi(";
1941   for (const auto &Op : operands()) {
1942     BasicBlock *BB = getIncomingBlock(Op);
1943     MemoryAccess *MA = cast<MemoryAccess>(Op);
1944     if (!First)
1945       OS << ',';
1946     else
1947       First = false;
1948 
1949     OS << '{';
1950     if (BB->hasName())
1951       OS << BB->getName();
1952     else
1953       BB->printAsOperand(OS, false);
1954     OS << ',';
1955     if (unsigned ID = MA->getID())
1956       OS << ID;
1957     else
1958       OS << LiveOnEntryStr;
1959     OS << '}';
1960   }
1961   OS << ')';
1962 }
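
// For example (illustrative block names), a MemoryPhi merging a def from one
// predecessor with liveOnEntry from the other prints as:
//
//   3 = MemoryPhi({if.then,1},{if.else,liveOnEntry})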
1963 
1964 void MemoryUse::print(raw_ostream &OS) const {
1965   MemoryAccess *UO = getDefiningAccess();
1966   OS << "MemoryUse(";
1967   if (UO && UO->getID())
1968     OS << UO->getID();
1969   else
1970     OS << LiveOnEntryStr;
1971   OS << ')';
1972 
1973   if (Optional<AliasResult> AR = getOptimizedAccessType())
1974     OS << " " << *AR;
1975 }
1976 
1977 void MemoryAccess::dump() const {
1978 // Cannot completely remove virtual function even in release mode.
1979 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1980   print(dbgs());
1981   dbgs() << "\n";
1982 #endif
1983 }
1984 
1985 char MemorySSAPrinterLegacyPass::ID = 0;
1986 
1987 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
1988   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
1989 }
1990 
1991 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
1992   AU.setPreservesAll();
1993   AU.addRequired<MemorySSAWrapperPass>();
1994 }
1995 
1996 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
1997   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
1998   MSSA.print(dbgs());
1999   if (VerifyMemorySSA)
2000     MSSA.verifyMemorySSA();
2001   return false;
2002 }
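
// Illustrative legacy pass manager invocation (assumes an input.ll file and an
// opt binary built from this tree):
//
//   opt -print-memoryssa -verify-memoryssa -disable-output input.ll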
2003 
2004 AnalysisKey MemorySSAAnalysis::Key;
2005 
2006 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2007                                                  FunctionAnalysisManager &AM) {
2008   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2009   auto &AA = AM.getResult<AAManager>(F);
2010   return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
2011 }
2012 
2013 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2014                                             FunctionAnalysisManager &AM) {
2015   OS << "MemorySSA for function: " << F.getName() << "\n";
2016   AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);
2017 
2018   return PreservedAnalyses::all();
2019 }
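
// Illustrative new pass manager invocation (assumes this printer is registered
// as "print<memoryssa>" in the pass registry):
//
//   opt -passes='print<memoryssa>' -disable-output input.ll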
2020 
2021 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2022                                              FunctionAnalysisManager &AM) {
2023   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2024 
2025   return PreservedAnalyses::all();
2026 }
2027 
2028 char MemorySSAWrapperPass::ID = 0;
2029 
2030 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2031   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2032 }
2033 
2034 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2035 
2036 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2037   AU.setPreservesAll();
2038   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2039   AU.addRequiredTransitive<AAResultsWrapperPass>();
2040 }
2041 
2042 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2043   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2044   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2045   MSSA.reset(new MemorySSA(F, &AA, &DT));
2046   return false;
2047 }
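
// Sketch of a legacy-PM transform consuming this analysis (MyPass is a
// placeholder, not part of this file):
//
//   void MyPass::getAnalysisUsage(AnalysisUsage &AU) const {
//     AU.addRequired<MemorySSAWrapperPass>();
//     AU.addPreserved<MemorySSAWrapperPass>();
//   }
//   bool MyPass::runOnFunction(Function &F) {
//     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
//     MemorySSAWalker *Walker = MSSA.getWalker();
//     // ... query Walker->getClobberingMemoryAccess(...) as needed ...
//     return false;
//   }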
2048 
2049 void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }
2050 
2051 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2052   MSSA->print(OS);
2053 }
2054 
2055 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2056 
2057 MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
2058                                         DominatorTree *D)
2059     : MemorySSAWalker(M), Walker(*M, *A, *D) {}
2060 
2061 void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
2062   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
2063     MUD->resetOptimized();
2064 }
2065 
2066 /// Walk the use-def chains starting at \p MA and find
2067 /// the MemoryAccess that actually clobbers Loc.
2068 ///
2069 /// \returns our clobbering memory access
2070 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2071     MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
2072   return Walker.findClobber(StartingAccess, Q);
2073 }
2074 
2075 MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
2076     MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
2077   if (isa<MemoryPhi>(StartingAccess))
2078     return StartingAccess;
2079 
2080   auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
2081   if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2082     return StartingUseOrDef;
2083 
2084   Instruction *I = StartingUseOrDef->getMemoryInst();
2085 
2086   // Conservatively, fences are always clobbers, so don't perform the walk if we
2087   // hit a fence.
2088   if (!ImmutableCallSite(I) && I->isFenceLike())
2089     return StartingUseOrDef;
2090 
2091   UpwardsMemoryQuery Q;
2092   Q.OriginalAccess = StartingUseOrDef;
2093   Q.StartingLoc = Loc;
2094   Q.Inst = I;
2095   Q.IsCall = false;
2096 
2097   // Unlike the other function, do not walk to the def of a def, because we are
2098   // handed something we already believe is the clobbering access.
2099   MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
2100                                      ? StartingUseOrDef->getDefiningAccess()
2101                                      : StartingUseOrDef;
2102 
2103   MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
2104   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2105   LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
2106   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2107   LLVM_DEBUG(dbgs() << *Clobber << "\n");
2108   return Clobber;
2109 }
2110 
2111 MemoryAccess *
2112 MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2113   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2114   // If this is a MemoryPhi, we can't do anything.
2115   if (!StartingAccess)
2116     return MA;
2117 
2118   // If this is an already optimized use or def, return the optimized result.
2119   // Note: Currently, we store the optimized def result in a separate field,
2120   // since we can't use the defining access.
2121   if (StartingAccess->isOptimized())
2122     return StartingAccess->getOptimized();
2123 
2124   const Instruction *I = StartingAccess->getMemoryInst();
2125   UpwardsMemoryQuery Q(I, StartingAccess);
2126   // We can't sanely do anything with a fence, since it conservatively clobbers
2127   // all memory and provides no locations to get pointers from to try to
2128   // disambiguate.
2129   if (!Q.IsCall && I->isFenceLike())
2130     return StartingAccess;
2131 
2132   if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
2133     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2134     StartingAccess->setOptimized(LiveOnEntry);
2135     StartingAccess->setOptimizedAccessType(None);
2136     return LiveOnEntry;
2137   }
2138 
2139   // Start with the thing we already think clobbers this location
2140   MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2141 
2142   // At this point, DefiningAccess may be the live on entry def.
2143   // If it is, we will not get a better result.
2144   if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2145     StartingAccess->setOptimized(DefiningAccess);
2146     StartingAccess->setOptimizedAccessType(None);
2147     return DefiningAccess;
2148   }
2149 
2150   MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
2151   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2152   LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
2153   LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
2154   LLVM_DEBUG(dbgs() << *Result << "\n");
2155 
2156   StartingAccess->setOptimized(Result);
2157   if (MSSA->isLiveOnEntryDef(Result))
2158     StartingAccess->setOptimizedAccessType(None);
2159   else if (Q.AR == MustAlias)
2160     StartingAccess->setOptimizedAccessType(MustAlias);
2161 
2162   return Result;
2163 }
2164 
2165 MemoryAccess *
2166 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2167   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2168     return Use->getDefiningAccess();
2169   return MA;
2170 }
2171 
2172 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2173     MemoryAccess *StartingAccess, const MemoryLocation &) {
2174   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2175     return Use->getDefiningAccess();
2176   return StartingAccess;
2177 }
2178 
2179 void MemoryPhi::deleteMe(DerivedUser *Self) {
2180   delete static_cast<MemoryPhi *>(Self);
2181 }
2182 
2183 void MemoryDef::deleteMe(DerivedUser *Self) {
2184   delete static_cast<MemoryDef *>(Self);
2185 }
2186 
2187 void MemoryUse::deleteMe(DerivedUser *Self) {
2188   delete static_cast<MemoryUse *>(Self);
2189 }
2190