1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFGPrinter.h"
29 #include "llvm/Analysis/IteratedDominanceFrontier.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Config/llvm-config.h"
32 #include "llvm/IR/AssemblyAnnotationWriter.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/Use.h"
43 #include "llvm/InitializePasses.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/AtomicOrdering.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Compiler.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/FormattedStream.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include <algorithm>
54 #include <cassert>
55 #include <cstdlib>
56 #include <iterator>
57 #include <memory>
58 #include <utility>
59 
60 using namespace llvm;
61 
62 #define DEBUG_TYPE "memoryssa"
63 
64 static cl::opt<std::string>
65     DotCFGMSSA("dot-cfg-mssa",
66                cl::value_desc("file name for generated dot file"),
67                cl::desc("file name for generated dot file"), cl::init(""));
68 
69 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
70                       true)
71 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
72 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
73 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
74                     true)
75 
76 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
77                       "Memory SSA Printer", false, false)
78 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
79 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
80                     "Memory SSA Printer", false, false)
81 
82 static cl::opt<unsigned> MaxCheckLimit(
83     "memssa-check-limit", cl::Hidden, cl::init(100),
84     cl::desc("The maximum number of stores/phis MemorySSA "
85              "will consider trying to walk past (default = 100)"));
86 
87 // Always verify MemorySSA if expensive checking is enabled.
88 #ifdef EXPENSIVE_CHECKS
89 bool llvm::VerifyMemorySSA = true;
90 #else
91 bool llvm::VerifyMemorySSA = false;
92 #endif
93 
94 static cl::opt<bool, true>
95     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
96                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
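
// For illustration (not part of the pass logic): since this is an ordinary
// cl::opt backed by VerifyMemorySSA, any tool that parses LLVM's command line
// can turn verification on, e.g. by passing -verify-memoryssa to opt; under
// EXPENSIVE_CHECKS it already defaults to on, as set above.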
97 
98 namespace llvm {
99 
100 /// An assembly annotator class to print Memory SSA information in
101 /// comments.
102 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
103   friend class MemorySSA;
104 
105   const MemorySSA *MSSA;
106 
107 public:
108   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
109 
110   void emitBasicBlockStartAnnot(const BasicBlock *BB,
111                                 formatted_raw_ostream &OS) override {
112     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
113       OS << "; " << *MA << "\n";
114   }
115 
116   void emitInstructionAnnot(const Instruction *I,
117                             formatted_raw_ostream &OS) override {
118     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
119       OS << "; " << *MA << "\n";
120   }
121 };
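
// For illustration only, a hand-written sketch (not actual printer output) of
// the kind of annotation this writer emits when a function is printed with
// MemorySSA attached:
//
//   ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//   ; MemoryUse(1)
//     %v = load i32, i32* %p
//
// Defs and phis are numbered, and each access prints the access it is defined
// by (liveOnEntry being the def for memory not clobbered earlier in the
// function).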
122 
123 } // end namespace llvm
124 
125 namespace {
126 
127 /// Our current alias analysis API differentiates heavily between calls and
128 /// non-calls, and functions called on one usually assert on the other.
129 /// This class encapsulates the distinction to simplify other code that wants
130 /// "Memory affecting instructions and related data" to use as a key.
131 /// For example, this class is used as a densemap key in the use optimizer.
132 class MemoryLocOrCall {
133 public:
134   bool IsCall = false;
135 
136   MemoryLocOrCall(MemoryUseOrDef *MUD)
137       : MemoryLocOrCall(MUD->getMemoryInst()) {}
138   MemoryLocOrCall(const MemoryUseOrDef *MUD)
139       : MemoryLocOrCall(MUD->getMemoryInst()) {}
140 
141   MemoryLocOrCall(Instruction *Inst) {
142     if (auto *C = dyn_cast<CallBase>(Inst)) {
143       IsCall = true;
144       Call = C;
145     } else {
146       IsCall = false;
147       // There is no such thing as a MemoryLocation for a fence instruction, and
148       // it is unique in that regard.
149       if (!isa<FenceInst>(Inst))
150         Loc = MemoryLocation::get(Inst);
151     }
152   }
153 
154   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
155 
156   const CallBase *getCall() const {
157     assert(IsCall);
158     return Call;
159   }
160 
161   MemoryLocation getLoc() const {
162     assert(!IsCall);
163     return Loc;
164   }
165 
166   bool operator==(const MemoryLocOrCall &Other) const {
167     if (IsCall != Other.IsCall)
168       return false;
169 
170     if (!IsCall)
171       return Loc == Other.Loc;
172 
173     if (Call->getCalledOperand() != Other.Call->getCalledOperand())
174       return false;
175 
176     return Call->arg_size() == Other.Call->arg_size() &&
177            std::equal(Call->arg_begin(), Call->arg_end(),
178                       Other.Call->arg_begin());
179   }
180 
181 private:
182   union {
183     const CallBase *Call;
184     MemoryLocation Loc;
185   };
186 };
187 
188 } // end anonymous namespace
189 
190 namespace llvm {
191 
192 template <> struct DenseMapInfo<MemoryLocOrCall> {
193   static inline MemoryLocOrCall getEmptyKey() {
194     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
195   }
196 
197   static inline MemoryLocOrCall getTombstoneKey() {
198     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
199   }
200 
201   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
202     if (!MLOC.IsCall)
203       return hash_combine(
204           MLOC.IsCall,
205           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
206 
207     hash_code hash =
208         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
209                                       MLOC.getCall()->getCalledOperand()));
210 
211     for (const Value *Arg : MLOC.getCall()->args())
212       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
213     return hash;
214   }
215 
216   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
217     return LHS == RHS;
218   }
219 };
220 
221 } // end namespace llvm
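
// With the DenseMapInfo specialization above, MemoryLocOrCall can be used
// directly as a DenseMap key; the use optimizer below does exactly that via
// DenseMap<MemoryLocOrCall, MemlocStackInfo> (see MemorySSA::OptimizeUses).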
222 
223 /// This does one-way checks to see if Use could theoretically be hoisted above
224 /// MayClobber. This will not check the other way around.
225 ///
226 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
227 /// MayClobber, with no potentially clobbering operations in between them.
228 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
229 static bool areLoadsReorderable(const LoadInst *Use,
230                                 const LoadInst *MayClobber) {
231   bool VolatileUse = Use->isVolatile();
232   bool VolatileClobber = MayClobber->isVolatile();
233   // Volatile operations may never be reordered with other volatile operations.
234   if (VolatileUse && VolatileClobber)
235     return false;
236   // Otherwise, volatile doesn't matter here. From the language reference:
237   // 'optimizers may change the order of volatile operations relative to
238   // non-volatile operations.'
239 
240   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
241   // is weaker, it can be moved above other loads. We just need to be sure that
242   // MayClobber isn't an acquire load, because loads can't be moved above
243   // acquire loads.
244   //
245   // Note that this explicitly *does* allow the free reordering of monotonic (or
246   // weaker) loads of the same address.
247   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
248   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
249                                                      AtomicOrdering::Acquire);
250   return !(SeqCstUse || MayClobberIsAcquire);
251 }
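
// For illustration, some concrete consequences of the rules above (a summary,
// not an exhaustive list):
//   - Two volatile loads are never reordered with each other.
//   - A seq_cst load (Use) is never hoisted above another load.
//   - No load is hoisted above an acquire (or stronger) load (MayClobber).
//   - Monotonic or weaker loads, even of the same address, may be reordered.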
252 
253 namespace {
254 
255 struct ClobberAlias {
256   bool IsClobber;
257   Optional<AliasResult> AR;
258 };
259 
260 } // end anonymous namespace
261 
262 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
263 // ignored if IsClobber = false.
264 template <typename AliasAnalysisType>
265 static ClobberAlias
266 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
267                          const Instruction *UseInst, AliasAnalysisType &AA) {
268   Instruction *DefInst = MD->getMemoryInst();
269   assert(DefInst && "Defining instruction not actually an instruction");
270   Optional<AliasResult> AR;
271 
272   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
273     // These intrinsics will show up as affecting memory, but they are just
274     // markers, mostly.
275     //
276     // FIXME: We probably don't actually want MemorySSA to model these at all
277     // (including creating MemoryAccesses for them): we just end up inventing
278     // clobbers where they don't really exist at all. Please see D43269 for
279     // context.
280     switch (II->getIntrinsicID()) {
281     case Intrinsic::invariant_start:
282     case Intrinsic::invariant_end:
283     case Intrinsic::assume:
284     case Intrinsic::experimental_noalias_scope_decl:
285       return {false, AliasResult(AliasResult::NoAlias)};
286     case Intrinsic::dbg_addr:
287     case Intrinsic::dbg_declare:
288     case Intrinsic::dbg_label:
289     case Intrinsic::dbg_value:
290       llvm_unreachable("debuginfo shouldn't have associated defs!");
291     default:
292       break;
293     }
294   }
295 
296   if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
297     ModRefInfo I = AA.getModRefInfo(DefInst, CB);
298     AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
299     return {isModOrRefSet(I), AR};
300   }
301 
302   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
303     if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
304       return {!areLoadsReorderable(UseLoad, DefLoad),
305               AliasResult(AliasResult::MayAlias)};
306 
307   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
308   AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
309   return {isModSet(I), AR};
310 }
311 
312 template <typename AliasAnalysisType>
313 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
314                                              const MemoryUseOrDef *MU,
315                                              const MemoryLocOrCall &UseMLOC,
316                                              AliasAnalysisType &AA) {
317   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
318   // to exist while MemoryLocOrCall is pushed through places.
319   if (UseMLOC.IsCall)
320     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
321                                     AA);
322   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
323                                   AA);
324 }
325 
326 // Return true when MD may clobber MU, and false otherwise.
327 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
328                                         AliasAnalysis &AA) {
329   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
330 }
331 
332 namespace {
333 
334 struct UpwardsMemoryQuery {
335   // True if our original query started off as a call
336   bool IsCall = false;
337   // The pointer location we started the query with. This will be empty if
338   // IsCall is true.
339   MemoryLocation StartingLoc;
340   // This is the instruction we were querying about.
341   const Instruction *Inst = nullptr;
342   // The MemoryAccess we actually got called with, used to test local domination
343   const MemoryAccess *OriginalAccess = nullptr;
344   Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
345   bool SkipSelfAccess = false;
346 
347   UpwardsMemoryQuery() = default;
348 
349   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
350       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
351     if (!IsCall)
352       StartingLoc = MemoryLocation::get(Inst);
353   }
354 };
355 
356 } // end anonymous namespace
357 
358 template <typename AliasAnalysisType>
359 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
360                                                    const Instruction *I) {
361   // If the memory can't be changed, then loads of the memory can't be
362   // clobbered.
363   if (auto *LI = dyn_cast<LoadInst>(I))
364     return I->hasMetadata(LLVMContext::MD_invariant_load) ||
365            AA.pointsToConstantMemory(MemoryLocation::get(LI));
366   return false;
367 }
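
// For example, a load tagged with !invariant.load metadata, or a load from
// memory that alias analysis can prove is constant (such as a constant
// global), can never be clobbered, so such uses are defined by liveOnEntry.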
368 
369 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
370 /// in between `Start` and `ClobberAt` can clobber `Start`.
371 ///
372 /// This is meant to be as simple and self-contained as possible. Because it
373 /// uses no cache, etc., it can be relatively expensive.
374 ///
375 /// \param Start     The MemoryAccess that we want to walk from.
376 /// \param ClobberAt A clobber for Start.
377 /// \param StartLoc  The MemoryLocation for Start.
378 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
379 /// \param Query     The UpwardsMemoryQuery we used for our search.
380 /// \param AA        The AliasAnalysis we used for our search.
381 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
382 
383 template <typename AliasAnalysisType>
384 LLVM_ATTRIBUTE_UNUSED static void
385 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
386                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
387                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
388                    bool AllowImpreciseClobber = false) {
389   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
390 
391   if (MSSA.isLiveOnEntryDef(Start)) {
392     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
393            "liveOnEntry must clobber itself");
394     return;
395   }
396 
397   bool FoundClobber = false;
398   DenseSet<ConstMemoryAccessPair> VisitedPhis;
399   SmallVector<ConstMemoryAccessPair, 8> Worklist;
400   Worklist.emplace_back(Start, StartLoc);
401   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
402   // is found, complain.
403   while (!Worklist.empty()) {
404     auto MAP = Worklist.pop_back_val();
405     // All we care about is that nothing from Start to ClobberAt clobbers Start.
406     // We learn nothing from revisiting nodes.
407     if (!VisitedPhis.insert(MAP).second)
408       continue;
409 
410     for (const auto *MA : def_chain(MAP.first)) {
411       if (MA == ClobberAt) {
412         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
413           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
414           // since it won't let us short-circuit.
415           //
416           // Also, note that this can't be hoisted out of the `Worklist` loop,
417           // since MD may only act as a clobber for 1 of N MemoryLocations.
418           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
419           if (!FoundClobber) {
420             ClobberAlias CA =
421                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
422             if (CA.IsClobber) {
423               FoundClobber = true;
424               // Not used: CA.AR;
425             }
426           }
427         }
428         break;
429       }
430 
431       // We should never hit liveOnEntry, unless it's the clobber.
432       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
433 
434       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
435         // If Start is a Def, skip self.
436         if (MD == Start)
437           continue;
438 
439         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
440                     .IsClobber &&
441                "Found clobber before reaching ClobberAt!");
442         continue;
443       }
444 
445       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
446         (void)MU;
447         assert(MU == Start &&
448                "Can only find use in def chain if Start is a use");
449         continue;
450       }
451 
452       assert(isa<MemoryPhi>(MA));
453 
454       // Add reachable phi predecessors
455       for (auto ItB = upward_defs_begin(
456                     {const_cast<MemoryAccess *>(MA), MAP.second},
457                     MSSA.getDomTree()),
458                 ItE = upward_defs_end();
459            ItB != ItE; ++ItB)
460         if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
461           Worklist.emplace_back(*ItB);
462     }
463   }
464 
465   // If the verify is done following an optimization, it's possible that
466   // ClobberAt was a conservative clobber that we can now infer is not a
467   // true clobbering access. Don't fail the verify if that's the case.
468   // We do have accesses that claim they're optimized, but could be optimized
469   // further. Updating all these can be expensive, so allow it for now (FIXME).
470   if (AllowImpreciseClobber)
471     return;
472 
473   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
474   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
475   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
476          "ClobberAt never acted as a clobber");
477 }
478 
479 namespace {
480 
481 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
482 /// in one class.
483 template <class AliasAnalysisType> class ClobberWalker {
484   /// Save a few bytes by using unsigned instead of size_t.
485   using ListIndex = unsigned;
486 
487   /// Represents a span of contiguous MemoryDefs, potentially ending in a
488   /// MemoryPhi.
489   struct DefPath {
490     MemoryLocation Loc;
491     // Note that, because we always walk in reverse, Last will always dominate
492     // First. Also note that First and Last are inclusive.
493     MemoryAccess *First;
494     MemoryAccess *Last;
495     Optional<ListIndex> Previous;
496 
497     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
498             Optional<ListIndex> Previous)
499         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
500 
501     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
502             Optional<ListIndex> Previous)
503         : DefPath(Loc, Init, Init, Previous) {}
504   };
505 
506   const MemorySSA &MSSA;
507   AliasAnalysisType &AA;
508   DominatorTree &DT;
509   UpwardsMemoryQuery *Query;
510   unsigned *UpwardWalkLimit;
511 
512   // Phi optimization bookkeeping:
513   // List of DefPath to process during the current phi optimization walk.
514   SmallVector<DefPath, 32> Paths;
515   // List of visited <Access, Location> pairs; we can skip paths already
516   // visited with the same memory location.
517   DenseSet<ConstMemoryAccessPair> VisitedPhis;
518   // Record if phi translation has been performed during the current phi
519   // optimization walk, as merging alias results after phi translation can
520   // yield incorrect results. Context in PR46156.
521   bool PerformedPhiTranslation = false;
522 
523   /// Find the nearest def or phi that `From` can legally be optimized to.
524   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
525     assert(From->getNumOperands() && "Phi with no operands?");
526 
527     BasicBlock *BB = From->getBlock();
528     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
529     DomTreeNode *Node = DT.getNode(BB);
530     while ((Node = Node->getIDom())) {
531       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
532       if (Defs)
533         return &*Defs->rbegin();
534     }
535     return Result;
536   }
537 
538   /// Result of calling walkToPhiOrClobber.
539   struct UpwardsWalkResult {
540     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
541     /// both. Includes alias info when a clobber is found.
542     MemoryAccess *Result;
543     bool IsKnownClobber;
544     Optional<AliasResult> AR;
545   };
546 
547   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
548   /// This will update Desc.Last as it walks. It will (optionally) also stop at
549   /// StopAt.
550   ///
551   /// This does not test whether StopAt is a clobber.
552   UpwardsWalkResult
553   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
554                      const MemoryAccess *SkipStopAt = nullptr) const {
555     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
556     assert(UpwardWalkLimit && "Need a valid walk limit");
557     bool LimitAlreadyReached = false;
558     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
559     // it to 1. This will not do any alias() calls. It either returns in the
560     // first iteration in the loop below, or is set back to 0 if all def chains
561     // are free of MemoryDefs.
562     if (!*UpwardWalkLimit) {
563       *UpwardWalkLimit = 1;
564       LimitAlreadyReached = true;
565     }
566 
567     for (MemoryAccess *Current : def_chain(Desc.Last)) {
568       Desc.Last = Current;
569       if (Current == StopAt || Current == SkipStopAt)
570         return {Current, false, AliasResult(AliasResult::MayAlias)};
571 
572       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
573         if (MSSA.isLiveOnEntryDef(MD))
574           return {MD, true, AliasResult(AliasResult::MustAlias)};
575 
576         if (!--*UpwardWalkLimit)
577           return {Current, true, AliasResult(AliasResult::MayAlias)};
578 
579         ClobberAlias CA =
580             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
581         if (CA.IsClobber)
582           return {MD, true, CA.AR};
583       }
584     }
585 
586     if (LimitAlreadyReached)
587       *UpwardWalkLimit = 0;
588 
589     assert(isa<MemoryPhi>(Desc.Last) &&
590            "Ended at a non-clobber that's not a phi?");
591     return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
592   }
593 
594   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
595                    ListIndex PriorNode) {
596     auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
597                                              &PerformedPhiTranslation);
598     auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
599     for (const MemoryAccessPair &P : UpwardDefs) {
600       PausedSearches.push_back(Paths.size());
601       Paths.emplace_back(P.second, P.first, PriorNode);
602     }
603   }
604 
605   /// Represents a search that terminated after finding a clobber. This clobber
606   /// may or may not be present in the path of defs from LastNode..SearchStart,
607   /// since it may have been retrieved from cache.
608   struct TerminatedPath {
609     MemoryAccess *Clobber;
610     ListIndex LastNode;
611   };
612 
613   /// Get an access that keeps us from optimizing to the given phi.
614   ///
615   /// PausedSearches is an array of indices into the Paths array. Its incoming
616   /// value is the indices of searches that stopped at the last phi optimization
617   /// target. It's left in an unspecified state.
618   ///
619   /// If this returns None, NewPaused is a vector of searches that terminated
620   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
621   Optional<TerminatedPath>
622   getBlockingAccess(const MemoryAccess *StopWhere,
623                     SmallVectorImpl<ListIndex> &PausedSearches,
624                     SmallVectorImpl<ListIndex> &NewPaused,
625                     SmallVectorImpl<TerminatedPath> &Terminated) {
626     assert(!PausedSearches.empty() && "No searches to continue?");
627 
628     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
629     // PausedSearches as our stack.
630     while (!PausedSearches.empty()) {
631       ListIndex PathIndex = PausedSearches.pop_back_val();
632       DefPath &Node = Paths[PathIndex];
633 
634       // If we've already visited this path with this MemoryLocation, we don't
635       // need to do so again.
636       //
637       // NOTE: The fact that we just drop these paths on the ground makes
638       // caching behavior sporadic. E.g., given a diamond:
639       //  A
640       // B C
641       //  D
642       //
643       // ...If we walk D, B, A, C, we'll only cache the result of phi
644       // optimization for A, B, and D; C will be skipped because it dies here.
645       // This arguably isn't the worst thing ever, since:
646       //   - We generally query things in a top-down order, so if we got below D
647       //     without needing cache entries for {C, MemLoc}, then chances are
648       //     that those cache entries would end up ultimately unused.
649       //   - We still cache things for A, so C only needs to walk up a bit.
650       // If this behavior becomes problematic, we can fix without a ton of extra
651       // work.
652       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
653         if (PerformedPhiTranslation) {
654           // If visiting this path performed Phi translation, don't continue,
655           // since it may not be correct to merge results from two paths if one
656           // relies on the phi translation.
657           TerminatedPath Term{Node.Last, PathIndex};
658           return Term;
659         }
660         continue;
661       }
662 
663       const MemoryAccess *SkipStopWhere = nullptr;
664       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
665         assert(isa<MemoryDef>(Query->OriginalAccess));
666         SkipStopWhere = Query->OriginalAccess;
667       }
668 
669       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
670                                                  /*StopAt=*/StopWhere,
671                                                  /*SkipStopAt=*/SkipStopWhere);
672       if (Res.IsKnownClobber) {
673         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
674 
675         // If this wasn't a cache hit, we hit a clobber when walking. That's a
676         // failure.
677         TerminatedPath Term{Res.Result, PathIndex};
678         if (!MSSA.dominates(Res.Result, StopWhere))
679           return Term;
680 
681         // Otherwise, it's a valid thing to potentially optimize to.
682         Terminated.push_back(Term);
683         continue;
684       }
685 
686       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
687         // We've hit our target. Save this path off in case we want to continue
688         // walking. If we are in the mode of skipping the OriginalAccess, and
689         // we've reached back to the OriginalAccess, do not save path, we've
690         // just looped back to self.
691         if (Res.Result != SkipStopWhere)
692           NewPaused.push_back(PathIndex);
693         continue;
694       }
695 
696       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
697       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
698     }
699 
700     return None;
701   }
702 
703   template <typename T, typename Walker>
704   struct generic_def_path_iterator
705       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
706                                     std::forward_iterator_tag, T *> {
707     generic_def_path_iterator() {}
708     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
709 
710     T &operator*() const { return curNode(); }
711 
712     generic_def_path_iterator &operator++() {
713       N = curNode().Previous;
714       return *this;
715     }
716 
717     bool operator==(const generic_def_path_iterator &O) const {
718       if (N.hasValue() != O.N.hasValue())
719         return false;
720       return !N.hasValue() || *N == *O.N;
721     }
722 
723   private:
724     T &curNode() const { return W->Paths[*N]; }
725 
726     Walker *W = nullptr;
727     Optional<ListIndex> N = None;
728   };
729 
730   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
731   using const_def_path_iterator =
732       generic_def_path_iterator<const DefPath, const ClobberWalker>;
733 
734   iterator_range<def_path_iterator> def_path(ListIndex From) {
735     return make_range(def_path_iterator(this, From), def_path_iterator());
736   }
737 
738   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
739     return make_range(const_def_path_iterator(this, From),
740                       const_def_path_iterator());
741   }
742 
743   struct OptznResult {
744     /// The path that contains our result.
745     TerminatedPath PrimaryClobber;
746     /// The paths that we can legally cache back from, but that aren't
747     /// necessarily the result of the Phi optimization.
748     SmallVector<TerminatedPath, 4> OtherClobbers;
749   };
750 
751   ListIndex defPathIndex(const DefPath &N) const {
752     // The assert looks nicer if we don't need to do &N
753     const DefPath *NP = &N;
754     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
755            "Out of bounds DefPath!");
756     return NP - &Paths.front();
757   }
758 
759   /// Try to optimize a phi as best we can. Returns a SmallVector of Paths
760   /// that act as legal clobbers. Note that this won't return *all* clobbers.
761   ///
762   /// Phi optimization algorithm tl;dr:
763   ///   - Find the earliest def/phi, A, we can optimize to
764   ///   - Find if all paths from the starting memory access ultimately reach A
765   ///     - If not, optimization isn't possible.
766   ///     - Otherwise, walk from A to another clobber or phi, A'.
767   ///       - If A' is a def, we're done.
768   ///       - If A' is a phi, try to optimize it.
769   ///
770   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
771   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
772   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
773                              const MemoryLocation &Loc) {
774     assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
775            "Reset the optimization state.");
776 
777     Paths.emplace_back(Loc, Start, Phi, None);
778     // Stores how many "valid" optimization nodes we had prior to calling
779     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
780     auto PriorPathsSize = Paths.size();
781 
782     SmallVector<ListIndex, 16> PausedSearches;
783     SmallVector<ListIndex, 8> NewPaused;
784     SmallVector<TerminatedPath, 4> TerminatedPaths;
785 
786     addSearches(Phi, PausedSearches, 0);
787 
788     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
789     // Paths.
790     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
791       assert(!Paths.empty() && "Need a path to move");
792       auto Dom = Paths.begin();
793       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
794         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
795           Dom = I;
796       auto Last = Paths.end() - 1;
797       if (Last != Dom)
798         std::iter_swap(Last, Dom);
799     };
800 
801     MemoryPhi *Current = Phi;
802     while (true) {
803       assert(!MSSA.isLiveOnEntryDef(Current) &&
804              "liveOnEntry wasn't treated as a clobber?");
805 
806       const auto *Target = getWalkTarget(Current);
807       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
808       // optimization for the prior phi.
809       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
810         return MSSA.dominates(P.Clobber, Target);
811       }));
812 
813       // FIXME: This is broken, because the Blocker may be reported to be
814       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
815       // For the moment, this is fine, since we do nothing with blocker info.
816       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
817               Target, PausedSearches, NewPaused, TerminatedPaths)) {
818 
819         // Find the node we started at. We can't search based on N->Last, since
820         // we may have gone around a loop with a different MemoryLocation.
821         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
822           return defPathIndex(N) < PriorPathsSize;
823         });
824         assert(Iter != def_path_iterator());
825 
826         DefPath &CurNode = *Iter;
827         assert(CurNode.Last == Current);
828 
829         // Two things:
830         // A. We can't reliably cache all of NewPaused back. Consider a case
831         //    where we have two paths in NewPaused; one of which can't optimize
832         //    above this phi, whereas the other can. If we cache the second path
833         //    back, we'll end up with suboptimal cache entries. We can handle
834         //    cases like this a bit better when we either try to find all
835         //    clobbers that block phi optimization, or when our cache starts
836         //    supporting unfinished searches.
837         // B. We can't reliably cache TerminatedPaths back here without doing
838         //    extra checks; consider a case like:
839         //       T
840         //      / \
841         //     D   C
842         //      \ /
843         //       S
844         //    Where T is our target, C is a node with a clobber on it, D is a
845         //    diamond (with a clobber *only* on the left or right node, N), and
846         //    S is our start. Say we walk to D, through the node opposite N
847         //    (read: ignoring the clobber), and see a cache entry in the top
848         //    node of D. That cache entry gets put into TerminatedPaths. We then
849         //    walk up to C (N is later in our worklist), find the clobber, and
850         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
851         //    the bottom part of D to the cached clobber, ignoring the clobber
852         //    in N. Again, this problem goes away if we start tracking all
853         //    blockers for a given phi optimization.
854         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
855         return {Result, {}};
856       }
857 
858       // If there's nothing left to search, then all paths led to valid clobbers
859       // that we got from our cache; pick the nearest to the start, and allow
860       // the rest to be cached back.
861       if (NewPaused.empty()) {
862         MoveDominatedPathToEnd(TerminatedPaths);
863         TerminatedPath Result = TerminatedPaths.pop_back_val();
864         return {Result, std::move(TerminatedPaths)};
865       }
866 
867       MemoryAccess *DefChainEnd = nullptr;
868       SmallVector<TerminatedPath, 4> Clobbers;
869       for (ListIndex Paused : NewPaused) {
870         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
871         if (WR.IsKnownClobber)
872           Clobbers.push_back({WR.Result, Paused});
873         else
874           // Micro-opt: If we hit the end of the chain, save it.
875           DefChainEnd = WR.Result;
876       }
877 
878       if (!TerminatedPaths.empty()) {
879         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
880         // do it now.
881         if (!DefChainEnd)
882           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
883             DefChainEnd = MA;
884         assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
885 
886         // If any of the terminated paths don't dominate the phi we'll try to
887         // optimize, we need to figure out what they are and quit.
888         const BasicBlock *ChainBB = DefChainEnd->getBlock();
889         for (const TerminatedPath &TP : TerminatedPaths) {
890           // Because we know that DefChainEnd is as "high" as we can go, we
891           // don't need local dominance checks; BB dominance is sufficient.
892           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
893             Clobbers.push_back(TP);
894         }
895       }
896 
897       // If we have clobbers in the def chain, find the one closest to Current
898       // and quit.
899       if (!Clobbers.empty()) {
900         MoveDominatedPathToEnd(Clobbers);
901         TerminatedPath Result = Clobbers.pop_back_val();
902         return {Result, std::move(Clobbers)};
903       }
904 
905       assert(all_of(NewPaused,
906                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
907 
908       // Because liveOnEntry is a clobber, this must be a phi.
909       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
910 
911       PriorPathsSize = Paths.size();
912       PausedSearches.clear();
913       for (ListIndex I : NewPaused)
914         addSearches(DefChainPhi, PausedSearches, I);
915       NewPaused.clear();
916 
917       Current = DefChainPhi;
918     }
919   }
920 
921   void verifyOptResult(const OptznResult &R) const {
922     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
923       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
924     }));
925   }
926 
927   void resetPhiOptznState() {
928     Paths.clear();
929     VisitedPhis.clear();
930     PerformedPhiTranslation = false;
931   }
932 
933 public:
934   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
935       : MSSA(MSSA), AA(AA), DT(DT) {}
936 
937   AliasAnalysisType *getAA() { return &AA; }
938   /// Finds the nearest clobber for the given query, optimizing phis if
939   /// possible.
940   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
941                             unsigned &UpWalkLimit) {
942     Query = &Q;
943     UpwardWalkLimit = &UpWalkLimit;
944     // Starting limit must be > 0.
945     if (!UpWalkLimit)
946       UpWalkLimit++;
947 
948     MemoryAccess *Current = Start;
949     // This walker pretends uses don't exist. If we're handed one, silently grab
950     // its def. (This has the nice side-effect of ensuring we never cache uses)
951     if (auto *MU = dyn_cast<MemoryUse>(Start))
952       Current = MU->getDefiningAccess();
953 
954     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
955     // Fast path for the overly-common case (no crazy phi optimization
956     // necessary)
957     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
958     MemoryAccess *Result;
959     if (WalkResult.IsKnownClobber) {
960       Result = WalkResult.Result;
961       Q.AR = WalkResult.AR;
962     } else {
963       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
964                                           Current, Q.StartingLoc);
965       verifyOptResult(OptRes);
966       resetPhiOptznState();
967       Result = OptRes.PrimaryClobber.Clobber;
968     }
969 
970 #ifdef EXPENSIVE_CHECKS
971     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
972       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
973 #endif
974     return Result;
975   }
976 };
977 
978 struct RenamePassData {
979   DomTreeNode *DTN;
980   DomTreeNode::const_iterator ChildIt;
981   MemoryAccess *IncomingVal;
982 
983   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
984                  MemoryAccess *M)
985       : DTN(D), ChildIt(It), IncomingVal(M) {}
986 
987   void swap(RenamePassData &RHS) {
988     std::swap(DTN, RHS.DTN);
989     std::swap(ChildIt, RHS.ChildIt);
990     std::swap(IncomingVal, RHS.IncomingVal);
991   }
992 };
993 
994 } // end anonymous namespace
995 
996 namespace llvm {
997 
998 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
999   ClobberWalker<AliasAnalysisType> Walker;
1000   MemorySSA *MSSA;
1001 
1002 public:
1003   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1004       : Walker(*M, *A, *D), MSSA(M) {}
1005 
1006   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1007                                               const MemoryLocation &,
1008                                               unsigned &);
1009   // The third bool argument defines whether the clobber search should skip the
1010   // original queried access. If true, there will be a follow-up query searching
1011   // for a clobber access past "self". Note that the Optimized access is not
1012   // updated if a new clobber is found by this SkipSelf search. If this
1013   // additional query becomes heavily used we may decide to cache the result.
1014   // Walker instantiations will decide how to set the SkipSelf bool.
1015   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool);
1016 };
1017 
1018 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1019 /// longer does caching on its own, but the name has been retained for the
1020 /// moment.
1021 template <class AliasAnalysisType>
1022 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1023   ClobberWalkerBase<AliasAnalysisType> *Walker;
1024 
1025 public:
1026   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1027       : MemorySSAWalker(M), Walker(W) {}
1028   ~CachingWalker() override = default;
1029 
1030   using MemorySSAWalker::getClobberingMemoryAccess;
1031 
1032   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1033     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1034   }
1035   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1036                                           const MemoryLocation &Loc,
1037                                           unsigned &UWL) {
1038     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1039   }
1040 
1041   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1042     unsigned UpwardWalkLimit = MaxCheckLimit;
1043     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1044   }
1045   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1046                                           const MemoryLocation &Loc) override {
1047     unsigned UpwardWalkLimit = MaxCheckLimit;
1048     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1049   }
1050 
1051   void invalidateInfo(MemoryAccess *MA) override {
1052     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1053       MUD->resetOptimized();
1054   }
1055 };
1056 
1057 template <class AliasAnalysisType>
1058 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1059   ClobberWalkerBase<AliasAnalysisType> *Walker;
1060 
1061 public:
1062   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1063       : MemorySSAWalker(M), Walker(W) {}
1064   ~SkipSelfWalker() override = default;
1065 
1066   using MemorySSAWalker::getClobberingMemoryAccess;
1067 
1068   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1069     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1070   }
1071   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1072                                           const MemoryLocation &Loc,
1073                                           unsigned &UWL) {
1074     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1075   }
1076 
1077   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1078     unsigned UpwardWalkLimit = MaxCheckLimit;
1079     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1080   }
1081   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1082                                           const MemoryLocation &Loc) override {
1083     unsigned UpwardWalkLimit = MaxCheckLimit;
1084     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1085   }
1086 
1087   void invalidateInfo(MemoryAccess *MA) override {
1088     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1089       MUD->resetOptimized();
1090   }
1091 };
1092 
1093 } // end namespace llvm
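
// For illustration only, a minimal sketch of how clients typically query these
// walkers; `MSSA` and `I` are assumed names for a MemorySSA result and a
// memory-touching instruction, and are not defined in this file:
//
//   MemoryUseOrDef *MA = MSSA.getMemoryAccess(I);
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(MA);
//
// getSkipSelfWalker() answers the same kind of query but looks for a clobber
// past the queried access itself, as described above.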
1094 
1095 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1096                                     bool RenameAllUses) {
1097   // Pass through values to our successors
1098   for (const BasicBlock *S : successors(BB)) {
1099     auto It = PerBlockAccesses.find(S);
1100     // Rename the phi nodes in our successor block
1101     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1102       continue;
1103     AccessList *Accesses = It->second.get();
1104     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1105     if (RenameAllUses) {
1106       bool ReplacementDone = false;
1107       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1108         if (Phi->getIncomingBlock(I) == BB) {
1109           Phi->setIncomingValue(I, IncomingVal);
1110           ReplacementDone = true;
1111         }
1112       (void) ReplacementDone;
1113       assert(ReplacementDone && "Incomplete phi during partial rename");
1114     } else
1115       Phi->addIncoming(IncomingVal, BB);
1116   }
1117 }
1118 
1119 /// Rename a single basic block into MemorySSA form.
1120 /// Uses the standard SSA renaming algorithm.
1121 /// \returns The new incoming value.
1122 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1123                                      bool RenameAllUses) {
1124   auto It = PerBlockAccesses.find(BB);
1125   // Skip most processing if the list is empty.
1126   if (It != PerBlockAccesses.end()) {
1127     AccessList *Accesses = It->second.get();
1128     for (MemoryAccess &L : *Accesses) {
1129       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1130         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1131           MUD->setDefiningAccess(IncomingVal);
1132         if (isa<MemoryDef>(&L))
1133           IncomingVal = &L;
1134       } else {
1135         IncomingVal = &L;
1136       }
1137     }
1138   }
1139   return IncomingVal;
1140 }
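
// For example (a sketch): if a block's access list is
//   MemoryUse, MemoryDef, MemoryUse
// then the first use is defined by the incoming value, the MemoryDef both
// takes the incoming value as its defining access and becomes the new incoming
// value, and the second use is defined by that MemoryDef, which is also what
// gets propagated to successor phis.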
1141 
1142 /// This is the standard SSA renaming algorithm.
1143 ///
1144 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1145 /// in phi nodes in our successors.
1146 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1147                            SmallPtrSetImpl<BasicBlock *> &Visited,
1148                            bool SkipVisited, bool RenameAllUses) {
1149   assert(Root && "Trying to rename accesses in an unreachable block");
1150 
1151   SmallVector<RenamePassData, 32> WorkStack;
1152   // Skip everything if we already renamed this block and we are skipping.
1153   // Note: You can't sink this into the if, because we need it to occur
1154   // regardless of whether we skip blocks or not.
1155   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1156   if (SkipVisited && AlreadyVisited)
1157     return;
1158 
1159   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1160   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1161   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1162 
1163   while (!WorkStack.empty()) {
1164     DomTreeNode *Node = WorkStack.back().DTN;
1165     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1166     IncomingVal = WorkStack.back().IncomingVal;
1167 
1168     if (ChildIt == Node->end()) {
1169       WorkStack.pop_back();
1170     } else {
1171       DomTreeNode *Child = *ChildIt;
1172       ++WorkStack.back().ChildIt;
1173       BasicBlock *BB = Child->getBlock();
1174       // Note: You can't sink this into the if, because we need it to occur
1175       // regardless of whether we skip blocks or not.
1176       AlreadyVisited = !Visited.insert(BB).second;
1177       if (SkipVisited && AlreadyVisited) {
1178         // We already visited this during our renaming, which can happen when
1179         // being asked to rename multiple blocks. Figure out the incoming val,
1180         // which is the last def.
1181         // Incoming value can only change if there is a block def, and in that
1182         // case, it's the last block def in the list.
1183         if (auto *BlockDefs = getWritableBlockDefs(BB))
1184           IncomingVal = &*BlockDefs->rbegin();
1185       } else
1186         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1187       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1188       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1189     }
1190   }
1191 }
1192 
1193 /// This handles unreachable block accesses by deleting phi nodes in
1194 /// unreachable blocks, and marking all other unreachable MemoryAccesses as
1195 /// being uses of the live on entry definition.
1196 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1197   assert(!DT->isReachableFromEntry(BB) &&
1198          "Reachable block found while handling unreachable blocks");
1199 
1200   // Make sure phi nodes in our reachable successors end up with a
1201   // LiveOnEntryDef for our incoming edge, even though our block is forward
1202   // unreachable.  We could just disconnect these blocks from the CFG fully,
1203   // but we do not right now.
1204   for (const BasicBlock *S : successors(BB)) {
1205     if (!DT->isReachableFromEntry(S))
1206       continue;
1207     auto It = PerBlockAccesses.find(S);
1208     // Rename the phi nodes in our successor block
1209     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1210       continue;
1211     AccessList *Accesses = It->second.get();
1212     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1213     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1214   }
1215 
1216   auto It = PerBlockAccesses.find(BB);
1217   if (It == PerBlockAccesses.end())
1218     return;
1219 
1220   auto &Accesses = It->second;
1221   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1222     auto Next = std::next(AI);
1223     // If we have a phi, just remove it. We are going to replace all
1224     // users with live on entry.
1225     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1226       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1227     else
1228       Accesses->erase(AI);
1229     AI = Next;
1230   }
1231 }
1232 
1233 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1234     : AA(nullptr), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1235       SkipWalker(nullptr), NextID(0) {
1236   // Build MemorySSA using a batch alias analysis. This reuses the internal
1237   // state that AA collects during an alias()/getModRefInfo() call. This is
1238   // safe because there are no CFG changes while building MemorySSA, and it can
1239   // significantly reduce the time spent by the compiler in AA, because we will
1240   // make queries about all the instructions in the Function.
1241   assert(AA && "No alias analysis?");
1242   BatchAAResults BatchAA(*AA);
1243   buildMemorySSA(BatchAA);
1244   // Intentionally leave AA as nullptr while building so we don't accidentally
1245   // use the non-batch AliasAnalysis.
1246   this->AA = AA;
1247   // Also create the walker here.
1248   getWalker();
1249 }
1250 
1251 MemorySSA::~MemorySSA() {
1252   // Drop all our references
1253   for (const auto &Pair : PerBlockAccesses)
1254     for (MemoryAccess &MA : *Pair.second)
1255       MA.dropAllReferences();
1256 }
1257 
1258 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1259   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1260 
1261   if (Res.second)
1262     Res.first->second = std::make_unique<AccessList>();
1263   return Res.first->second.get();
1264 }
1265 
1266 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1267   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1268 
1269   if (Res.second)
1270     Res.first->second = std::make_unique<DefsList>();
1271   return Res.first->second.get();
1272 }
1273 
1274 namespace llvm {
1275 
1276 /// This class is a batch walker of all MemoryUse's in the program, and points
1277 /// their defining access at the thing that actually clobbers them.  Because it
1278 /// is a batch walker that touches everything, it does not operate like the
1279 /// other walkers.  This walker is basically performing a top-down SSA renaming
1280 /// pass, where the version stack is used as the cache.  This enables it to be
1281 /// significantly more time and memory efficient than using the regular walker,
1282 /// which is walking bottom-up.
1283 class MemorySSA::OptimizeUses {
1284 public:
1285   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1286                BatchAAResults *BAA, DominatorTree *DT)
1287       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1288 
1289   void optimizeUses();
1290 
1291 private:
1292   /// This represents where a given MemoryLocation is in the stack.
1293   struct MemlocStackInfo {
1294     // This essentially is keeping track of versions of the stack. Whenever
1295     // the stack changes due to pushes or pops, these versions increase.
1296     unsigned long StackEpoch;
1297     unsigned long PopEpoch;
1298     // This is the lower bound of places on the stack to check. It is equal to
1299     // the place the last stack walk ended.
1300     // Note: Correctness depends on this being initialized to 0, which DenseMap
1301     // does.
1302     unsigned long LowerBound;
1303     const BasicBlock *LowerBoundBlock;
1304     // This is where the last walk for this memory location ended.
1305     unsigned long LastKill;
1306     bool LastKillValid;
1307     Optional<AliasResult> AR;
1308   };
1309 
1310   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1311                            SmallVectorImpl<MemoryAccess *> &,
1312                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1313 
1314   MemorySSA *MSSA;
1315   CachingWalker<BatchAAResults> *Walker;
1316   BatchAAResults *AA;
1317   DominatorTree *DT;
1318 };
1319 
1320 } // end namespace llvm
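
// For illustration (a hand-written sketch, not real output): while walking the
// dominator tree top-down, the version stack might look like
//
//   [ liveOnEntry, 1 = MemoryDef, 2 = MemoryPhi, 4 = MemoryDef ]
//
// and a MemoryUse in the current block only needs to scan the slice between
// its MemoryLocation's LowerBound and the top of the stack; everything below
// LowerBound was already checked the last time that location was examined.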
1321 
1322 /// Optimize the uses in a given block. This is basically the SSA renaming
1323 /// algorithm, with one caveat: We are able to use a single stack for all
1324 /// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
1325 /// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
1326 /// going to be some position in that stack of possible ones.
1327 ///
1328 /// We track the stack positions that each MemoryLocation needs
1329 /// to check, and last ended at.  This is because we only want to check the
1330 /// things that changed since last time.  The same MemoryLocation should
1331 /// get clobbered by the same store (getModRefInfo does not use invariantness or
1332 /// things like this, and if it ever does, we can modify MemoryLocOrCall to
1333 /// include the relevant data).
1334 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1335     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1336     SmallVectorImpl<MemoryAccess *> &VersionStack,
1337     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1338 
1339   // If there are no accesses, there is nothing to do.
1340   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1341   if (Accesses == nullptr)
1342     return;
1343 
1344   // Pop everything that doesn't dominate the current block off the stack,
1345   // and increment the PopEpoch to account for this.
1346   while (true) {
1347     assert(
1348         !VersionStack.empty() &&
1349         "Version stack should have liveOnEntry sentinel dominating everything");
1350     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1351     if (DT->dominates(BackBlock, BB))
1352       break;
1353     while (VersionStack.back()->getBlock() == BackBlock)
1354       VersionStack.pop_back();
1355     ++PopEpoch;
1356   }
1357 
1358   for (MemoryAccess &MA : *Accesses) {
1359     auto *MU = dyn_cast<MemoryUse>(&MA);
1360     if (!MU) {
1361       VersionStack.push_back(&MA);
1362       ++StackEpoch;
1363       continue;
1364     }
1365 
1366     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1367       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1368       continue;
1369     }
1370 
1371     MemoryLocOrCall UseMLOC(MU);
1372     auto &LocInfo = LocStackInfo[UseMLOC];
1373     // If the pop epoch changed, it means we've removed stuff from the top of
1374     // the stack due to changing blocks. We may have to reset the lower bound
1375     // or last kill info.
1376     if (LocInfo.PopEpoch != PopEpoch) {
1377       LocInfo.PopEpoch = PopEpoch;
1378       LocInfo.StackEpoch = StackEpoch;
1379       // If the lower bound was in something that no longer dominates us, we
1380       // have to reset it.
1381       // We can't simply track stack size, because the stack may have had
1382       // pushes/pops in the meantime.
1383       // XXX: This is non-optimal, but is only slower in cases with heavily
1384       // branching dominator trees.  Getting the optimal number of queries
1385       // would require making LowerBound and LastKill per-location stacks, and
1386       // popping them until the top of the stack dominates us.  This does not
1387       // seem worth it ATM.  A much cheaper optimization would be to always
1388       // explore the deepest branch of the dominator tree first, which would
1389       // guarantee the reset happens on the smallest set of blocks.
1390       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1391           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1392         // Reset the lower bound of things to check.
1393         // TODO: Some day we should be able to reset to last kill, rather than
1394         // 0.
1395         LocInfo.LowerBound = 0;
1396         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1397         LocInfo.LastKillValid = false;
1398       }
1399     } else if (LocInfo.StackEpoch != StackEpoch) {
1400       // If all that has changed is the StackEpoch, we only have to check the
1401       // new things on the stack, because we've checked everything before.  In
1402       // this case, the lower bound of things to check remains the same.
1403       LocInfo.PopEpoch = PopEpoch;
1404       LocInfo.StackEpoch = StackEpoch;
1405     }
1406     if (!LocInfo.LastKillValid) {
1407       LocInfo.LastKill = VersionStack.size() - 1;
1408       LocInfo.LastKillValid = true;
1409       LocInfo.AR = AliasResult::MayAlias;
1410     }
1411 
1412     // At this point, we should have corrected last kill and LowerBound to be
1413     // in bounds.
1414     assert(LocInfo.LowerBound < VersionStack.size() &&
1415            "Lower bound out of range");
1416     assert(LocInfo.LastKill < VersionStack.size() &&
1417            "Last kill info out of range");
1418     // In any case, the new upper bound is the top of the stack.
1419     unsigned long UpperBound = VersionStack.size() - 1;
1420 
1421     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1422       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1423                         << *(MU->getMemoryInst()) << ")"
1424                         << " because there are "
1425                         << UpperBound - LocInfo.LowerBound
1426                         << " stores to disambiguate\n");
1427       // Because we did not walk, LastKill is no longer valid, as this may
1428       // have been a kill.
1429       LocInfo.LastKillValid = false;
1430       continue;
1431     }
1432     bool FoundClobberResult = false;
1433     unsigned UpwardWalkLimit = MaxCheckLimit;
1434     while (UpperBound > LocInfo.LowerBound) {
1435       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1436         // For phis, use the walker to see where we ended up, and go there.
1437         MemoryAccess *Result =
1438             Walker->getClobberingMemoryAccess(MU, UpwardWalkLimit);
1439         // We are guaranteed to find it, or something is wrong.
1440         while (VersionStack[UpperBound] != Result) {
1441           assert(UpperBound != 0);
1442           --UpperBound;
1443         }
1444         FoundClobberResult = true;
1445         break;
1446       }
1447 
1448       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1449       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1450       if (CA.IsClobber) {
1451         FoundClobberResult = true;
1452         LocInfo.AR = CA.AR;
1453         break;
1454       }
1455       --UpperBound;
1456     }
1457 
1458     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1459 
1460     // At the end of this loop, UpperBound is either a clobber or the lower
1461     // bound.  PHI walking may cause it to be < LowerBound, and in fact < LastKill.
1462     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1463       // We are now last killed by the access we walked to.
1464       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1465         LocInfo.AR = None;
1466       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1467       LocInfo.LastKill = UpperBound;
1468     } else {
1469       // Otherwise, we checked all the new ones, and now we know we can get to
1470       // LastKill.
1471       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1472     }
1473     LocInfo.LowerBound = VersionStack.size() - 1;
1474     LocInfo.LowerBoundBlock = BB;
1475   }
1476 }
1477 
1478 /// Optimize uses to point to their actual clobbering definitions.
1479 void MemorySSA::OptimizeUses::optimizeUses() {
1480   SmallVector<MemoryAccess *, 16> VersionStack;
1481   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1482   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1483 
1484   unsigned long StackEpoch = 1;
1485   unsigned long PopEpoch = 1;
1486   // We perform a non-recursive top-down dominator tree walk.
1487   for (const auto *DomNode : depth_first(DT->getRootNode()))
1488     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1489                         LocStackInfo);
1490 }
1491 
1492 void MemorySSA::placePHINodes(
1493     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1494   // Determine where our MemoryPhis should go.
1495   ForwardIDFCalculator IDFs(*DT);
1496   IDFs.setDefiningBlocks(DefiningBlocks);
1497   SmallVector<BasicBlock *, 32> IDFBlocks;
1498   IDFs.calculate(IDFBlocks);
1499 
1500   // Now place MemoryPhi nodes.
1501   for (auto &BB : IDFBlocks)
1502     createMemoryPhi(BB);
1503 }
1504 
1505 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1506   // We create an access to represent "live on entry", for things like
1507   // arguments or users of globals, where the memory they use is defined before
1508   // the beginning of the function. We do not actually insert it into the IR.
1509   // We do not define a live on exit for the immediate uses, and thus our
1510   // semantics do *not* imply that something with no immediate uses can simply
1511   // be removed.
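  // In the printed form, such a use shows up as, e.g. (illustrative):
  //   ; MemoryUse(liveOnEntry)
  //   %x = load i32, i32* %arg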
1512   BasicBlock &StartingPoint = F.getEntryBlock();
1513   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1514                                      &StartingPoint, NextID++));
1515 
1516   // We maintain lists of memory accesses per block, trading memory for time;
1517   // the alternative would be to look up the memory access for every possible
1518   // instruction in the stream.
1519   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1520   // Go through each block, figure out where defs occur, and chain together all
1521   // the accesses.
1522   for (BasicBlock &B : F) {
1523     bool InsertIntoDef = false;
1524     AccessList *Accesses = nullptr;
1525     DefsList *Defs = nullptr;
1526     for (Instruction &I : B) {
1527       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1528       if (!MUD)
1529         continue;
1530 
1531       if (!Accesses)
1532         Accesses = getOrCreateAccessList(&B);
1533       Accesses->push_back(MUD);
1534       if (isa<MemoryDef>(MUD)) {
1535         InsertIntoDef = true;
1536         if (!Defs)
1537           Defs = getOrCreateDefsList(&B);
1538         Defs->push_back(*MUD);
1539       }
1540     }
1541     if (InsertIntoDef)
1542       DefiningBlocks.insert(&B);
1543   }
1544   placePHINodes(DefiningBlocks);
1545 
1546   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1547   // filled in with all blocks.
1548   SmallPtrSet<BasicBlock *, 16> Visited;
1549   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1550 
1551   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1552   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1553   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1554 
1555   // Mark the uses in unreachable blocks as live on entry, so that they go
1556   // somewhere.
1557   for (auto &BB : F)
1558     if (!Visited.count(&BB))
1559       markUnreachableAsLiveOnEntry(&BB);
1560 }
1561 
1562 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1563 
1564 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1565   if (Walker)
1566     return Walker.get();
1567 
1568   if (!WalkerBase)
1569     WalkerBase =
1570         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1571 
1572   Walker =
1573       std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1574   return Walker.get();
1575 }
1576 
1577 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1578   if (SkipWalker)
1579     return SkipWalker.get();
1580 
1581   if (!WalkerBase)
1582     WalkerBase =
1583         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1584 
1585   SkipWalker =
1586       std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1587   return SkipWalker.get();
1588 }
1589 
1591 // This is a helper function used by the creation routines. It places NewAccess
1592 // into the access and defs lists for a given basic block, at the given
1593 // insertion point.
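// For example (illustrative): inserting a MemoryDef at the Beginning of a
// block whose access list is [MemoryPhi, MemoryUse] yields
// [MemoryPhi, MemoryDef, MemoryUse], since non-phi accesses always follow the
// phis.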
1594 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1595                                         const BasicBlock *BB,
1596                                         InsertionPlace Point) {
1597   auto *Accesses = getOrCreateAccessList(BB);
1598   if (Point == Beginning) {
1599     // If it's a phi node, it goes first; otherwise, it goes after any phi
1600     // nodes.
1601     if (isa<MemoryPhi>(NewAccess)) {
1602       Accesses->push_front(NewAccess);
1603       auto *Defs = getOrCreateDefsList(BB);
1604       Defs->push_front(*NewAccess);
1605     } else {
1606       auto AI = find_if_not(
1607           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1608       Accesses->insert(AI, NewAccess);
1609       if (!isa<MemoryUse>(NewAccess)) {
1610         auto *Defs = getOrCreateDefsList(BB);
1611         auto DI = find_if_not(
1612             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1613         Defs->insert(DI, *NewAccess);
1614       }
1615     }
1616   } else {
1617     Accesses->push_back(NewAccess);
1618     if (!isa<MemoryUse>(NewAccess)) {
1619       auto *Defs = getOrCreateDefsList(BB);
1620       Defs->push_back(*NewAccess);
1621     }
1622   }
1623   BlockNumberingValid.erase(BB);
1624 }
1625 
1626 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1627                                       AccessList::iterator InsertPt) {
1628   auto *Accesses = getWritableBlockAccesses(BB);
1629   bool WasEnd = InsertPt == Accesses->end();
1630   Accesses->insert(InsertPt, What);
1631   if (!isa<MemoryUse>(What)) {
1632     auto *Defs = getOrCreateDefsList(BB);
1633     // If we got asked to insert at the end, we have an easy job, just shove it
1634     // at the end. If we got asked to insert before an existing def, we also get
1635     // an iterator. If we got asked to insert before a use, we have to hunt for
1636     // the next def.
1637     if (WasEnd) {
1638       Defs->push_back(*What);
1639     } else if (isa<MemoryDef>(InsertPt)) {
1640       Defs->insert(InsertPt->getDefsIterator(), *What);
1641     } else {
1642       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1643         ++InsertPt;
1644       // Either we found a def, or we are inserting at the end.
1645       if (InsertPt == Accesses->end())
1646         Defs->push_back(*What);
1647       else
1648         Defs->insert(InsertPt->getDefsIterator(), *What);
1649     }
1650   }
1651   BlockNumberingValid.erase(BB);
1652 }
1653 
1654 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1655   // Keep it in the lookup tables, but remove it from the lists.
1656   removeFromLists(What, false);
1657 
1658   // Note that moving should implicitly invalidate the optimized state of a
1659   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1660   // MemoryDef.
1661   if (auto *MD = dyn_cast<MemoryDef>(What))
1662     MD->resetOptimized();
1663   What->setBlock(BB);
1664 }
1665 
1666 // Move What before Where in the IR.  The end result is that What will belong to
1667 // the right lists and have the right Block set, but will not otherwise be
1668 // correct. It will not have the right defining access, and if it is a def,
1669 // things below it will not properly be updated.
1670 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1671                        AccessList::iterator Where) {
1672   prepareForMoveTo(What, BB);
1673   insertIntoListsBefore(What, BB, Where);
1674 }
1675 
1676 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1677                        InsertionPlace Point) {
1678   if (isa<MemoryPhi>(What)) {
1679     assert(Point == Beginning &&
1680            "Can only move a Phi at the beginning of the block");
1681     // Update lookup table entry
1682     ValueToMemoryAccess.erase(What->getBlock());
1683     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1684     (void)Inserted;
1685     assert(Inserted && "Cannot move a Phi to a block that already has one");
1686   }
1687 
1688   prepareForMoveTo(What, BB);
1689   insertIntoListsForBlock(What, BB, Point);
1690 }
1691 
1692 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1693   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1694   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1695   // Phis are always placed at the front of the block.
1696   insertIntoListsForBlock(Phi, BB, Beginning);
1697   ValueToMemoryAccess[BB] = Phi;
1698   return Phi;
1699 }
1700 
1701 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1702                                                MemoryAccess *Definition,
1703                                                const MemoryUseOrDef *Template,
1704                                                bool CreationMustSucceed) {
1705   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1706   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1707   if (CreationMustSucceed)
1708     assert(NewAccess != nullptr && "Tried to create a memory access for a "
1709                                    "non-memory touching instruction");
1710   if (NewAccess) {
1711     assert((!Definition || !isa<MemoryUse>(Definition)) &&
1712            "A use cannot be a defining access");
1713     NewAccess->setDefiningAccess(Definition);
1714   }
1715   return NewAccess;
1716 }
1717 
1718 // Return true if the instruction has ordering constraints.
1719 // Note specifically that this only considers stores and loads
1720 // because others are still considered ModRef by getModRefInfo.
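// For example (illustrative IR): an atomic or volatile load such as
//   %v = load atomic i32, i32* %p seq_cst, align 4
// is not unordered, so it is treated as ordered and modeled as a MemoryDef.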
1721 static inline bool isOrdered(const Instruction *I) {
1722   if (auto *SI = dyn_cast<StoreInst>(I)) {
1723     if (!SI->isUnordered())
1724       return true;
1725   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1726     if (!LI->isUnordered())
1727       return true;
1728   }
1729   return false;
1730 }
1731 
1732 /// Helper function to create new memory accesses
1733 template <typename AliasAnalysisType>
1734 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1735                                            AliasAnalysisType *AAP,
1736                                            const MemoryUseOrDef *Template) {
1737   // The assume intrinsic has a control dependency which we model by claiming
1738   // that it writes arbitrarily. Debuginfo intrinsics may be considered
1739   // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1740   // dependencies here.
1741   // FIXME: Replace this special casing with a more accurate modelling of
1742   // assume's control dependency.
1743   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1744     switch (II->getIntrinsicID()) {
1745     default:
1746       break;
1747     case Intrinsic::assume:
1748     case Intrinsic::experimental_noalias_scope_decl:
1749       return nullptr;
1750     }
1751   }
1752 
1753   // Using a nonstandard AA pipeline might leave us with unexpected modref
1754   // results for I, so add a check to not model instructions that may not read
1755   // from or write to memory. This is necessary for correctness.
1756   if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1757     return nullptr;
1758 
1759   bool Def, Use;
1760   if (Template) {
1761     Def = isa<MemoryDef>(Template);
1762     Use = isa<MemoryUse>(Template);
1763 #if !defined(NDEBUG)
1764     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1765     bool DefCheck, UseCheck;
1766     DefCheck = isModSet(ModRef) || isOrdered(I);
1767     UseCheck = isRefSet(ModRef);
1768     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1769 #endif
1770   } else {
1771     // Find out what effect this instruction has on memory.
1772     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1773     // The isOrdered check is used to ensure that volatiles end up as defs
1774     // (atomics end up as ModRef right now anyway).  Until we separate the
1775     // ordering chain from the memory chain, this enables people to see at least
1776     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1777     // will still give an answer that bypasses other volatile loads.  TODO:
1778     // Separate memory aliasing and ordering into two different chains so that
1779     // we can precisely represent both "what memory will this read/write/is
1780     // clobbered by" and "what instructions can I move this past".
1781     Def = isModSet(ModRef) || isOrdered(I);
1782     Use = isRefSet(ModRef);
1783   }
1784 
1785   // It's possible for an instruction to not modify memory at all. During
1786   // construction, we ignore such instructions.
1787   if (!Def && !Use)
1788     return nullptr;
1789 
1790   MemoryUseOrDef *MUD;
1791   if (Def)
1792     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1793   else
1794     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1795   ValueToMemoryAccess[I] = MUD;
1796   return MUD;
1797 }
1798 
1799 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1800 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1801   assert(MA->use_empty() &&
1802          "Trying to remove memory access that still has uses");
1803   BlockNumbering.erase(MA);
1804   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1805     MUD->setDefiningAccess(nullptr);
1806   // Invalidate our walker's cache if necessary
1807   if (!isa<MemoryUse>(MA))
1808     getWalker()->invalidateInfo(MA);
1809 
1810   Value *MemoryInst;
1811   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1812     MemoryInst = MUD->getMemoryInst();
1813   else
1814     MemoryInst = MA->getBlock();
1815 
1816   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1817   if (VMA->second == MA)
1818     ValueToMemoryAccess.erase(VMA);
1819 }
1820 
1821 /// Properly remove \p MA from all of MemorySSA's lists.
1822 ///
1823 /// Because of the way the intrusive list and use lists work, it is important to
1824 /// do removal in the right order.
1825 /// ShouldDelete defaults to true, and will cause the memory access to also be
1826 /// deleted, not just removed.
1827 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1828   BasicBlock *BB = MA->getBlock();
1829   // The access list owns the reference, so we erase it from the non-owning list
1830   // first.
1831   if (!isa<MemoryUse>(MA)) {
1832     auto DefsIt = PerBlockDefs.find(BB);
1833     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1834     Defs->remove(*MA);
1835     if (Defs->empty())
1836       PerBlockDefs.erase(DefsIt);
1837   }
1838 
1839   // The erase call here will delete it. If we don't want it deleted, we call
1840   // remove instead.
1841   auto AccessIt = PerBlockAccesses.find(BB);
1842   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1843   if (ShouldDelete)
1844     Accesses->erase(MA);
1845   else
1846     Accesses->remove(MA);
1847 
1848   if (Accesses->empty()) {
1849     PerBlockAccesses.erase(AccessIt);
1850     BlockNumberingValid.erase(BB);
1851   }
1852 }
1853 
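// Print the function with MemorySSA annotations interleaved as IR comments,
// e.g. (illustrative):
//   ; 2 = MemoryDef(1)
//   store i32 4, i32* %p
//   ; MemoryUse(2)
//   %x = load i32, i32* %p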
1854 void MemorySSA::print(raw_ostream &OS) const {
1855   MemorySSAAnnotatedWriter Writer(this);
1856   F.print(OS, &Writer);
1857 }
1858 
1859 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1860 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1861 #endif
1862 
1863 void MemorySSA::verifyMemorySSA() const {
1864   verifyOrderingDominationAndDefUses(F);
1865   verifyDominationNumbers(F);
1866   verifyPrevDefInPhis(F);
1867   // Previously, the verification used to also check that the clobbering access
1868   // cached by MemorySSA is the same as the clobbering access found by a later
1869   // query to AA. This does not hold true in general due to the current
1870   // fragility of BasicAA, which has arbitrary caps on the things it analyzes
1871   // before giving up. As a result, correct transformations can lead to BasicAA
1872   // returning different alias answers before and after the transformation.
1873   // Invalidating MemorySSA is not an option, because BasicAA's results can be
1874   // so unpredictable that, in the worst case, we'd need to rebuild MemorySSA
1875   // from scratch after every transformation, which defeats the purpose of
1876   // using it. For such an example, see test4 added in D51960.
1877 }
1878 
1879 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1880 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1881   for (const BasicBlock &BB : F) {
1882     if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1883       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1884         auto *Pred = Phi->getIncomingBlock(I);
1885         auto *IncAcc = Phi->getIncomingValue(I);
1886         // If Pred has no unreachable predecessors, get the last def by
1887         // walking up the IDoms. If, while walking the IDoms, any of these has
1888         // an unreachable predecessor, then the incoming def can be any access.
1889         if (auto *DTNode = DT->getNode(Pred)) {
1890           while (DTNode) {
1891             if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1892               auto *LastAcc = &*(--DefList->end());
1893               assert(LastAcc == IncAcc &&
1894                      "Incorrect incoming access into phi.");
1895               break;
1896             }
1897             DTNode = DTNode->getIDom();
1898           }
1899         } else {
1900           // If Pred has unreachable predecessors, but has at least a Def, the
1901           // incoming access can be the last Def in Pred, or it could have been
1902           // optimized to LoE. After an update, though, the LoE may have been
1903           // replaced by another access, so IncAcc may be any access.
1904           // If Pred has unreachable predecessors and no Defs, incoming access
1905           // should be LoE; However, after an update, it may be any access.
1906         }
1907       }
1908     }
1909   }
1910 #endif
1911 }
1912 
1913 /// Verify that all of the blocks we believe to have valid domination numbers
1914 /// actually have valid domination numbers.
1915 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1916 #ifndef NDEBUG
1917   if (BlockNumberingValid.empty())
1918     return;
1919 
1920   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1921   for (const BasicBlock &BB : F) {
1922     if (!ValidBlocks.count(&BB))
1923       continue;
1924 
1925     ValidBlocks.erase(&BB);
1926 
1927     const AccessList *Accesses = getBlockAccesses(&BB);
1928     // It's correct to say an empty block has valid numbering.
1929     if (!Accesses)
1930       continue;
1931 
1932     // Block numbering starts at 1.
1933     unsigned long LastNumber = 0;
1934     for (const MemoryAccess &MA : *Accesses) {
1935       auto ThisNumberIter = BlockNumbering.find(&MA);
1936       assert(ThisNumberIter != BlockNumbering.end() &&
1937              "MemoryAccess has no domination number in a valid block!");
1938 
1939       unsigned long ThisNumber = ThisNumberIter->second;
1940       assert(ThisNumber > LastNumber &&
1941              "Domination numbers should be strictly increasing!");
1942       LastNumber = ThisNumber;
1943     }
1944   }
1945 
1946   assert(ValidBlocks.empty() &&
1947          "All valid BasicBlocks should exist in F -- dangling pointers?");
1948 #endif
1949 }
1950 
1951 /// Verify ordering: the order and existence of MemoryAccesses matches the
1952 /// order and existence of memory affecting instructions.
1953 /// Verify domination: each definition dominates all of its uses.
1954 /// Verify def-uses: the immediate use information - walk all the memory
1955 /// accesses and verify that, for each use, it appears in the appropriate
1956 /// def's use list.
1957 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F) const {
1958 #if !defined(NDEBUG)
1959   // Walk all the blocks, comparing what the lookups think and what the access
1960   // lists think, as well as the order in the blocks vs the order in the access
1961   // lists.
1962   SmallVector<MemoryAccess *, 32> ActualAccesses;
1963   SmallVector<MemoryAccess *, 32> ActualDefs;
1964   for (BasicBlock &B : F) {
1965     const AccessList *AL = getBlockAccesses(&B);
1966     const auto *DL = getBlockDefs(&B);
1967     MemoryPhi *Phi = getMemoryAccess(&B);
1968     if (Phi) {
1969       // Verify ordering.
1970       ActualAccesses.push_back(Phi);
1971       ActualDefs.push_back(Phi);
1972       // Verify domination
1973       for (const Use &U : Phi->uses())
1974         assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
1975 #if defined(EXPENSIVE_CHECKS)
1976       // Verify def-uses.
1977       assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
1978                                           pred_begin(&B), pred_end(&B))) &&
1979              "Incomplete MemoryPhi Node");
1980       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1981         verifyUseInDefs(Phi->getIncomingValue(I), Phi);
1982         assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
1983                "Incoming phi block not a block predecessor");
1984       }
1985 #endif
1986     }
1987 
1988     for (Instruction &I : B) {
1989       MemoryUseOrDef *MA = getMemoryAccess(&I);
1990       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
1991              "We have memory affecting instructions "
1992              "in this block but they are not in the "
1993              "access list or defs list");
1994       if (MA) {
1995         // Verify ordering.
1996         ActualAccesses.push_back(MA);
1997         if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
1998           // Verify ordering.
1999           ActualDefs.push_back(MA);
2000           // Verify domination.
2001           for (const Use &U : MD->uses())
2002             assert(dominates(MD, U) &&
2003                    "Memory Def does not dominate its uses");
2004         }
2005 #if defined(EXPENSIVE_CHECKS)
2006         // Verify def-uses.
2007         verifyUseInDefs(MA->getDefiningAccess(), MA);
2008 #endif
2009       }
2010     }
2011     // Either we hit the assert above, we really have no accesses, or we have
2012     // both accesses and an access list. The same holds for defs.
2013     if (!AL && !DL)
2014       continue;
2015     // Verify ordering.
2016     assert(AL->size() == ActualAccesses.size() &&
2017            "We don't have the same number of accesses in the block as on the "
2018            "access list");
2019     assert((DL || ActualDefs.size() == 0) &&
2020            "Either we should have a defs list, or we should have no defs");
2021     assert((!DL || DL->size() == ActualDefs.size()) &&
2022            "We don't have the same number of defs in the block as on the "
2023            "def list");
2024     auto ALI = AL->begin();
2025     auto AAI = ActualAccesses.begin();
2026     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2027       assert(&*ALI == *AAI && "Not the same accesses in the same order");
2028       ++ALI;
2029       ++AAI;
2030     }
2031     ActualAccesses.clear();
2032     if (DL) {
2033       auto DLI = DL->begin();
2034       auto ADI = ActualDefs.begin();
2035       while (DLI != DL->end() && ADI != ActualDefs.end()) {
2036         assert(&*DLI == *ADI && "Not the same defs in the same order");
2037         ++DLI;
2038         ++ADI;
2039       }
2040     }
2041     ActualDefs.clear();
2042   }
2043 #endif
2044 }
2045 
2046 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2047 /// appears in the use list of \p Def.
2048 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2049 #ifndef NDEBUG
2050   // The live on entry use may cause us to get a null def here.
2051   if (!Def)
2052     assert(isLiveOnEntryDef(Use) &&
2053            "Null def but use does not point to the live on entry def");
2054   else
2055     assert(is_contained(Def->users(), Use) &&
2056            "Did not find use in def's use list");
2057 #endif
2058 }
2059 
2060 /// Perform a local numbering on blocks so that instruction ordering can be
2061 /// determined in constant time.
2062 /// TODO: We currently just number in order.  If we numbered by N, we could
2063 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2064 /// log2(N) sequences of mixed before and after) without needing to invalidate
2065 /// the numbering.
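///
/// For illustration: a block whose access list is [MemoryPhi, MemoryDef,
/// MemoryUse] is numbered 1, 2, 3, so locallyDominates reduces to a single
/// integer comparison once the numbering is valid.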
2066 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2067   // The pre-increment ensures the numbers really start at 1.
2068   unsigned long CurrentNumber = 0;
2069   const AccessList *AL = getBlockAccesses(B);
2070   assert(AL != nullptr && "Asking to renumber an empty block");
2071   for (const auto &I : *AL)
2072     BlockNumbering[&I] = ++CurrentNumber;
2073   BlockNumberingValid.insert(B);
2074 }
2075 
2076 /// Determine, for two memory accesses in the same block,
2077 /// whether \p Dominator dominates \p Dominatee.
2078 /// \returns True if \p Dominator dominates \p Dominatee.
2079 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2080                                  const MemoryAccess *Dominatee) const {
2081   const BasicBlock *DominatorBlock = Dominator->getBlock();
2082 
2083   assert((DominatorBlock == Dominatee->getBlock()) &&
2084          "Asking for local domination when accesses are in different blocks!");
2085   // A node dominates itself.
2086   if (Dominatee == Dominator)
2087     return true;
2088 
2089   // When Dominatee is defined on function entry, it is not dominated by another
2090   // memory access.
2091   if (isLiveOnEntryDef(Dominatee))
2092     return false;
2093 
2094   // When Dominator is defined on function entry, it dominates the other memory
2095   // access.
2096   if (isLiveOnEntryDef(Dominator))
2097     return true;
2098 
2099   if (!BlockNumberingValid.count(DominatorBlock))
2100     renumberBlock(DominatorBlock);
2101 
2102   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2103   // All numbers start at 1.
2104   assert(DominatorNum != 0 && "Block was not numbered properly");
2105   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2106   assert(DominateeNum != 0 && "Block was not numbered properly");
2107   return DominatorNum < DominateeNum;
2108 }
2109 
2110 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2111                           const MemoryAccess *Dominatee) const {
2112   if (Dominator == Dominatee)
2113     return true;
2114 
2115   if (isLiveOnEntryDef(Dominatee))
2116     return false;
2117 
2118   if (Dominator->getBlock() != Dominatee->getBlock())
2119     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2120   return locallyDominates(Dominator, Dominatee);
2121 }
2122 
2123 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2124                           const Use &Dominatee) const {
2125   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2126     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2127     // The def must dominate the incoming block of the phi.
2128     if (UseBB != Dominator->getBlock())
2129       return DT->dominates(Dominator->getBlock(), UseBB);
2130     // If the UseBB and the DefBB are the same, compare locally.
2131     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2132   }
2133   // If it's not a PHI node use, the normal dominates can already handle it.
2134   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2135 }
2136 
2137 static const char LiveOnEntryStr[] = "liveOnEntry";
2138 
2139 void MemoryAccess::print(raw_ostream &OS) const {
2140   switch (getValueID()) {
2141   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2142   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2143   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2144   }
2145   llvm_unreachable("invalid value id");
2146 }
2147 
2148 void MemoryDef::print(raw_ostream &OS) const {
2149   MemoryAccess *UO = getDefiningAccess();
2150 
2151   auto printID = [&OS](MemoryAccess *A) {
2152     if (A && A->getID())
2153       OS << A->getID();
2154     else
2155       OS << LiveOnEntryStr;
2156   };
2157 
2158   OS << getID() << " = MemoryDef(";
2159   printID(UO);
2160   OS << ")";
2161 
2162   if (isOptimized()) {
2163     OS << "->";
2164     printID(getOptimized());
2165 
2166     if (Optional<AliasResult> AR = getOptimizedAccessType())
2167       OS << " " << *AR;
2168   }
2169 }
2170 
2171 void MemoryPhi::print(raw_ostream &OS) const {
2172   ListSeparator LS(",");
2173   OS << getID() << " = MemoryPhi(";
2174   for (const auto &Op : operands()) {
2175     BasicBlock *BB = getIncomingBlock(Op);
2176     MemoryAccess *MA = cast<MemoryAccess>(Op);
2177 
2178     OS << LS << '{';
2179     if (BB->hasName())
2180       OS << BB->getName();
2181     else
2182       BB->printAsOperand(OS, false);
2183     OS << ',';
2184     if (unsigned ID = MA->getID())
2185       OS << ID;
2186     else
2187       OS << LiveOnEntryStr;
2188     OS << '}';
2189   }
2190   OS << ')';
2191 }
2192 
2193 void MemoryUse::print(raw_ostream &OS) const {
2194   MemoryAccess *UO = getDefiningAccess();
2195   OS << "MemoryUse(";
2196   if (UO && UO->getID())
2197     OS << UO->getID();
2198   else
2199     OS << LiveOnEntryStr;
2200   OS << ')';
2201 
2202   if (Optional<AliasResult> AR = getOptimizedAccessType())
2203     OS << " " << *AR;
2204 }
2205 
2206 void MemoryAccess::dump() const {
2207 // Cannot completely remove virtual function even in release mode.
2208 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2209   print(dbgs());
2210   dbgs() << "\n";
2211 #endif
2212 }
2213 
2214 char MemorySSAPrinterLegacyPass::ID = 0;
2215 
2216 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2217   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2218 }
2219 
2220 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2221   AU.setPreservesAll();
2222   AU.addRequired<MemorySSAWrapperPass>();
2223 }
2224 
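// Bundles a Function together with a MemorySSAAnnotatedWriter so that the DOT
// graph traits below can render each basic block with its MemorySSA
// annotations.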
2225 class DOTFuncMSSAInfo {
2226 private:
2227   const Function &F;
2228   MemorySSAAnnotatedWriter MSSAWriter;
2229 
2230 public:
2231   DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2232       : F(F), MSSAWriter(&MSSA) {}
2233 
2234   const Function *getFunction() { return &F; }
2235   MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2236 };
2237 
2238 namespace llvm {
2239 
2240 template <>
2241 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2242   static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2243     return &(CFGInfo->getFunction()->getEntryBlock());
2244   }
2245 
2246   // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2247   using nodes_iterator = pointer_iterator<Function::const_iterator>;
2248 
2249   static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2250     return nodes_iterator(CFGInfo->getFunction()->begin());
2251   }
2252 
2253   static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2254     return nodes_iterator(CFGInfo->getFunction()->end());
2255   }
2256 
2257   static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2258     return CFGInfo->getFunction()->size();
2259   }
2260 };
2261 
2262 template <>
2263 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2264 
2265   DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2266 
2267   static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2268     return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2269            "' function";
2270   }
2271 
2272   std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2273     return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2274         Node, nullptr,
2275         [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2276           BB.print(OS, &CFGInfo->getWriter(), true, true);
2277         },
2278         [](std::string &S, unsigned &I, unsigned Idx) -> void {
2279           std::string Str = S.substr(I, Idx - I);
2280           StringRef SR = Str;
2281           if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2282               SR.count("MemoryUse("))
2283             return;
2284           DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2285         });
2286   }
2287 
2288   static std::string getEdgeSourceLabel(const BasicBlock *Node,
2289                                         const_succ_iterator I) {
2290     return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2291   }
2292 
2293   /// Edge attributes (raw PGO branch weights) are not displayed here.
2294   std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2295                                 DOTFuncMSSAInfo *CFGInfo) {
2296     return "";
2297   }
2298 
2299   std::string getNodeAttributes(const BasicBlock *Node,
2300                                 DOTFuncMSSAInfo *CFGInfo) {
2301     return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2302                ? "style=filled, fillcolor=lightpink"
2303                : "";
2304   }
2305 };
2306 
2307 } // namespace llvm
2308 
2309 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2310   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2311   if (DotCFGMSSA != "") {
2312     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2313     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2314   } else
2315     MSSA.print(dbgs());
2316 
2317   if (VerifyMemorySSA)
2318     MSSA.verifyMemorySSA();
2319   return false;
2320 }
2321 
2322 AnalysisKey MemorySSAAnalysis::Key;
2323 
2324 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2325                                                  FunctionAnalysisManager &AM) {
2326   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2327   auto &AA = AM.getResult<AAManager>(F);
2328   return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2329 }
2330 
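// The cached MemorySSA result is invalidated unless it was explicitly
// preserved; it is also invalidated transitively whenever either analysis it
// depends on (alias analysis or the dominator tree) is invalidated.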
2331 bool MemorySSAAnalysis::Result::invalidate(
2332     Function &F, const PreservedAnalyses &PA,
2333     FunctionAnalysisManager::Invalidator &Inv) {
2334   auto PAC = PA.getChecker<MemorySSAAnalysis>();
2335   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2336          Inv.invalidate<AAManager>(F, PA) ||
2337          Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2338 }
2339 
2340 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2341                                             FunctionAnalysisManager &AM) {
2342   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2343   if (DotCFGMSSA != "") {
2344     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2345     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2346   } else {
2347     OS << "MemorySSA for function: " << F.getName() << "\n";
2348     MSSA.print(OS);
2349   }
2350 
2351   return PreservedAnalyses::all();
2352 }
2353 
2354 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2355                                              FunctionAnalysisManager &AM) {
2356   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2357 
2358   return PreservedAnalyses::all();
2359 }
2360 
2361 char MemorySSAWrapperPass::ID = 0;
2362 
2363 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2364   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2365 }
2366 
2367 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2368 
2369 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2370   AU.setPreservesAll();
2371   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2372   AU.addRequiredTransitive<AAResultsWrapperPass>();
2373 }
2374 
2375 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2376   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2377   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2378   MSSA.reset(new MemorySSA(F, &AA, &DT));
2379   return false;
2380 }
2381 
2382 void MemorySSAWrapperPass::verifyAnalysis() const {
2383   if (VerifyMemorySSA)
2384     MSSA->verifyMemorySSA();
2385 }
2386 
2387 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2388   MSSA->print(OS);
2389 }
2390 
2391 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2392 
2393 /// Walk the use-def chains starting at \p StartingAccess and find
2394 /// the MemoryAccess that actually clobbers Loc.
2395 ///
2396 /// \returns our clobbering memory access
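///
/// For illustration (assumed IR, not normative): given
///   1 = MemoryDef(liveOnEntry)   ; store i8 0, i8* %a
///   2 = MemoryDef(1)             ; store i8 0, i8* %b
/// a query starting at access 2 with Loc describing %a walks past the NoAlias
/// store to %b and returns access 1 as the clobbering access.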
2397 template <typename AliasAnalysisType>
2398 MemoryAccess *
2399 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2400     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2401     unsigned &UpwardWalkLimit) {
2402   assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2403 
2404   Instruction *I = nullptr;
2405   if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2406     if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2407       return StartingUseOrDef;
2408 
2409     I = StartingUseOrDef->getMemoryInst();
2410 
2411     // Conservatively, fences are always clobbers, so don't perform the walk if
2412     // we hit a fence.
2413     if (!isa<CallBase>(I) && I->isFenceLike())
2414       return StartingUseOrDef;
2415   }
2416 
2417   UpwardsMemoryQuery Q;
2418   Q.OriginalAccess = StartingAccess;
2419   Q.StartingLoc = Loc;
2420   Q.Inst = nullptr;
2421   Q.IsCall = false;
2422 
2423   // Unlike the other function, do not walk to the def of a def, because we are
2424   // handed something we already believe is the clobbering access.
2425   // We never set SkipSelf to true in Q in this method.
2426   MemoryAccess *Clobber =
2427       Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2428   LLVM_DEBUG({
2429     dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2430     if (I)
2431       dbgs() << "  for instruction " << *I << "\n";
2432     dbgs() << "  is " << *Clobber << "\n";
2433   });
2434   return Clobber;
2435 }
2436 
2437 template <typename AliasAnalysisType>
2438 MemoryAccess *
2439 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2440     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf) {
2441   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2442   // If this is a MemoryPhi, we can't do anything.
2443   if (!StartingAccess)
2444     return MA;
2445 
2446   bool IsOptimized = false;
2447 
2448   // If this is an already optimized use or def, return the optimized result.
2449   // Note: Currently, we store the optimized def result in a separate field,
2450   // since we can't use the defining access.
2451   if (StartingAccess->isOptimized()) {
2452     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2453       return StartingAccess->getOptimized();
2454     IsOptimized = true;
2455   }
2456 
2457   const Instruction *I = StartingAccess->getMemoryInst();
2458   // We can't sanely do anything with fences, since they conservatively clobber
2459   // all memory and have no locations to get pointers from to try to
2460   // disambiguate.
2461   if (!isa<CallBase>(I) && I->isFenceLike())
2462     return StartingAccess;
2463 
2464   UpwardsMemoryQuery Q(I, StartingAccess);
2465 
2466   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2467     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2468     StartingAccess->setOptimized(LiveOnEntry);
2469     StartingAccess->setOptimizedAccessType(None);
2470     return LiveOnEntry;
2471   }
2472 
2473   MemoryAccess *OptimizedAccess;
2474   if (!IsOptimized) {
2475     // Start with the thing we already think clobbers this location
2476     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2477 
2478     // At this point, DefiningAccess may be the live on entry def.
2479     // If it is, we will not get a better result.
2480     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2481       StartingAccess->setOptimized(DefiningAccess);
2482       StartingAccess->setOptimizedAccessType(None);
2483       return DefiningAccess;
2484     }
2485 
2486     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2487     StartingAccess->setOptimized(OptimizedAccess);
2488     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2489       StartingAccess->setOptimizedAccessType(None);
2490     else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2491       StartingAccess->setOptimizedAccessType(
2492           AliasResult(AliasResult::MustAlias));
2493   } else
2494     OptimizedAccess = StartingAccess->getOptimized();
2495 
2496   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2497   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2498   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2499   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2500 
2501   MemoryAccess *Result;
2502   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2503       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2504     assert(isa<MemoryDef>(Q.OriginalAccess));
2505     Q.SkipSelfAccess = true;
2506     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2507   } else
2508     Result = OptimizedAccess;
2509 
2510   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2511   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2512 
2513   return Result;
2514 }
2515 
2516 MemoryAccess *
2517 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2518   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2519     return Use->getDefiningAccess();
2520   return MA;
2521 }
2522 
2523 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2524     MemoryAccess *StartingAccess, const MemoryLocation &) {
2525   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2526     return Use->getDefiningAccess();
2527   return StartingAccess;
2528 }
2529 
2530 void MemoryPhi::deleteMe(DerivedUser *Self) {
2531   delete static_cast<MemoryPhi *>(Self);
2532 }
2533 
2534 void MemoryDef::deleteMe(DerivedUser *Self) {
2535   delete static_cast<MemoryDef *>(Self);
2536 }
2537 
2538 void MemoryUse::deleteMe(DerivedUser *Self) {
2539   delete static_cast<MemoryUse *>(Self);
2540 }
2541 
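// A pointer is treated as guaranteed loop invariant if, after stripping
// pointer casts, it is defined in the entry block, it is not an instruction
// at all (e.g. an Argument or a GlobalValue), it is an alloca, or it is a GEP
// with all-constant indices whose base pointer is itself invariant by these
// rules.  For example (illustrative), a GEP whose index varies with a loop
// induction variable does not qualify.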
2542 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
2543   auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2544     Ptr = Ptr->stripPointerCasts();
2545     if (!isa<Instruction>(Ptr))
2546       return true;
2547     return isa<AllocaInst>(Ptr);
2548   };
2549 
2550   Ptr = Ptr->stripPointerCasts();
2551   if (auto *I = dyn_cast<Instruction>(Ptr)) {
2552     if (I->getParent()->isEntryBlock())
2553       return true;
2554   }
2555   if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2556     return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2557            GEP->hasAllConstantIndices();
2558   }
2559   return IsGuaranteedLoopInvariantBase(Ptr);
2560 }
2561