1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFGPrinter.h"
29 #include "llvm/Analysis/IteratedDominanceFrontier.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Config/llvm-config.h"
32 #include "llvm/IR/AssemblyAnnotationWriter.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/Operator.h"
42 #include "llvm/IR/PassManager.h"
43 #include "llvm/IR/Use.h"
44 #include "llvm/InitializePasses.h"
45 #include "llvm/Pass.h"
46 #include "llvm/Support/AtomicOrdering.h"
47 #include "llvm/Support/Casting.h"
48 #include "llvm/Support/CommandLine.h"
49 #include "llvm/Support/Compiler.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/FormattedStream.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include <algorithm>
55 #include <cassert>
56 #include <cstdlib>
57 #include <iterator>
58 #include <memory>
59 #include <utility>
60 
61 using namespace llvm;
62 
63 #define DEBUG_TYPE "memoryssa"
64 
65 static cl::opt<std::string>
66     DotCFGMSSA("dot-cfg-mssa",
67                cl::value_desc("file name for generated dot file"),
68                cl::desc("file name for generated dot file"), cl::init(""));
69 
70 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
71                       true)
72 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
73 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
74 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
75                     true)
76 
77 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
78                       "Memory SSA Printer", false, false)
79 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
80 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
81                     "Memory SSA Printer", false, false)
82 
83 static cl::opt<unsigned> MaxCheckLimit(
84     "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
87 
88 // Always verify MemorySSA if expensive checking is enabled.
89 #ifdef EXPENSIVE_CHECKS
90 bool llvm::VerifyMemorySSA = true;
91 #else
92 bool llvm::VerifyMemorySSA = false;
93 #endif
94 
95 static cl::opt<bool, true>
96     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
97                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
98 
99 const static char LiveOnEntryStr[] = "liveOnEntry";
100 
101 namespace {
102 
103 /// An assembly annotator class to print Memory SSA information in
104 /// comments.
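/// For example, the printed annotations look roughly like this (illustrative
/// only; the numbering of accesses depends on the function being printed):
///
///   ; 1 = MemoryDef(liveOnEntry)
///   store i32 4, i32* %p
///   ; MemoryUse(1)
///   %v = load i32, i32* %p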
105 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
106   const MemorySSA *MSSA;
107 
108 public:
109   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
110 
111   void emitBasicBlockStartAnnot(const BasicBlock *BB,
112                                 formatted_raw_ostream &OS) override {
113     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
114       OS << "; " << *MA << "\n";
115   }
116 
117   void emitInstructionAnnot(const Instruction *I,
118                             formatted_raw_ostream &OS) override {
119     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
120       OS << "; " << *MA << "\n";
121   }
122 };
123 
124 /// An assembly annotator class to print Memory SSA information in
125 /// comments.
126 class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
127   MemorySSA *MSSA;
128   MemorySSAWalker *Walker;
129 
130 public:
131   MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
132       : MSSA(M), Walker(M->getWalker()) {}
133 
134   void emitInstructionAnnot(const Instruction *I,
135                             formatted_raw_ostream &OS) override {
136     if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
137       MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
138       OS << "; " << *MA;
139       if (Clobber) {
140         OS << " - clobbered by ";
141         if (MSSA->isLiveOnEntryDef(Clobber))
142           OS << LiveOnEntryStr;
143         else
144           OS << *Clobber;
145       }
146       OS << "\n";
147     }
148   }
149 };
150 
151 } // namespace
152 
153 namespace {
154 
155 /// Our current alias analysis API differentiates heavily between calls and
156 /// non-calls, and functions called on one usually assert on the other.
157 /// This class encapsulates the distinction to simplify other code that wants
158 /// "Memory affecting instructions and related data" to use as a key.
159 /// For example, this class is used as a densemap key in the use optimizer.
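/// A sketch of that use (see OptimizeUses further down in this file): the use
/// optimizer keys its per-location renaming state as
///
///   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
///
/// which is why the DenseMapInfo specialization below exists.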
160 class MemoryLocOrCall {
161 public:
162   bool IsCall = false;
163 
164   MemoryLocOrCall(MemoryUseOrDef *MUD)
165       : MemoryLocOrCall(MUD->getMemoryInst()) {}
166   MemoryLocOrCall(const MemoryUseOrDef *MUD)
167       : MemoryLocOrCall(MUD->getMemoryInst()) {}
168 
169   MemoryLocOrCall(Instruction *Inst) {
170     if (auto *C = dyn_cast<CallBase>(Inst)) {
171       IsCall = true;
172       Call = C;
173     } else {
174       IsCall = false;
175       // There is no such thing as a memorylocation for a fence inst, and it is
176       // unique in that regard.
177       if (!isa<FenceInst>(Inst))
178         Loc = MemoryLocation::get(Inst);
179     }
180   }
181 
182   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
183 
184   const CallBase *getCall() const {
185     assert(IsCall);
186     return Call;
187   }
188 
189   MemoryLocation getLoc() const {
190     assert(!IsCall);
191     return Loc;
192   }
193 
194   bool operator==(const MemoryLocOrCall &Other) const {
195     if (IsCall != Other.IsCall)
196       return false;
197 
198     if (!IsCall)
199       return Loc == Other.Loc;
200 
201     if (Call->getCalledOperand() != Other.Call->getCalledOperand())
202       return false;
203 
204     return Call->arg_size() == Other.Call->arg_size() &&
205            std::equal(Call->arg_begin(), Call->arg_end(),
206                       Other.Call->arg_begin());
207   }
208 
209 private:
210   union {
211     const CallBase *Call;
212     MemoryLocation Loc;
213   };
214 };
215 
216 } // end anonymous namespace
217 
218 namespace llvm {
219 
220 template <> struct DenseMapInfo<MemoryLocOrCall> {
221   static inline MemoryLocOrCall getEmptyKey() {
222     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
223   }
224 
225   static inline MemoryLocOrCall getTombstoneKey() {
226     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
227   }
228 
229   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
230     if (!MLOC.IsCall)
231       return hash_combine(
232           MLOC.IsCall,
233           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
234 
235     hash_code hash =
236         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
237                                       MLOC.getCall()->getCalledOperand()));
238 
239     for (const Value *Arg : MLOC.getCall()->args())
240       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
241     return hash;
242   }
243 
244   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
245     return LHS == RHS;
246   }
247 };
248 
249 } // end namespace llvm
250 
251 /// This does one-way checks to see if Use could theoretically be hoisted above
252 /// MayClobber. This will not check the other way around.
253 ///
254 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
255 /// MayClobber, with no potentially clobbering operations in between them.
256 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
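/// For example (illustrative):
///
///   %a = load atomic i32, i32* %p monotonic, align 4   ; MayClobber
///   %b = load atomic i32, i32* %p unordered, align 4   ; Use
///
/// is a reorderable pair, but nothing may be hoisted above an acquire load,
/// and a seq_cst load may not be hoisted above any other load.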
257 static bool areLoadsReorderable(const LoadInst *Use,
258                                 const LoadInst *MayClobber) {
259   bool VolatileUse = Use->isVolatile();
260   bool VolatileClobber = MayClobber->isVolatile();
261   // Volatile operations may never be reordered with other volatile operations.
262   if (VolatileUse && VolatileClobber)
263     return false;
264   // Otherwise, volatile doesn't matter here. From the language reference:
265   // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'
267 
268   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
269   // is weaker, it can be moved above other loads. We just need to be sure that
270   // MayClobber isn't an acquire load, because loads can't be moved above
271   // acquire loads.
272   //
273   // Note that this explicitly *does* allow the free reordering of monotonic (or
274   // weaker) loads of the same address.
275   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
276   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
277                                                      AtomicOrdering::Acquire);
278   return !(SeqCstUse || MayClobberIsAcquire);
279 }
280 
281 namespace {
282 
283 struct ClobberAlias {
284   bool IsClobber;
285   Optional<AliasResult> AR;
286 };
287 
288 } // end anonymous namespace
289 
290 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
291 // ignored if IsClobber = false.
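// For example (a sketch of the cases handled below): a store whose location
// must-aliases UseLoc yields {true, MustAlias}, while an assume intrinsic
// yields {false, NoAlias} and is never treated as a clobber.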
292 template <typename AliasAnalysisType>
293 static ClobberAlias
294 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
295                          const Instruction *UseInst, AliasAnalysisType &AA) {
296   Instruction *DefInst = MD->getMemoryInst();
297   assert(DefInst && "Defining instruction not actually an instruction");
298   Optional<AliasResult> AR;
299 
300   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
301     // These intrinsics will show up as affecting memory, but they are just
302     // markers, mostly.
303     //
304     // FIXME: We probably don't actually want MemorySSA to model these at all
305     // (including creating MemoryAccesses for them): we just end up inventing
306     // clobbers where they don't really exist at all. Please see D43269 for
307     // context.
308     switch (II->getIntrinsicID()) {
309     case Intrinsic::invariant_start:
310     case Intrinsic::invariant_end:
311     case Intrinsic::assume:
312     case Intrinsic::experimental_noalias_scope_decl:
313     case Intrinsic::pseudoprobe:
314       return {false, AliasResult(AliasResult::NoAlias)};
315     case Intrinsic::dbg_addr:
316     case Intrinsic::dbg_declare:
317     case Intrinsic::dbg_label:
318     case Intrinsic::dbg_value:
319       llvm_unreachable("debuginfo shouldn't have associated defs!");
320     default:
321       break;
322     }
323   }
324 
325   if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
326     ModRefInfo I = AA.getModRefInfo(DefInst, CB);
327     AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
328     return {isModOrRefSet(I), AR};
329   }
330 
331   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
332     if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
333       return {!areLoadsReorderable(UseLoad, DefLoad),
334               AliasResult(AliasResult::MayAlias)};
335 
336   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
337   AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
338   return {isModSet(I), AR};
339 }
340 
341 template <typename AliasAnalysisType>
342 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
343                                              const MemoryUseOrDef *MU,
344                                              const MemoryLocOrCall &UseMLOC,
345                                              AliasAnalysisType &AA) {
346   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
347   // to exist while MemoryLocOrCall is pushed through places.
348   if (UseMLOC.IsCall)
349     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
350                                     AA);
351   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
352                                   AA);
353 }
354 
355 // Return true when MD may alias MU, return false otherwise.
356 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
357                                         AliasAnalysis &AA) {
358   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
359 }
360 
361 namespace {
362 
363 struct UpwardsMemoryQuery {
364   // True if our original query started off as a call
365   bool IsCall = false;
366   // The pointer location we started the query with. This will be empty if
367   // IsCall is true.
368   MemoryLocation StartingLoc;
369   // This is the instruction we were querying about.
370   const Instruction *Inst = nullptr;
371   // The MemoryAccess we actually got called with, used to test local domination
372   const MemoryAccess *OriginalAccess = nullptr;
373   Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
374   bool SkipSelfAccess = false;
375 
376   UpwardsMemoryQuery() = default;
377 
378   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
379       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
380     if (!IsCall)
381       StartingLoc = MemoryLocation::get(Inst);
382   }
383 };
384 
385 } // end anonymous namespace
386 
387 template <typename AliasAnalysisType>
388 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
389                                                    const Instruction *I) {
390   // If the memory can't be changed, then loads of the memory can't be
391   // clobbered.
392   if (auto *LI = dyn_cast<LoadInst>(I))
393     return I->hasMetadata(LLVMContext::MD_invariant_load) ||
394            AA.pointsToConstantMemory(MemoryLocation::get(LI));
395   return false;
396 }
397 
398 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` clobbers `Start`.
400 ///
401 /// This is meant to be as simple and self-contained as possible. Because it
402 /// uses no cache, etc., it can be relatively expensive.
403 ///
404 /// \param Start     The MemoryAccess that we want to walk from.
405 /// \param ClobberAt A clobber for Start.
406 /// \param StartLoc  The MemoryLocation for Start.
407 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
408 /// \param Query     The UpwardsMemoryQuery we used for our search.
409 /// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
412 template <typename AliasAnalysisType>
413 LLVM_ATTRIBUTE_UNUSED static void
414 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
415                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
416                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
417                    bool AllowImpreciseClobber = false) {
418   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
419 
420   if (MSSA.isLiveOnEntryDef(Start)) {
421     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
422            "liveOnEntry must clobber itself");
423     return;
424   }
425 
426   bool FoundClobber = false;
427   DenseSet<ConstMemoryAccessPair> VisitedPhis;
428   SmallVector<ConstMemoryAccessPair, 8> Worklist;
429   Worklist.emplace_back(Start, StartLoc);
430   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
431   // is found, complain.
432   while (!Worklist.empty()) {
433     auto MAP = Worklist.pop_back_val();
434     // All we care about is that nothing from Start to ClobberAt clobbers Start.
435     // We learn nothing from revisiting nodes.
436     if (!VisitedPhis.insert(MAP).second)
437       continue;
438 
439     for (const auto *MA : def_chain(MAP.first)) {
440       if (MA == ClobberAt) {
441         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
442           // instructionClobbersQuery isn't essentially free, so don't use `|=`,
443           // since it won't let us short-circuit.
444           //
445           // Also, note that this can't be hoisted out of the `Worklist` loop,
446           // since MD may only act as a clobber for 1 of N MemoryLocations.
447           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
448           if (!FoundClobber) {
449             ClobberAlias CA =
450                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
451             if (CA.IsClobber) {
452               FoundClobber = true;
453               // Not used: CA.AR;
454             }
455           }
456         }
457         break;
458       }
459 
460       // We should never hit liveOnEntry, unless it's the clobber.
461       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
462 
463       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
464         // If Start is a Def, skip self.
465         if (MD == Start)
466           continue;
467 
468         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
469                     .IsClobber &&
470                "Found clobber before reaching ClobberAt!");
471         continue;
472       }
473 
474       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
475         (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
478         continue;
479       }
480 
481       assert(isa<MemoryPhi>(MA));
482 
483       // Add reachable phi predecessors
484       for (auto ItB = upward_defs_begin(
485                     {const_cast<MemoryAccess *>(MA), MAP.second},
486                     MSSA.getDomTree()),
487                 ItE = upward_defs_end();
488            ItB != ItE; ++ItB)
489         if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
490           Worklist.emplace_back(*ItB);
491     }
492   }
493 
494   // If the verify is done following an optimization, it's possible that
495   // ClobberAt was a conservative clobbering, that we can now infer is not a
496   // true clobbering access. Don't fail the verify if that's the case.
497   // We do have accesses that claim they're optimized, but could be optimized
498   // further. Updating all these can be expensive, so allow it for now (FIXME).
499   if (AllowImpreciseClobber)
500     return;
501 
502   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
503   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
504   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
505          "ClobberAt never acted as a clobber");
506 }
507 
508 namespace {
509 
510 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
511 /// in one class.
512 template <class AliasAnalysisType> class ClobberWalker {
513   /// Save a few bytes by using unsigned instead of size_t.
514   using ListIndex = unsigned;
515 
516   /// Represents a span of contiguous MemoryDefs, potentially ending in a
517   /// MemoryPhi.
518   struct DefPath {
519     MemoryLocation Loc;
520     // Note that, because we always walk in reverse, Last will always dominate
521     // First. Also note that First and Last are inclusive.
522     MemoryAccess *First;
523     MemoryAccess *Last;
524     Optional<ListIndex> Previous;
525 
526     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
527             Optional<ListIndex> Previous)
528         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
529 
530     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
531             Optional<ListIndex> Previous)
532         : DefPath(Loc, Init, Init, Previous) {}
533   };
534 
535   const MemorySSA &MSSA;
536   AliasAnalysisType &AA;
537   DominatorTree &DT;
538   UpwardsMemoryQuery *Query;
539   unsigned *UpwardWalkLimit;
540 
541   // Phi optimization bookkeeping:
542   // List of DefPath to process during the current phi optimization walk.
543   SmallVector<DefPath, 32> Paths;
544   // List of visited <Access, Location> pairs; we can skip paths already
545   // visited with the same memory location.
546   DenseSet<ConstMemoryAccessPair> VisitedPhis;
547   // Record if phi translation has been performed during the current phi
548   // optimization walk, as merging alias results after phi translation can
549   // yield incorrect results. Context in PR46156.
550   bool PerformedPhiTranslation = false;
551 
552   /// Find the nearest def or phi that `From` can legally be optimized to.
553   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
554     assert(From->getNumOperands() && "Phi with no operands?");
555 
556     BasicBlock *BB = From->getBlock();
557     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
558     DomTreeNode *Node = DT.getNode(BB);
559     while ((Node = Node->getIDom())) {
560       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
561       if (Defs)
562         return &*Defs->rbegin();
563     }
564     return Result;
565   }
566 
567   /// Result of calling walkToPhiOrClobber.
568   struct UpwardsWalkResult {
569     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Includes alias info when a clobber is found.
571     MemoryAccess *Result;
572     bool IsKnownClobber;
573     Optional<AliasResult> AR;
574   };
575 
576   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
577   /// This will update Desc.Last as it walks. It will (optionally) also stop at
578   /// StopAt.
579   ///
580   /// This does not test for whether StopAt is a clobber
581   UpwardsWalkResult
582   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
583                      const MemoryAccess *SkipStopAt = nullptr) const {
584     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
585     assert(UpwardWalkLimit && "Need a valid walk limit");
586     bool LimitAlreadyReached = false;
587     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
588     // it to 1. This will not do any alias() calls. It either returns in the
589     // first iteration in the loop below, or is set back to 0 if all def chains
590     // are free of MemoryDefs.
591     if (!*UpwardWalkLimit) {
592       *UpwardWalkLimit = 1;
593       LimitAlreadyReached = true;
594     }
595 
596     for (MemoryAccess *Current : def_chain(Desc.Last)) {
597       Desc.Last = Current;
598       if (Current == StopAt || Current == SkipStopAt)
599         return {Current, false, AliasResult(AliasResult::MayAlias)};
600 
601       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
602         if (MSSA.isLiveOnEntryDef(MD))
603           return {MD, true, AliasResult(AliasResult::MustAlias)};
604 
605         if (!--*UpwardWalkLimit)
606           return {Current, true, AliasResult(AliasResult::MayAlias)};
607 
608         ClobberAlias CA =
609             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
610         if (CA.IsClobber)
611           return {MD, true, CA.AR};
612       }
613     }
614 
615     if (LimitAlreadyReached)
616       *UpwardWalkLimit = 0;
617 
618     assert(isa<MemoryPhi>(Desc.Last) &&
619            "Ended at a non-clobber that's not a phi?");
620     return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
621   }
622 
623   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
624                    ListIndex PriorNode) {
625     auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
626                                              &PerformedPhiTranslation);
627     auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
628     for (const MemoryAccessPair &P : UpwardDefs) {
629       PausedSearches.push_back(Paths.size());
630       Paths.emplace_back(P.second, P.first, PriorNode);
631     }
632   }
633 
634   /// Represents a search that terminated after finding a clobber. This clobber
635   /// may or may not be present in the path of defs from LastNode..SearchStart,
636   /// since it may have been retrieved from cache.
637   struct TerminatedPath {
638     MemoryAccess *Clobber;
639     ListIndex LastNode;
640   };
641 
642   /// Get an access that keeps us from optimizing to the given phi.
643   ///
644   /// PausedSearches is an array of indices into the Paths array. Its incoming
645   /// value is the indices of searches that stopped at the last phi optimization
646   /// target. It's left in an unspecified state.
647   ///
648   /// If this returns None, NewPaused is a vector of searches that terminated
649   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
650   Optional<TerminatedPath>
651   getBlockingAccess(const MemoryAccess *StopWhere,
652                     SmallVectorImpl<ListIndex> &PausedSearches,
653                     SmallVectorImpl<ListIndex> &NewPaused,
654                     SmallVectorImpl<TerminatedPath> &Terminated) {
655     assert(!PausedSearches.empty() && "No searches to continue?");
656 
657     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
658     // PausedSearches as our stack.
659     while (!PausedSearches.empty()) {
660       ListIndex PathIndex = PausedSearches.pop_back_val();
661       DefPath &Node = Paths[PathIndex];
662 
663       // If we've already visited this path with this MemoryLocation, we don't
664       // need to do so again.
665       //
666       // NOTE: That we just drop these paths on the ground makes caching
667       // behavior sporadic. e.g. given a diamond:
668       //  A
669       // B C
670       //  D
671       //
672       // ...If we walk D, B, A, C, we'll only cache the result of phi
673       // optimization for A, B, and D; C will be skipped because it dies here.
674       // This arguably isn't the worst thing ever, since:
675       //   - We generally query things in a top-down order, so if we got below D
676       //     without needing cache entries for {C, MemLoc}, then chances are
677       //     that those cache entries would end up ultimately unused.
678       //   - We still cache things for A, so C only needs to walk up a bit.
679       // If this behavior becomes problematic, we can fix without a ton of extra
680       // work.
681       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
682         if (PerformedPhiTranslation) {
683           // If visiting this path performed Phi translation, don't continue,
684           // since it may not be correct to merge results from two paths if one
685           // relies on the phi translation.
686           TerminatedPath Term{Node.Last, PathIndex};
687           return Term;
688         }
689         continue;
690       }
691 
692       const MemoryAccess *SkipStopWhere = nullptr;
693       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
694         assert(isa<MemoryDef>(Query->OriginalAccess));
695         SkipStopWhere = Query->OriginalAccess;
696       }
697 
698       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
699                                                  /*StopAt=*/StopWhere,
700                                                  /*SkipStopAt=*/SkipStopWhere);
701       if (Res.IsKnownClobber) {
702         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
703 
704         // If this wasn't a cache hit, we hit a clobber when walking. That's a
705         // failure.
706         TerminatedPath Term{Res.Result, PathIndex};
707         if (!MSSA.dominates(Res.Result, StopWhere))
708           return Term;
709 
710         // Otherwise, it's a valid thing to potentially optimize to.
711         Terminated.push_back(Term);
712         continue;
713       }
714 
715       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
716         // We've hit our target. Save this path off for if we want to continue
717         // walking. If we are in the mode of skipping the OriginalAccess, and
718         // we've reached back to the OriginalAccess, do not save path, we've
719         // just looped back to self.
720         if (Res.Result != SkipStopWhere)
721           NewPaused.push_back(PathIndex);
722         continue;
723       }
724 
725       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
726       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
727     }
728 
729     return None;
730   }
731 
732   template <typename T, typename Walker>
733   struct generic_def_path_iterator
734       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
735                                     std::forward_iterator_tag, T *> {
736     generic_def_path_iterator() = default;
737     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
738 
739     T &operator*() const { return curNode(); }
740 
741     generic_def_path_iterator &operator++() {
742       N = curNode().Previous;
743       return *this;
744     }
745 
746     bool operator==(const generic_def_path_iterator &O) const {
747       if (N.hasValue() != O.N.hasValue())
748         return false;
749       return !N.hasValue() || *N == *O.N;
750     }
751 
752   private:
753     T &curNode() const { return W->Paths[*N]; }
754 
755     Walker *W = nullptr;
756     Optional<ListIndex> N = None;
757   };
758 
759   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
760   using const_def_path_iterator =
761       generic_def_path_iterator<const DefPath, const ClobberWalker>;
762 
763   iterator_range<def_path_iterator> def_path(ListIndex From) {
764     return make_range(def_path_iterator(this, From), def_path_iterator());
765   }
766 
767   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
768     return make_range(const_def_path_iterator(this, From),
769                       const_def_path_iterator());
770   }
771 
772   struct OptznResult {
773     /// The path that contains our result.
774     TerminatedPath PrimaryClobber;
775     /// The paths that we can legally cache back from, but that aren't
776     /// necessarily the result of the Phi optimization.
777     SmallVector<TerminatedPath, 4> OtherClobbers;
778   };
779 
780   ListIndex defPathIndex(const DefPath &N) const {
781     // The assert looks nicer if we don't need to do &N
782     const DefPath *NP = &N;
783     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
784            "Out of bounds DefPath!");
785     return NP - &Paths.front();
786   }
787 
788   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
789   /// that act as legal clobbers. Note that this won't return *all* clobbers.
790   ///
791   /// Phi optimization algorithm tl;dr:
792   ///   - Find the earliest def/phi, A, we can optimize to
793   ///   - Find if all paths from the starting memory access ultimately reach A
794   ///     - If not, optimization isn't possible.
795   ///     - Otherwise, walk from A to another clobber or phi, A'.
796   ///       - If A' is a def, we're done.
797   ///       - If A' is a phi, try to optimize it.
798   ///
799   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
800   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
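  ///
  /// Illustrative sketch of the common case: given a diamond
  ///
  ///        A   <- nearest dominating def (the walk target)
  ///       / \
  ///      B   C
  ///       \ /
  ///        D   <- the MemoryPhi being optimized
  ///
  /// if neither B nor C contains a def that clobbers the query location, every
  /// path from D's incoming values reaches A, so the phi can be skipped and
  /// the walk continues upward from A. If one arm does contain a clobber, that
  /// clobber terminates the corresponding path instead.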
801   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
802                              const MemoryLocation &Loc) {
803     assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
804            "Reset the optimization state.");
805 
806     Paths.emplace_back(Loc, Start, Phi, None);
807     // Stores how many "valid" optimization nodes we had prior to calling
808     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
809     auto PriorPathsSize = Paths.size();
810 
811     SmallVector<ListIndex, 16> PausedSearches;
812     SmallVector<ListIndex, 8> NewPaused;
813     SmallVector<TerminatedPath, 4> TerminatedPaths;
814 
815     addSearches(Phi, PausedSearches, 0);
816 
817     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
818     // Paths.
819     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
820       assert(!Paths.empty() && "Need a path to move");
821       auto Dom = Paths.begin();
822       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
823         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
824           Dom = I;
825       auto Last = Paths.end() - 1;
826       if (Last != Dom)
827         std::iter_swap(Last, Dom);
828     };
829 
830     MemoryPhi *Current = Phi;
831     while (true) {
832       assert(!MSSA.isLiveOnEntryDef(Current) &&
833              "liveOnEntry wasn't treated as a clobber?");
834 
835       const auto *Target = getWalkTarget(Current);
836       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
837       // optimization for the prior phi.
838       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
839         return MSSA.dominates(P.Clobber, Target);
840       }));
841 
842       // FIXME: This is broken, because the Blocker may be reported to be
843       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
844       // For the moment, this is fine, since we do nothing with blocker info.
845       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
846               Target, PausedSearches, NewPaused, TerminatedPaths)) {
847 
848         // Find the node we started at. We can't search based on N->Last, since
849         // we may have gone around a loop with a different MemoryLocation.
850         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
851           return defPathIndex(N) < PriorPathsSize;
852         });
853         assert(Iter != def_path_iterator());
854 
855         DefPath &CurNode = *Iter;
856         assert(CurNode.Last == Current);
857 
858         // Two things:
859         // A. We can't reliably cache all of NewPaused back. Consider a case
860         //    where we have two paths in NewPaused; one of which can't optimize
861         //    above this phi, whereas the other can. If we cache the second path
862         //    back, we'll end up with suboptimal cache entries. We can handle
863         //    cases like this a bit better when we either try to find all
864         //    clobbers that block phi optimization, or when our cache starts
865         //    supporting unfinished searches.
866         // B. We can't reliably cache TerminatedPaths back here without doing
867         //    extra checks; consider a case like:
868         //       T
869         //      / \
870         //     D   C
871         //      \ /
872         //       S
873         //    Where T is our target, C is a node with a clobber on it, D is a
874         //    diamond (with a clobber *only* on the left or right node, N), and
875         //    S is our start. Say we walk to D, through the node opposite N
876         //    (read: ignoring the clobber), and see a cache entry in the top
877         //    node of D. That cache entry gets put into TerminatedPaths. We then
878         //    walk up to C (N is later in our worklist), find the clobber, and
879         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
880         //    the bottom part of D to the cached clobber, ignoring the clobber
881         //    in N. Again, this problem goes away if we start tracking all
882         //    blockers for a given phi optimization.
883         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
884         return {Result, {}};
885       }
886 
887       // If there's nothing left to search, then all paths led to valid clobbers
888       // that we got from our cache; pick the nearest to the start, and allow
889       // the rest to be cached back.
890       if (NewPaused.empty()) {
891         MoveDominatedPathToEnd(TerminatedPaths);
892         TerminatedPath Result = TerminatedPaths.pop_back_val();
893         return {Result, std::move(TerminatedPaths)};
894       }
895 
896       MemoryAccess *DefChainEnd = nullptr;
897       SmallVector<TerminatedPath, 4> Clobbers;
898       for (ListIndex Paused : NewPaused) {
899         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
900         if (WR.IsKnownClobber)
901           Clobbers.push_back({WR.Result, Paused});
902         else
903           // Micro-opt: If we hit the end of the chain, save it.
904           DefChainEnd = WR.Result;
905       }
906 
907       if (!TerminatedPaths.empty()) {
908         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
909         // do it now.
910         if (!DefChainEnd)
911           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
912             DefChainEnd = MA;
913         assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
914 
915         // If any of the terminated paths don't dominate the phi we'll try to
916         // optimize, we need to figure out what they are and quit.
917         const BasicBlock *ChainBB = DefChainEnd->getBlock();
918         for (const TerminatedPath &TP : TerminatedPaths) {
919           // Because we know that DefChainEnd is as "high" as we can go, we
920           // don't need local dominance checks; BB dominance is sufficient.
921           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
922             Clobbers.push_back(TP);
923         }
924       }
925 
926       // If we have clobbers in the def chain, find the one closest to Current
927       // and quit.
928       if (!Clobbers.empty()) {
929         MoveDominatedPathToEnd(Clobbers);
930         TerminatedPath Result = Clobbers.pop_back_val();
931         return {Result, std::move(Clobbers)};
932       }
933 
934       assert(all_of(NewPaused,
935                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
936 
937       // Because liveOnEntry is a clobber, this must be a phi.
938       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
939 
940       PriorPathsSize = Paths.size();
941       PausedSearches.clear();
942       for (ListIndex I : NewPaused)
943         addSearches(DefChainPhi, PausedSearches, I);
944       NewPaused.clear();
945 
946       Current = DefChainPhi;
947     }
948   }
949 
950   void verifyOptResult(const OptznResult &R) const {
951     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
952       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
953     }));
954   }
955 
956   void resetPhiOptznState() {
957     Paths.clear();
958     VisitedPhis.clear();
959     PerformedPhiTranslation = false;
960   }
961 
962 public:
963   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
964       : MSSA(MSSA), AA(AA), DT(DT) {}
965 
966   AliasAnalysisType *getAA() { return &AA; }
967   /// Finds the nearest clobber for the given query, optimizing phis if
968   /// possible.
969   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
970                             unsigned &UpWalkLimit) {
971     Query = &Q;
972     UpwardWalkLimit = &UpWalkLimit;
973     // Starting limit must be > 0.
974     if (!UpWalkLimit)
975       UpWalkLimit++;
976 
977     MemoryAccess *Current = Start;
978     // This walker pretends uses don't exist. If we're handed one, silently grab
979     // its def. (This has the nice side-effect of ensuring we never cache uses)
980     if (auto *MU = dyn_cast<MemoryUse>(Start))
981       Current = MU->getDefiningAccess();
982 
983     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
984     // Fast path for the overly-common case (no crazy phi optimization
985     // necessary)
986     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
987     MemoryAccess *Result;
988     if (WalkResult.IsKnownClobber) {
989       Result = WalkResult.Result;
990       Q.AR = WalkResult.AR;
991     } else {
992       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
993                                           Current, Q.StartingLoc);
994       verifyOptResult(OptRes);
995       resetPhiOptznState();
996       Result = OptRes.PrimaryClobber.Clobber;
997     }
998 
999 #ifdef EXPENSIVE_CHECKS
1000     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
1001       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
1002 #endif
1003     return Result;
1004   }
1005 };
1006 
1007 struct RenamePassData {
1008   DomTreeNode *DTN;
1009   DomTreeNode::const_iterator ChildIt;
1010   MemoryAccess *IncomingVal;
1011 
1012   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
1013                  MemoryAccess *M)
1014       : DTN(D), ChildIt(It), IncomingVal(M) {}
1015 
1016   void swap(RenamePassData &RHS) {
1017     std::swap(DTN, RHS.DTN);
1018     std::swap(ChildIt, RHS.ChildIt);
1019     std::swap(IncomingVal, RHS.IncomingVal);
1020   }
1021 };
1022 
1023 } // end anonymous namespace
1024 
1025 namespace llvm {
1026 
1027 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1028   ClobberWalker<AliasAnalysisType> Walker;
1029   MemorySSA *MSSA;
1030 
1031 public:
1032   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1033       : Walker(*M, *A, *D), MSSA(M) {}
1034 
1035   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1036                                               const MemoryLocation &,
1037                                               unsigned &);
  // Third argument (bool) defines whether the clobber search should skip the
1039   // original queried access. If true, there will be a follow-up query searching
1040   // for a clobber access past "self". Note that the Optimized access is not
1041   // updated if a new clobber is found by this SkipSelf search. If this
1042   // additional query becomes heavily used we may decide to cache the result.
1043   // Walker instantiations will decide how to set the SkipSelf bool.
1044   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool,
1045                                               bool UseInvariantGroup = true);
1046 };
1047 
1048 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1049 /// longer does caching on its own, but the name has been retained for the
1050 /// moment.
1051 template <class AliasAnalysisType>
1052 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1053   ClobberWalkerBase<AliasAnalysisType> *Walker;
1054 
1055 public:
1056   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1057       : MemorySSAWalker(M), Walker(W) {}
1058   ~CachingWalker() override = default;
1059 
1060   using MemorySSAWalker::getClobberingMemoryAccess;
1061 
1062   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1063     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1064   }
1065   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1066                                           const MemoryLocation &Loc,
1067                                           unsigned &UWL) {
1068     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1069   }
1070   // This method is not accessible outside of this file.
1071   MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA,
1072                                                                unsigned &UWL) {
1073     return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false);
1074   }
1075 
1076   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1077     unsigned UpwardWalkLimit = MaxCheckLimit;
1078     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1079   }
1080   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1081                                           const MemoryLocation &Loc) override {
1082     unsigned UpwardWalkLimit = MaxCheckLimit;
1083     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1084   }
1085 
1086   void invalidateInfo(MemoryAccess *MA) override {
1087     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1088       MUD->resetOptimized();
1089   }
1090 };
1091 
1092 template <class AliasAnalysisType>
1093 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1094   ClobberWalkerBase<AliasAnalysisType> *Walker;
1095 
1096 public:
1097   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1098       : MemorySSAWalker(M), Walker(W) {}
1099   ~SkipSelfWalker() override = default;
1100 
1101   using MemorySSAWalker::getClobberingMemoryAccess;
1102 
1103   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1104     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1105   }
1106   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1107                                           const MemoryLocation &Loc,
1108                                           unsigned &UWL) {
1109     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1110   }
1111 
1112   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1113     unsigned UpwardWalkLimit = MaxCheckLimit;
1114     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1115   }
1116   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1117                                           const MemoryLocation &Loc) override {
1118     unsigned UpwardWalkLimit = MaxCheckLimit;
1119     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1120   }
1121 
1122   void invalidateInfo(MemoryAccess *MA) override {
1123     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1124       MUD->resetOptimized();
1125   }
1126 };
1127 
1128 } // end namespace llvm
1129 
1130 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1131                                     bool RenameAllUses) {
1132   // Pass through values to our successors
1133   for (const BasicBlock *S : successors(BB)) {
1134     auto It = PerBlockAccesses.find(S);
1135     // Rename the phi nodes in our successor block
1136     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1137       continue;
1138     AccessList *Accesses = It->second.get();
1139     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1140     if (RenameAllUses) {
1141       bool ReplacementDone = false;
1142       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1143         if (Phi->getIncomingBlock(I) == BB) {
1144           Phi->setIncomingValue(I, IncomingVal);
1145           ReplacementDone = true;
1146         }
1147       (void) ReplacementDone;
1148       assert(ReplacementDone && "Incomplete phi during partial rename");
1149     } else
1150       Phi->addIncoming(IncomingVal, BB);
1151   }
1152 }
1153 
1154 /// Rename a single basic block into MemorySSA form.
1155 /// Uses the standard SSA renaming algorithm.
1156 /// \returns The new incoming value.
1157 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1158                                      bool RenameAllUses) {
1159   auto It = PerBlockAccesses.find(BB);
1160   // Skip most processing if the list is empty.
1161   if (It != PerBlockAccesses.end()) {
1162     AccessList *Accesses = It->second.get();
1163     for (MemoryAccess &L : *Accesses) {
1164       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1165         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1166           MUD->setDefiningAccess(IncomingVal);
1167         if (isa<MemoryDef>(&L))
1168           IncomingVal = &L;
1169       } else {
1170         IncomingVal = &L;
1171       }
1172     }
1173   }
1174   return IncomingVal;
1175 }
1176 
1177 /// This is the standard SSA renaming algorithm.
1178 ///
1179 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1180 /// in phi nodes in our successors.
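///
/// For example (illustrative): with a CFG entry -> {then, else} -> merge and a
/// store only in 'then', renaming leaves 'else' with entry's incoming value
/// while 'then' produces a new MemoryDef; renameSuccessorPhis then records one
/// incoming value per predecessor on the MemoryPhi in 'merge'.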
1181 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1182                            SmallPtrSetImpl<BasicBlock *> &Visited,
1183                            bool SkipVisited, bool RenameAllUses) {
1184   assert(Root && "Trying to rename accesses in an unreachable block");
1185 
1186   SmallVector<RenamePassData, 32> WorkStack;
1187   // Skip everything if we already renamed this block and we are skipping.
1188   // Note: You can't sink this into the if, because we need it to occur
1189   // regardless of whether we skip blocks or not.
1190   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1191   if (SkipVisited && AlreadyVisited)
1192     return;
1193 
1194   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1195   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1196   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1197 
1198   while (!WorkStack.empty()) {
1199     DomTreeNode *Node = WorkStack.back().DTN;
1200     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1201     IncomingVal = WorkStack.back().IncomingVal;
1202 
1203     if (ChildIt == Node->end()) {
1204       WorkStack.pop_back();
1205     } else {
1206       DomTreeNode *Child = *ChildIt;
1207       ++WorkStack.back().ChildIt;
1208       BasicBlock *BB = Child->getBlock();
1209       // Note: You can't sink this into the if, because we need it to occur
1210       // regardless of whether we skip blocks or not.
1211       AlreadyVisited = !Visited.insert(BB).second;
1212       if (SkipVisited && AlreadyVisited) {
1213         // We already visited this during our renaming, which can happen when
1214         // being asked to rename multiple blocks. Figure out the incoming val,
1215         // which is the last def.
1216         // Incoming value can only change if there is a block def, and in that
1217         // case, it's the last block def in the list.
1218         if (auto *BlockDefs = getWritableBlockDefs(BB))
1219           IncomingVal = &*BlockDefs->rbegin();
1220       } else
1221         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1222       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1223       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1224     }
1225   }
1226 }
1227 
1228 /// This handles unreachable block accesses by deleting phi nodes in
1229 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1230 /// being uses of the live on entry definition.
1231 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1232   assert(!DT->isReachableFromEntry(BB) &&
1233          "Reachable block found while handling unreachable blocks");
1234 
1235   // Make sure phi nodes in our reachable successors end up with a
1236   // LiveOnEntryDef for our incoming edge, even though our block is forward
1237   // unreachable.  We could just disconnect these blocks from the CFG fully,
1238   // but we do not right now.
1239   for (const BasicBlock *S : successors(BB)) {
1240     if (!DT->isReachableFromEntry(S))
1241       continue;
1242     auto It = PerBlockAccesses.find(S);
1243     // Rename the phi nodes in our successor block
1244     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1245       continue;
1246     AccessList *Accesses = It->second.get();
1247     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1248     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1249   }
1250 
1251   auto It = PerBlockAccesses.find(BB);
1252   if (It == PerBlockAccesses.end())
1253     return;
1254 
1255   auto &Accesses = It->second;
1256   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1257     auto Next = std::next(AI);
1258     // If we have a phi, just remove it. We are going to replace all
1259     // users with live on entry.
1260     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1261       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1262     else
1263       Accesses->erase(AI);
1264     AI = Next;
1265   }
1266 }
1267 
1268 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1269     : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1270       SkipWalker(nullptr) {
1271   // Build MemorySSA using a batch alias analysis. This reuses the internal
1272   // state that AA collects during an alias()/getModRefInfo() call. This is
1273   // safe because there are no CFG changes while building MemorySSA and can
1274   // significantly reduce the time spent by the compiler in AA, because we will
1275   // make queries about all the instructions in the Function.
1276   assert(AA && "No alias analysis?");
1277   BatchAAResults BatchAA(*AA);
1278   buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use the non-batch AliasAnalysis.
1281   this->AA = AA;
1282   // Also create the walker here.
1283   getWalker();
1284 }
1285 
1286 MemorySSA::~MemorySSA() {
1287   // Drop all our references
1288   for (const auto &Pair : PerBlockAccesses)
1289     for (MemoryAccess &MA : *Pair.second)
1290       MA.dropAllReferences();
1291 }
1292 
1293 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1294   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1295 
1296   if (Res.second)
1297     Res.first->second = std::make_unique<AccessList>();
1298   return Res.first->second.get();
1299 }
1300 
1301 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1302   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1303 
1304   if (Res.second)
1305     Res.first->second = std::make_unique<DefsList>();
1306   return Res.first->second.get();
1307 }
1308 
1309 namespace llvm {
1310 
1311 /// This class is a batch walker of all MemoryUse's in the program, and points
1312 /// their defining access at the thing that actually clobbers them.  Because it
1313 /// is a batch walker that touches everything, it does not operate like the
1314 /// other walkers.  This walker is basically performing a top-down SSA renaming
1315 /// pass, where the version stack is used as the cache.  This enables it to be
1316 /// significantly more time and memory efficient than using the regular walker,
1317 /// which is walking bottom-up.
1318 class MemorySSA::OptimizeUses {
1319 public:
1320   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1321                BatchAAResults *BAA, DominatorTree *DT)
1322       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1323 
1324   void optimizeUses();
1325 
1326 private:
1327   /// This represents where a given memorylocation is in the stack.
1328   struct MemlocStackInfo {
1329     // This essentially is keeping track of versions of the stack. Whenever
1330     // the stack changes due to pushes or pops, these versions increase.
1331     unsigned long StackEpoch;
1332     unsigned long PopEpoch;
1333     // This is the lower bound of places on the stack to check. It is equal to
1334     // the place the last stack walk ended.
1335     // Note: Correctness depends on this being initialized to 0, which densemap
1336     // does
1337     unsigned long LowerBound;
1338     const BasicBlock *LowerBoundBlock;
1339     // This is where the last walk for this memory location ended.
1340     unsigned long LastKill;
1341     bool LastKillValid;
1342     Optional<AliasResult> AR;
1343   };
1344 
1345   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1346                            SmallVectorImpl<MemoryAccess *> &,
1347                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1348 
1349   MemorySSA *MSSA;
1350   CachingWalker<BatchAAResults> *Walker;
1351   BatchAAResults *AA;
1352   DominatorTree *DT;
1353 };
1354 
1355 } // end namespace llvm
1356 
/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: we are able to use a single stack for all
/// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs to check, and
/// where its last walk ended.  This is because we only want to check the
/// things that changed since last time.  The same MemoryLocation should get
/// clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if it starts to, we can modify MemoryLocOrCall to
/// include the relevant data).
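///
/// As a rough sketch of the bookkeeping: if a location was last checked when
/// the version stack was [liveOnEntry, 1, 2] and nothing dominating us has
/// since been popped, a later query against [liveOnEntry, 1, 2, 3, 4] only
/// needs to examine accesses 3 and 4; everything at or below the recorded
/// lower bound has already been checked.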
1369 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1370     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1371     SmallVectorImpl<MemoryAccess *> &VersionStack,
1372     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1373 
  // If there are no accesses, there is nothing to do.
1375   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1376   if (Accesses == nullptr)
1377     return;
1378 
  // Pop everything that doesn't dominate the current block off the stack, and
  // increment the PopEpoch to account for this.
1381   while (true) {
1382     assert(
1383         !VersionStack.empty() &&
1384         "Version stack should have liveOnEntry sentinel dominating everything");
1385     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1386     if (DT->dominates(BackBlock, BB))
1387       break;
1388     while (VersionStack.back()->getBlock() == BackBlock)
1389       VersionStack.pop_back();
1390     ++PopEpoch;
1391   }
1392 
1393   for (MemoryAccess &MA : *Accesses) {
1394     auto *MU = dyn_cast<MemoryUse>(&MA);
1395     if (!MU) {
1396       VersionStack.push_back(&MA);
1397       ++StackEpoch;
1398       continue;
1399     }
1400 
1401     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1402       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1403       continue;
1404     }
1405 
1406     MemoryLocOrCall UseMLOC(MU);
1407     auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound
    // or last kill info.
1411     if (LocInfo.PopEpoch != PopEpoch) {
1412       LocInfo.PopEpoch = PopEpoch;
1413       LocInfo.StackEpoch = StackEpoch;
1414       // If the lower bound was in something that no longer dominates us, we
1415       // have to reset it.
1416       // We can't simply track stack size, because the stack may have had
1417       // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  To get the optimal number of queries we
      // would have to make LowerBound and LastKill a per-location stack, and
      // pop it until the top of that stack dominates us.  This does not seem
      // worth it ATM.
1422       // A much cheaper optimization would be to always explore the deepest
1423       // branch of the dominator tree first. This will guarantee this resets on
1424       // the smallest set of blocks.
1425       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1426           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1427         // Reset the lower bound of things to check.
1428         // TODO: Some day we should be able to reset to last kill, rather than
1429         // 0.
1430         LocInfo.LowerBound = 0;
1431         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1432         LocInfo.LastKillValid = false;
1433       }
1434     } else if (LocInfo.StackEpoch != StackEpoch) {
1435       // If all that has changed is the StackEpoch, we only have to check the
1436       // new things on the stack, because we've checked everything before.  In
1437       // this case, the lower bound of things to check remains the same.
1438       LocInfo.PopEpoch = PopEpoch;
1439       LocInfo.StackEpoch = StackEpoch;
1440     }
1441     if (!LocInfo.LastKillValid) {
1442       LocInfo.LastKill = VersionStack.size() - 1;
1443       LocInfo.LastKillValid = true;
1444       LocInfo.AR = AliasResult::MayAlias;
1445     }
1446 
    // At this point, we should have corrected LastKill and LowerBound to be
    // in bounds.
1449     assert(LocInfo.LowerBound < VersionStack.size() &&
1450            "Lower bound out of range");
1451     assert(LocInfo.LastKill < VersionStack.size() &&
1452            "Last kill info out of range");
1453     // In any case, the new upper bound is the top of the stack.
1454     unsigned long UpperBound = VersionStack.size() - 1;
1455 
1456     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1457       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1458                         << *(MU->getMemoryInst()) << ")"
1459                         << " because there are "
1460                         << UpperBound - LocInfo.LowerBound
1461                         << " stores to disambiguate\n");
1462       // Because we did not walk, LastKill is no longer valid, as this may
1463       // have been a kill.
1464       LocInfo.LastKillValid = false;
1465       continue;
1466     }
1467     bool FoundClobberResult = false;
1468     unsigned UpwardWalkLimit = MaxCheckLimit;
1469     while (UpperBound > LocInfo.LowerBound) {
1470       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1471         // For phis, use the walker, see where we ended up, go there.
1472         // The invariant.group handling in MemorySSA is ad-hoc and doesn't
1473         // support updates, so don't use it to optimize uses.
1474         MemoryAccess *Result =
1475             Walker->getClobberingMemoryAccessWithoutInvariantGroup(
1476                 MU, UpwardWalkLimit);
1477         // We are guaranteed to find it or something is wrong.
1478         while (VersionStack[UpperBound] != Result) {
1479           assert(UpperBound != 0);
1480           --UpperBound;
1481         }
1482         FoundClobberResult = true;
1483         break;
1484       }
1485 
1486       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1487       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1488       if (CA.IsClobber) {
1489         FoundClobberResult = true;
1490         LocInfo.AR = CA.AR;
1491         break;
1492       }
1493       --UpperBound;
1494     }
1495 
1496     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1497 
    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound.  PHI walking may cause it to be < LowerBound, and in fact
    // < LastKill.
1500     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We are now last killed by wherever we got to.
1502       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1503         LocInfo.AR = None;
1504       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1505       LocInfo.LastKill = UpperBound;
1506     } else {
1507       // Otherwise, we checked all the new ones, and now we know we can get to
1508       // LastKill.
1509       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1510     }
1511     LocInfo.LowerBound = VersionStack.size() - 1;
1512     LocInfo.LowerBoundBlock = BB;
1513   }
1514 }
1515 
1516 /// Optimize uses to point to their actual clobbering definitions.
1517 void MemorySSA::OptimizeUses::optimizeUses() {
1518   SmallVector<MemoryAccess *, 16> VersionStack;
1519   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1520   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1521 
1522   unsigned long StackEpoch = 1;
1523   unsigned long PopEpoch = 1;
1524   // We perform a non-recursive top-down dominator tree walk.
1525   for (const auto *DomNode : depth_first(DT->getRootNode()))
1526     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1527                         LocStackInfo);
1528 }
1529 
1530 void MemorySSA::placePHINodes(
1531     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
1533   ForwardIDFCalculator IDFs(*DT);
1534   IDFs.setDefiningBlocks(DefiningBlocks);
1535   SmallVector<BasicBlock *, 32> IDFBlocks;
1536   IDFs.calculate(IDFBlocks);
1537 
1538   // Now place MemoryPhi nodes.
1539   for (auto &BB : IDFBlocks)
1540     createMemoryPhi(BB);
1541 }
1542 
1543 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1544   // We create an access to represent "live on entry", for things like
1545   // arguments or users of globals, where the memory they use is defined before
1546   // the beginning of the function. We do not actually insert it into the IR.
1547   // We do not define a live on exit for the immediate uses, and thus our
1548   // semantics do *not* imply that something with no immediate uses can simply
1549   // be removed.
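  // (Illustratively, a MemoryUse whose defining access is this def prints as
  // "MemoryUse(liveOnEntry)" in the annotated IR dump.)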
1550   BasicBlock &StartingPoint = F.getEntryBlock();
1551   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1552                                      &StartingPoint, NextID++));
1553 
  // We maintain lists of memory accesses per block, trading memory for time.
  // We could instead just look up the memory access for every possible
  // instruction in the stream.
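  // (For example, a later getBlockAccesses(&B) call is a single map lookup
  // rather than a rescan of the block's instructions.)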
1557   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1558   // Go through each block, figure out where defs occur, and chain together all
1559   // the accesses.
1560   for (BasicBlock &B : F) {
1561     bool InsertIntoDef = false;
1562     AccessList *Accesses = nullptr;
1563     DefsList *Defs = nullptr;
1564     for (Instruction &I : B) {
1565       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1566       if (!MUD)
1567         continue;
1568 
1569       if (!Accesses)
1570         Accesses = getOrCreateAccessList(&B);
1571       Accesses->push_back(MUD);
1572       if (isa<MemoryDef>(MUD)) {
1573         InsertIntoDef = true;
1574         if (!Defs)
1575           Defs = getOrCreateDefsList(&B);
1576         Defs->push_back(*MUD);
1577       }
1578     }
1579     if (InsertIntoDef)
1580       DefiningBlocks.insert(&B);
1581   }
1582   placePHINodes(DefiningBlocks);
1583 
  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all reachable blocks.
1586   SmallPtrSet<BasicBlock *, 16> Visited;
1587   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1588 
1589   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BAA, DT);
1590   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
1591   OptimizeUses(this, &WalkerLocal, &BAA, DT).optimizeUses();
1592 
1593   // Mark the uses in unreachable blocks as live on entry, so that they go
1594   // somewhere.
1595   for (auto &BB : F)
1596     if (!Visited.count(&BB))
1597       markUnreachableAsLiveOnEntry(&BB);
1598 }
1599 
1600 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1601 
1602 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1603   if (Walker)
1604     return Walker.get();
1605 
1606   if (!WalkerBase)
1607     WalkerBase =
1608         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1609 
1610   Walker =
1611       std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1612   return Walker.get();
1613 }
1614 
1615 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1616   if (SkipWalker)
1617     return SkipWalker.get();
1618 
1619   if (!WalkerBase)
1620     WalkerBase =
1621         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1622 
1623   SkipWalker =
1624       std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1625   return SkipWalker.get();
1626  }
1627 
1628 
1629 // This is a helper function used by the creation routines. It places NewAccess
1630 // into the access and defs lists for a given basic block, at the given
1631 // insertion point.
1632 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1633                                         const BasicBlock *BB,
1634                                         InsertionPlace Point) {
1635   auto *Accesses = getOrCreateAccessList(BB);
1636   if (Point == Beginning) {
1637     // If it's a phi node, it goes first, otherwise, it goes after any phi
1638     // nodes.
1639     if (isa<MemoryPhi>(NewAccess)) {
1640       Accesses->push_front(NewAccess);
1641       auto *Defs = getOrCreateDefsList(BB);
1642       Defs->push_front(*NewAccess);
1643     } else {
1644       auto AI = find_if_not(
1645           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1646       Accesses->insert(AI, NewAccess);
1647       if (!isa<MemoryUse>(NewAccess)) {
1648         auto *Defs = getOrCreateDefsList(BB);
1649         auto DI = find_if_not(
1650             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1651         Defs->insert(DI, *NewAccess);
1652       }
1653     }
1654   } else {
1655     Accesses->push_back(NewAccess);
1656     if (!isa<MemoryUse>(NewAccess)) {
1657       auto *Defs = getOrCreateDefsList(BB);
1658       Defs->push_back(*NewAccess);
1659     }
1660   }
1661   BlockNumberingValid.erase(BB);
1662 }
1663 
1664 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1665                                       AccessList::iterator InsertPt) {
1666   auto *Accesses = getWritableBlockAccesses(BB);
1667   bool WasEnd = InsertPt == Accesses->end();
1668   Accesses->insert(AccessList::iterator(InsertPt), What);
1669   if (!isa<MemoryUse>(What)) {
1670     auto *Defs = getOrCreateDefsList(BB);
1671     // If we got asked to insert at the end, we have an easy job, just shove it
1672     // at the end. If we got asked to insert before an existing def, we also get
1673     // an iterator. If we got asked to insert before a use, we have to hunt for
1674     // the next def.
1675     if (WasEnd) {
1676       Defs->push_back(*What);
1677     } else if (isa<MemoryDef>(InsertPt)) {
1678       Defs->insert(InsertPt->getDefsIterator(), *What);
1679     } else {
1680       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1681         ++InsertPt;
1682       // Either we found a def, or we are inserting at the end
1683       if (InsertPt == Accesses->end())
1684         Defs->push_back(*What);
1685       else
1686         Defs->insert(InsertPt->getDefsIterator(), *What);
1687     }
1688   }
1689   BlockNumberingValid.erase(BB);
1690 }
1691 
1692 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1693   // Keep it in the lookup tables, remove from the lists
1694   removeFromLists(What, false);
1695 
1696   // Note that moving should implicitly invalidate the optimized state of a
1697   // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1698   // MemoryDef.
1699   if (auto *MD = dyn_cast<MemoryDef>(What))
1700     MD->resetOptimized();
1701   What->setBlock(BB);
1702 }
1703 
1704 // Move What before Where in the IR.  The end result is that What will belong to
1705 // the right lists and have the right Block set, but will not otherwise be
1706 // correct. It will not have the right defining access, and if it is a def,
1707 // things below it will not properly be updated.
1708 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1709                        AccessList::iterator Where) {
1710   prepareForMoveTo(What, BB);
1711   insertIntoListsBefore(What, BB, Where);
1712 }
1713 
1714 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1715                        InsertionPlace Point) {
1716   if (isa<MemoryPhi>(What)) {
1717     assert(Point == Beginning &&
1718            "Can only move a Phi at the beginning of the block");
1719     // Update lookup table entry
1720     ValueToMemoryAccess.erase(What->getBlock());
1721     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1722     (void)Inserted;
1723     assert(Inserted && "Cannot move a Phi to a block that already has one");
1724   }
1725 
1726   prepareForMoveTo(What, BB);
1727   insertIntoListsForBlock(What, BB, Point);
1728 }
1729 
1730 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1731   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1732   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
1734   insertIntoListsForBlock(Phi, BB, Beginning);
1735   ValueToMemoryAccess[BB] = Phi;
1736   return Phi;
1737 }
1738 
1739 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1740                                                MemoryAccess *Definition,
1741                                                const MemoryUseOrDef *Template,
1742                                                bool CreationMustSucceed) {
1743   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1744   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1745   if (CreationMustSucceed)
1746     assert(NewAccess != nullptr && "Tried to create a memory access for a "
1747                                    "non-memory touching instruction");
1748   if (NewAccess) {
1749     assert((!Definition || !isa<MemoryUse>(Definition)) &&
1750            "A use cannot be a defining access");
1751     NewAccess->setDefiningAccess(Definition);
1752   }
1753   return NewAccess;
1754 }
1755 
1756 // Return true if the instruction has ordering constraints.
1757 // Note specifically that this only considers stores and loads
1758 // because others are still considered ModRef by getModRefInfo.
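// For instance, a volatile or atomic (non-unordered) load is "ordered" here,
// so createNewAccess below models it as a MemoryDef rather than a MemoryUse.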
1759 static inline bool isOrdered(const Instruction *I) {
1760   if (auto *SI = dyn_cast<StoreInst>(I)) {
1761     if (!SI->isUnordered())
1762       return true;
1763   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1764     if (!LI->isUnordered())
1765       return true;
1766   }
1767   return false;
1768 }
1769 
1770 /// Helper function to create new memory accesses
1771 template <typename AliasAnalysisType>
1772 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1773                                            AliasAnalysisType *AAP,
1774                                            const MemoryUseOrDef *Template) {
1775   // The assume intrinsic has a control dependency which we model by claiming
1776   // that it writes arbitrarily. Debuginfo intrinsics may be considered
1777   // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1778   // dependencies here.
1779   // FIXME: Replace this special casing with a more accurate modelling of
1780   // assume's control dependency.
1781   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1782     switch (II->getIntrinsicID()) {
1783     default:
1784       break;
1785     case Intrinsic::assume:
1786     case Intrinsic::experimental_noalias_scope_decl:
1787     case Intrinsic::pseudoprobe:
1788       return nullptr;
1789     }
1790   }
1791 
  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that may not read
  // from or write to memory. This is necessary for correctness.
1795   if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1796     return nullptr;
1797 
1798   bool Def, Use;
1799   if (Template) {
1800     Def = isa<MemoryDef>(Template);
1801     Use = isa<MemoryUse>(Template);
1802 #if !defined(NDEBUG)
1803     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1804     bool DefCheck, UseCheck;
1805     DefCheck = isModSet(ModRef) || isOrdered(I);
1806     UseCheck = isRefSet(ModRef);
1807     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1808 #endif
1809   } else {
    // Find out what effect this instruction has on memory.
1811     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1812     // The isOrdered check is used to ensure that volatiles end up as defs
1813     // (atomics end up as ModRef right now anyway).  Until we separate the
1814     // ordering chain from the memory chain, this enables people to see at least
1815     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1816     // will still give an answer that bypasses other volatile loads.  TODO:
1817     // Separate memory aliasing and ordering into two different chains so that
1818     // we can precisely represent both "what memory will this read/write/is
1819     // clobbered by" and "what instructions can I move this past".
1820     Def = isModSet(ModRef) || isOrdered(I);
1821     Use = isRefSet(ModRef);
1822   }
1823 
  // It's possible for an instruction to not modify memory at all. During
  // construction, we ignore such instructions.
1826   if (!Def && !Use)
1827     return nullptr;
1828 
1829   MemoryUseOrDef *MUD;
1830   if (Def)
1831     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1832   else
1833     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1834   ValueToMemoryAccess[I] = MUD;
1835   return MUD;
1836 }
1837 
1838 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1839 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1840   assert(MA->use_empty() &&
1841          "Trying to remove memory access that still has uses");
1842   BlockNumbering.erase(MA);
1843   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1844     MUD->setDefiningAccess(nullptr);
1845   // Invalidate our walker's cache if necessary
1846   if (!isa<MemoryUse>(MA))
1847     getWalker()->invalidateInfo(MA);
1848 
1849   Value *MemoryInst;
1850   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1851     MemoryInst = MUD->getMemoryInst();
1852   else
1853     MemoryInst = MA->getBlock();
1854 
1855   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1856   if (VMA->second == MA)
1857     ValueToMemoryAccess.erase(VMA);
1858 }
1859 
1860 /// Properly remove \p MA from all of MemorySSA's lists.
1861 ///
1862 /// Because of the way the intrusive list and use lists work, it is important to
1863 /// do removal in the right order.
1864 /// ShouldDelete defaults to true, and will cause the memory access to also be
1865 /// deleted, not just removed.
1866 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1867   BasicBlock *BB = MA->getBlock();
1868   // The access list owns the reference, so we erase it from the non-owning list
1869   // first.
1870   if (!isa<MemoryUse>(MA)) {
1871     auto DefsIt = PerBlockDefs.find(BB);
1872     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1873     Defs->remove(*MA);
1874     if (Defs->empty())
1875       PerBlockDefs.erase(DefsIt);
1876   }
1877 
1878   // The erase call here will delete it. If we don't want it deleted, we call
1879   // remove instead.
1880   auto AccessIt = PerBlockAccesses.find(BB);
1881   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1882   if (ShouldDelete)
1883     Accesses->erase(MA);
1884   else
1885     Accesses->remove(MA);
1886 
1887   if (Accesses->empty()) {
1888     PerBlockAccesses.erase(AccessIt);
1889     BlockNumberingValid.erase(BB);
1890   }
1891 }
1892 
1893 void MemorySSA::print(raw_ostream &OS) const {
1894   MemorySSAAnnotatedWriter Writer(this);
1895   F.print(OS, &Writer);
1896 }
1897 
1898 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1899 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1900 #endif
1901 
1902 void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
1903 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1904   VL = VerificationLevel::Full;
1905 #endif
1906 
1907 #ifndef NDEBUG
1908   verifyOrderingDominationAndDefUses(F, VL);
1909   verifyDominationNumbers(F);
1910   if (VL == VerificationLevel::Full)
1911     verifyPrevDefInPhis(F);
1912 #endif
  // Previously, the verification used to also verify that the clobbering
  // access cached by MemorySSA is the same as the clobbering access found by
  // a later query to AA. This does not hold true in general due to the
  // current fragility of BasicAA, which has arbitrary caps on the things it
  // analyzes before giving up. As a result, transformations that are correct
  // can lead to BasicAA returning different alias answers before and after
  // that transformation. Invalidating MemorySSA is not an option, because the
  // results from BasicAA can be so unpredictable that, in the worst case,
  // we'd need to rebuild MemorySSA from scratch after every transformation,
  // which defeats the purpose of using it. For such an example, see test4
  // added in D51960.
1923 }
1924 
1925 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1926   for (const BasicBlock &BB : F) {
1927     if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1928       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1929         auto *Pred = Phi->getIncomingBlock(I);
1930         auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, get the last def by looking
        // at IDoms. If, while walking IDoms, any of these has an unreachable
        // predecessor, then the incoming def can be any access.
1934         if (auto *DTNode = DT->getNode(Pred)) {
1935           while (DTNode) {
1936             if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1937               auto *LastAcc = &*(--DefList->end());
1938               assert(LastAcc == IncAcc &&
1939                      "Incorrect incoming access into phi.");
1940               (void)IncAcc;
1941               (void)LastAcc;
1942               break;
1943             }
1944             DTNode = DTNode->getIDom();
1945           }
1946         } else {
1947           // If Pred has unreachable predecessors, but has at least a Def, the
1948           // incoming access can be the last Def in Pred, or it could have been
1949           // optimized to LoE. After an update, though, the LoE may have been
1950           // replaced by another access, so IncAcc may be any access.
1951           // If Pred has unreachable predecessors and no Defs, incoming access
1952           // should be LoE; However, after an update, it may be any access.
1953         }
1954       }
1955     }
1956   }
1957 }
1958 
1959 /// Verify that all of the blocks we believe to have valid domination numbers
1960 /// actually have valid domination numbers.
1961 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1962   if (BlockNumberingValid.empty())
1963     return;
1964 
1965   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1966   for (const BasicBlock &BB : F) {
1967     if (!ValidBlocks.count(&BB))
1968       continue;
1969 
1970     ValidBlocks.erase(&BB);
1971 
1972     const AccessList *Accesses = getBlockAccesses(&BB);
1973     // It's correct to say an empty block has valid numbering.
1974     if (!Accesses)
1975       continue;
1976 
1977     // Block numbering starts at 1.
1978     unsigned long LastNumber = 0;
1979     for (const MemoryAccess &MA : *Accesses) {
1980       auto ThisNumberIter = BlockNumbering.find(&MA);
1981       assert(ThisNumberIter != BlockNumbering.end() &&
1982              "MemoryAccess has no domination number in a valid block!");
1983 
1984       unsigned long ThisNumber = ThisNumberIter->second;
1985       assert(ThisNumber > LastNumber &&
1986              "Domination numbers should be strictly increasing!");
1987       (void)LastNumber;
1988       LastNumber = ThisNumber;
1989     }
1990   }
1991 
1992   assert(ValidBlocks.empty() &&
1993          "All valid BasicBlocks should exist in F -- dangling pointers?");
1994 }
1995 
1996 /// Verify ordering: the order and existence of MemoryAccesses matches the
1997 /// order and existence of memory affecting instructions.
1998 /// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verify that, for each use, it appears in the appropriate
/// def's use list.
2002 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
2003                                                    VerificationLevel VL) const {
2004   // Walk all the blocks, comparing what the lookups think and what the access
2005   // lists think, as well as the order in the blocks vs the order in the access
2006   // lists.
2007   SmallVector<MemoryAccess *, 32> ActualAccesses;
2008   SmallVector<MemoryAccess *, 32> ActualDefs;
2009   for (BasicBlock &B : F) {
2010     const AccessList *AL = getBlockAccesses(&B);
2011     const auto *DL = getBlockDefs(&B);
2012     MemoryPhi *Phi = getMemoryAccess(&B);
2013     if (Phi) {
2014       // Verify ordering.
2015       ActualAccesses.push_back(Phi);
2016       ActualDefs.push_back(Phi);
2017       // Verify domination
2018       for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2020         (void)U;
2021       }
2022       // Verify def-uses for full verify.
2023       if (VL == VerificationLevel::Full) {
2024         assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2025                                             pred_begin(&B), pred_end(&B))) &&
2026                "Incomplete MemoryPhi Node");
2027         for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2028           verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2029           assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2030                  "Incoming phi block not a block predecessor");
2031         }
2032       }
2033     }
2034 
2035     for (Instruction &I : B) {
2036       MemoryUseOrDef *MA = getMemoryAccess(&I);
2037       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2038              "We have memory affecting instructions "
2039              "in this block but they are not in the "
2040              "access list or defs list");
2041       if (MA) {
2042         // Verify ordering.
2043         ActualAccesses.push_back(MA);
2044         if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2045           // Verify ordering.
2046           ActualDefs.push_back(MA);
2047           // Verify domination.
2048           for (const Use &U : MD->uses()) {
2049             assert(dominates(MD, U) &&
2050                    "Memory Def does not dominate it's uses");
2051             (void)U;
2052           }
2053         }
2054         // Verify def-uses for full verify.
2055         if (VL == VerificationLevel::Full)
2056           verifyUseInDefs(MA->getDefiningAccess(), MA);
2057       }
2058     }
2059     // Either we hit the assert, really have no accesses, or we have both
2060     // accesses and an access list. Same with defs.
2061     if (!AL && !DL)
2062       continue;
2063     // Verify ordering.
2064     assert(AL->size() == ActualAccesses.size() &&
2065            "We don't have the same number of accesses in the block as on the "
2066            "access list");
2067     assert((DL || ActualDefs.size() == 0) &&
2068            "Either we should have a defs list, or we should have no defs");
2069     assert((!DL || DL->size() == ActualDefs.size()) &&
2070            "We don't have the same number of defs in the block as on the "
2071            "def list");
2072     auto ALI = AL->begin();
2073     auto AAI = ActualAccesses.begin();
2074     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2075       assert(&*ALI == *AAI && "Not the same accesses in the same order");
2076       ++ALI;
2077       ++AAI;
2078     }
2079     ActualAccesses.clear();
2080     if (DL) {
2081       auto DLI = DL->begin();
2082       auto ADI = ActualDefs.begin();
2083       while (DLI != DL->end() && ADI != ActualDefs.end()) {
2084         assert(&*DLI == *ADI && "Not the same defs in the same order");
2085         ++DLI;
2086         ++ADI;
2087       }
2088     }
2089     ActualDefs.clear();
2090   }
2091 }
2092 
2093 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2094 /// appears in the use list of \p Def.
2095 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live on entry def");
2100   else
2101     assert(is_contained(Def->users(), Use) &&
2102            "Did not find use in def's use list");
2103 }
2104 
2105 /// Perform a local numbering on blocks so that instruction ordering can be
2106 /// determined in constant time.
2107 /// TODO: We currently just number in order.  If we numbered by N, we could
2108 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2109 /// log2(N) sequences of mixed before and after) without needing to invalidate
2110 /// the numbering.
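/// (A sketch of that TODO, not current behavior: numbering accesses 10, 20,
/// 30 would let a new access slot in between two neighbors as, say, 15
/// without renumbering the whole block.)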
2111 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2112   // The pre-increment ensures the numbers really start at 1.
2113   unsigned long CurrentNumber = 0;
2114   const AccessList *AL = getBlockAccesses(B);
2115   assert(AL != nullptr && "Asking to renumber an empty block");
2116   for (const auto &I : *AL)
2117     BlockNumbering[&I] = ++CurrentNumber;
2118   BlockNumberingValid.insert(B);
2119 }
2120 
2121 /// Determine, for two memory accesses in the same block,
2122 /// whether \p Dominator dominates \p Dominatee.
2123 /// \returns True if \p Dominator dominates \p Dominatee.
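/// For example, in a single block containing "1 = MemoryDef(liveOnEntry)"
/// followed by "MemoryUse(1)", the def locally dominates the use because it
/// receives the smaller local number.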
2124 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2125                                  const MemoryAccess *Dominatee) const {
2126   const BasicBlock *DominatorBlock = Dominator->getBlock();
2127 
2128   assert((DominatorBlock == Dominatee->getBlock()) &&
2129          "Asking for local domination when accesses are in different blocks!");
2130   // A node dominates itself.
2131   if (Dominatee == Dominator)
2132     return true;
2133 
2134   // When Dominatee is defined on function entry, it is not dominated by another
2135   // memory access.
2136   if (isLiveOnEntryDef(Dominatee))
2137     return false;
2138 
2139   // When Dominator is defined on function entry, it dominates the other memory
2140   // access.
2141   if (isLiveOnEntryDef(Dominator))
2142     return true;
2143 
2144   if (!BlockNumberingValid.count(DominatorBlock))
2145     renumberBlock(DominatorBlock);
2146 
2147   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
2149   assert(DominatorNum != 0 && "Block was not numbered properly");
2150   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2151   assert(DominateeNum != 0 && "Block was not numbered properly");
2152   return DominatorNum < DominateeNum;
2153 }
2154 
2155 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2156                           const MemoryAccess *Dominatee) const {
2157   if (Dominator == Dominatee)
2158     return true;
2159 
2160   if (isLiveOnEntryDef(Dominatee))
2161     return false;
2162 
2163   if (Dominator->getBlock() != Dominatee->getBlock())
2164     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2165   return locallyDominates(Dominator, Dominatee);
2166 }
2167 
2168 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2169                           const Use &Dominatee) const {
2170   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2171     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2172     // The def must dominate the incoming block of the phi.
2173     if (UseBB != Dominator->getBlock())
2174       return DT->dominates(Dominator->getBlock(), UseBB);
2175     // If the UseBB and the DefBB are the same, compare locally.
2176     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2177   }
2178   // If it's not a PHI node use, the normal dominates can already handle it.
2179   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2180 }
2181 
2182 void MemoryAccess::print(raw_ostream &OS) const {
2183   switch (getValueID()) {
2184   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2185   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2186   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2187   }
2188   llvm_unreachable("invalid value id");
2189 }
2190 
2191 void MemoryDef::print(raw_ostream &OS) const {
2192   MemoryAccess *UO = getDefiningAccess();
2193 
2194   auto printID = [&OS](MemoryAccess *A) {
2195     if (A && A->getID())
2196       OS << A->getID();
2197     else
2198       OS << LiveOnEntryStr;
2199   };
2200 
2201   OS << getID() << " = MemoryDef(";
2202   printID(UO);
2203   OS << ")";
2204 
2205   if (isOptimized()) {
2206     OS << "->";
2207     printID(getOptimized());
2208 
2209     if (Optional<AliasResult> AR = getOptimizedAccessType())
2210       OS << " " << *AR;
2211   }
2212 }
2213 
2214 void MemoryPhi::print(raw_ostream &OS) const {
2215   ListSeparator LS(",");
2216   OS << getID() << " = MemoryPhi(";
2217   for (const auto &Op : operands()) {
2218     BasicBlock *BB = getIncomingBlock(Op);
2219     MemoryAccess *MA = cast<MemoryAccess>(Op);
2220 
2221     OS << LS << '{';
2222     if (BB->hasName())
2223       OS << BB->getName();
2224     else
2225       BB->printAsOperand(OS, false);
2226     OS << ',';
2227     if (unsigned ID = MA->getID())
2228       OS << ID;
2229     else
2230       OS << LiveOnEntryStr;
2231     OS << '}';
2232   }
2233   OS << ')';
2234 }
2235 
2236 void MemoryUse::print(raw_ostream &OS) const {
2237   MemoryAccess *UO = getDefiningAccess();
2238   OS << "MemoryUse(";
2239   if (UO && UO->getID())
2240     OS << UO->getID();
2241   else
2242     OS << LiveOnEntryStr;
2243   OS << ')';
2244 
2245   if (Optional<AliasResult> AR = getOptimizedAccessType())
2246     OS << " " << *AR;
2247 }
2248 
2249 void MemoryAccess::dump() const {
2250 // Cannot completely remove virtual function even in release mode.
2251 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2252   print(dbgs());
2253   dbgs() << "\n";
2254 #endif
2255 }
2256 
2257 char MemorySSAPrinterLegacyPass::ID = 0;
2258 
2259 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2260   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2261 }
2262 
2263 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2264   AU.setPreservesAll();
2265   AU.addRequired<MemorySSAWrapperPass>();
2266 }
2267 
2268 class DOTFuncMSSAInfo {
2269 private:
2270   const Function &F;
2271   MemorySSAAnnotatedWriter MSSAWriter;
2272 
2273 public:
2274   DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2275       : F(F), MSSAWriter(&MSSA) {}
2276 
2277   const Function *getFunction() { return &F; }
2278   MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2279 };
2280 
2281 namespace llvm {
2282 
2283 template <>
2284 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2285   static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2286     return &(CFGInfo->getFunction()->getEntryBlock());
2287   }
2288 
2289   // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2290   using nodes_iterator = pointer_iterator<Function::const_iterator>;
2291 
2292   static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2293     return nodes_iterator(CFGInfo->getFunction()->begin());
2294   }
2295 
2296   static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2297     return nodes_iterator(CFGInfo->getFunction()->end());
2298   }
2299 
2300   static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2301     return CFGInfo->getFunction()->size();
2302   }
2303 };
2304 
2305 template <>
2306 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2307 
2308   DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2309 
2310   static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2311     return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2312            "' function";
2313   }
2314 
2315   std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2316     return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2317         Node, nullptr,
2318         [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2319           BB.print(OS, &CFGInfo->getWriter(), true, true);
2320         },
2321         [](std::string &S, unsigned &I, unsigned Idx) -> void {
2322           std::string Str = S.substr(I, Idx - I);
2323           StringRef SR = Str;
2324           if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2325               SR.count("MemoryUse("))
2326             return;
2327           DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2328         });
2329   }
2330 
2331   static std::string getEdgeSourceLabel(const BasicBlock *Node,
2332                                         const_succ_iterator I) {
2333     return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2334   }
2335 
  /// No edge attributes; raw branch weights from PGO are not displayed.
2337   std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2338                                 DOTFuncMSSAInfo *CFGInfo) {
2339     return "";
2340   }
2341 
2342   std::string getNodeAttributes(const BasicBlock *Node,
2343                                 DOTFuncMSSAInfo *CFGInfo) {
2344     return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2345                ? "style=filled, fillcolor=lightpink"
2346                : "";
2347   }
2348 };
2349 
2350 } // namespace llvm
2351 
2352 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2353   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2354   if (DotCFGMSSA != "") {
2355     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2356     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2357   } else
2358     MSSA.print(dbgs());
2359 
2360   if (VerifyMemorySSA)
2361     MSSA.verifyMemorySSA();
2362   return false;
2363 }
2364 
2365 AnalysisKey MemorySSAAnalysis::Key;
2366 
2367 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2368                                                  FunctionAnalysisManager &AM) {
2369   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2370   auto &AA = AM.getResult<AAManager>(F);
2371   return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2372 }
2373 
2374 bool MemorySSAAnalysis::Result::invalidate(
2375     Function &F, const PreservedAnalyses &PA,
2376     FunctionAnalysisManager::Invalidator &Inv) {
2377   auto PAC = PA.getChecker<MemorySSAAnalysis>();
2378   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2379          Inv.invalidate<AAManager>(F, PA) ||
2380          Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2381 }
2382 
2383 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2384                                             FunctionAnalysisManager &AM) {
2385   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2386   if (DotCFGMSSA != "") {
2387     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2388     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2389   } else {
2390     OS << "MemorySSA for function: " << F.getName() << "\n";
2391     MSSA.print(OS);
2392   }
2393 
2394   return PreservedAnalyses::all();
2395 }
2396 
2397 PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
2398                                                   FunctionAnalysisManager &AM) {
2399   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2400   OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
2401   MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2402   F.print(OS, &Writer);
2403 
2404   return PreservedAnalyses::all();
2405 }
2406 
2407 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2408                                              FunctionAnalysisManager &AM) {
2409   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2410 
2411   return PreservedAnalyses::all();
2412 }
2413 
2414 char MemorySSAWrapperPass::ID = 0;
2415 
2416 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2417   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2418 }
2419 
2420 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2421 
2422 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2423   AU.setPreservesAll();
2424   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2425   AU.addRequiredTransitive<AAResultsWrapperPass>();
2426 }
2427 
2428 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2429   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2430   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2431   MSSA.reset(new MemorySSA(F, &AA, &DT));
2432   return false;
2433 }
2434 
2435 void MemorySSAWrapperPass::verifyAnalysis() const {
2436   if (VerifyMemorySSA)
2437     MSSA->verifyMemorySSA();
2438 }
2439 
2440 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2441   MSSA->print(OS);
2442 }
2443 
2444 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2445 
2446 /// Walk the use-def chains starting at \p StartingAccess and find
2447 /// the MemoryAccess that actually clobbers Loc.
2448 ///
2449 /// \returns our clobbering memory access
2450 template <typename AliasAnalysisType>
2451 MemoryAccess *
2452 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2453     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2454     unsigned &UpwardWalkLimit) {
2455   assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2456 
2457   Instruction *I = nullptr;
2458   if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2459     if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2460       return StartingUseOrDef;
2461 
2462     I = StartingUseOrDef->getMemoryInst();
2463 
2464     // Conservatively, fences are always clobbers, so don't perform the walk if
2465     // we hit a fence.
2466     if (!isa<CallBase>(I) && I->isFenceLike())
2467       return StartingUseOrDef;
2468   }
2469 
2470   UpwardsMemoryQuery Q;
2471   Q.OriginalAccess = StartingAccess;
2472   Q.StartingLoc = Loc;
2473   Q.Inst = nullptr;
2474   Q.IsCall = false;
2475 
2476   // Unlike the other function, do not walk to the def of a def, because we are
2477   // handed something we already believe is the clobbering access.
2478   // We never set SkipSelf to true in Q in this method.
2479   MemoryAccess *Clobber =
2480       Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2481   LLVM_DEBUG({
2482     dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2483     if (I)
2484       dbgs() << "  for instruction " << *I << "\n";
2485     dbgs() << "  is " << *Clobber << "\n";
2486   });
2487   return Clobber;
2488 }
2489 
2490 static const Instruction *
2491 getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
2492   if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
2493     return nullptr;
2494 
2495   // We consider bitcasts and zero GEPs to be the same pointer value. Start by
2496   // stripping bitcasts and zero GEPs, then we will recursively look at loads
2497   // and stores through bitcasts and zero GEPs.
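  // For example (illustrative IR only):
  //   %g = getelementptr inbounds i32, i32* %p, i64 0
  //   %b = bitcast i32* %p to i32*
  // Loads and stores through %p, %g, and %b are all treated as accesses to
  // the same invariant.group pointer value.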
2498   Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();
2499 
2500   // It's not safe to walk the use list of a global value because function
2501   // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside of the
  // current function.
2504   if (isa<Constant>(PointerOperand))
2505     return nullptr;
2506 
2507   // Queue to process all pointers that are equivalent to load operand.
2508   SmallVector<const Value *, 8> PointerUsesQueue;
2509   PointerUsesQueue.push_back(PointerOperand);
2510 
2511   const Instruction *MostDominatingInstruction = &I;
2512 
2513   // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case
2514   // we will see all the instructions. It may not matter in practice. If it
2515   // does, we will have to support MemorySSA construction and updates.
2516   while (!PointerUsesQueue.empty()) {
2517     const Value *Ptr = PointerUsesQueue.pop_back_val();
2518     assert(Ptr && !isa<GlobalValue>(Ptr) &&
2519            "Null or GlobalValue should not be inserted");
2520 
2521     for (const User *Us : Ptr->users()) {
2522       auto *U = dyn_cast<Instruction>(Us);
2523       if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
2524         continue;
2525 
2526       // Add bitcasts and zero GEPs to queue.
2527       if (isa<BitCastInst>(U)) {
2528         PointerUsesQueue.push_back(U);
2529         continue;
2530       }
2531       if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
2532         if (GEP->hasAllZeroIndices())
2533           PointerUsesQueue.push_back(U);
2534         continue;
2535       }
2536 
      // If we hit a load/store with invariant.group metadata and the same
      // pointer operand, we can assume that the value pointed to by the
      // pointer operand didn't change.
2540       if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
2541           getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) {
2542         MostDominatingInstruction = U;
2543       }
2544     }
2545   }
2546   return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2547 }
2548 
2549 template <typename AliasAnalysisType>
2550 MemoryAccess *
2551 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2552     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf,
2553     bool UseInvariantGroup) {
2554   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2555   // If this is a MemoryPhi, we can't do anything.
2556   if (!StartingAccess)
2557     return MA;
2558 
2559   if (UseInvariantGroup) {
2560     if (auto *I = getInvariantGroupClobberingInstruction(
2561             *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
2562       assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2563 
2564       auto *ClobberMA = MSSA->getMemoryAccess(I);
2565       assert(ClobberMA);
2566       if (isa<MemoryUse>(ClobberMA))
2567         return ClobberMA->getDefiningAccess();
2568       return ClobberMA;
2569     }
2570   }
2571 
2572   bool IsOptimized = false;
2573 
2574   // If this is an already optimized use or def, return the optimized result.
2575   // Note: Currently, we store the optimized def result in a separate field,
2576   // since we can't use the defining access.
2577   if (StartingAccess->isOptimized()) {
2578     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2579       return StartingAccess->getOptimized();
2580     IsOptimized = true;
2581   }
2582 
2583   const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with fences, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
2587   if (!isa<CallBase>(I) && I->isFenceLike())
2588     return StartingAccess;
2589 
2590   UpwardsMemoryQuery Q(I, StartingAccess);
2591 
2592   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2593     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2594     StartingAccess->setOptimized(LiveOnEntry);
2595     StartingAccess->setOptimizedAccessType(None);
2596     return LiveOnEntry;
2597   }
2598 
2599   MemoryAccess *OptimizedAccess;
2600   if (!IsOptimized) {
2601     // Start with the thing we already think clobbers this location
2602     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2603 
2604     // At this point, DefiningAccess may be the live on entry def.
2605     // If it is, we will not get a better result.
2606     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2607       StartingAccess->setOptimized(DefiningAccess);
2608       StartingAccess->setOptimizedAccessType(None);
2609       return DefiningAccess;
2610     }
2611 
2612     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2613     StartingAccess->setOptimized(OptimizedAccess);
2614     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2615       StartingAccess->setOptimizedAccessType(None);
2616     else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2617       StartingAccess->setOptimizedAccessType(
2618           AliasResult(AliasResult::MustAlias));
2619   } else
2620     OptimizedAccess = StartingAccess->getOptimized();
2621 
2622   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2623   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2624   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2625   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2626 
2627   MemoryAccess *Result;
2628   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2629       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2630     assert(isa<MemoryDef>(Q.OriginalAccess));
2631     Q.SkipSelfAccess = true;
2632     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2633   } else
2634     Result = OptimizedAccess;
2635 
2636   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2637   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2638 
2639   return Result;
2640 }
2641 
2642 MemoryAccess *
2643 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2644   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2645     return Use->getDefiningAccess();
2646   return MA;
2647 }
2648 
2649 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2650     MemoryAccess *StartingAccess, const MemoryLocation &) {
2651   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2652     return Use->getDefiningAccess();
2653   return StartingAccess;
2654 }
2655 
2656 void MemoryPhi::deleteMe(DerivedUser *Self) {
2657   delete static_cast<MemoryPhi *>(Self);
2658 }
2659 
2660 void MemoryDef::deleteMe(DerivedUser *Self) {
2661   delete static_cast<MemoryDef *>(Self);
2662 }
2663 
2664 void MemoryUse::deleteMe(DerivedUser *Self) {
2665   delete static_cast<MemoryUse *>(Self);
2666 }
2667 
2668 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
2669   auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2670     Ptr = Ptr->stripPointerCasts();
2671     if (!isa<Instruction>(Ptr))
2672       return true;
2673     return isa<AllocaInst>(Ptr);
2674   };
2675 
2676   Ptr = Ptr->stripPointerCasts();
2677   if (auto *I = dyn_cast<Instruction>(Ptr)) {
2678     if (I->getParent()->isEntryBlock())
2679       return true;
2680   }
2681   if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2682     return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2683            GEP->hasAllConstantIndices();
2684   }
2685   return IsGuaranteedLoopInvariantBase(Ptr);
2686 }
2687