1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFGPrinter.h"
29 #include "llvm/Analysis/IteratedDominanceFrontier.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Config/llvm-config.h"
32 #include "llvm/IR/AssemblyAnnotationWriter.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/Use.h"
43 #include "llvm/InitializePasses.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/AtomicOrdering.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Compiler.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/FormattedStream.h"
52 #include "llvm/Support/GraphWriter.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include <algorithm>
55 #include <cassert>
56 #include <iterator>
57 #include <memory>
58 #include <utility>
59 
60 using namespace llvm;
61 
62 #define DEBUG_TYPE "memoryssa"
63 
64 static cl::opt<std::string>
65     DotCFGMSSA("dot-cfg-mssa",
66                cl::value_desc("file name for generated dot file"),
67                cl::desc("file name for generated dot file"), cl::init(""));
68 
69 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
70                       true)
71 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
72 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
73 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
74                     true)
75 
76 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
77                       "Memory SSA Printer", false, false)
78 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
79 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
80                     "Memory SSA Printer", false, false)
81 
82 static cl::opt<unsigned> MaxCheckLimit(
83     "memssa-check-limit", cl::Hidden, cl::init(100),
84     cl::desc("The maximum number of stores/phis MemorySSA "
85              "will consider trying to walk past (default = 100)"));
86 
87 // Always verify MemorySSA if expensive checking is enabled.
88 #ifdef EXPENSIVE_CHECKS
89 bool llvm::VerifyMemorySSA = true;
90 #else
91 bool llvm::VerifyMemorySSA = false;
92 #endif
93 
94 static cl::opt<bool, true>
95     VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
96                      cl::Hidden, cl::desc("Enable verification of MemorySSA."));
97 
98 const static char LiveOnEntryStr[] = "liveOnEntry";
99 
100 namespace {
101 
102 /// An assembly annotator class to print Memory SSA information in
103 /// comments.
104 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
105   const MemorySSA *MSSA;
106 
107 public:
108   MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
109 
110   void emitBasicBlockStartAnnot(const BasicBlock *BB,
111                                 formatted_raw_ostream &OS) override {
112     if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
113       OS << "; " << *MA << "\n";
114   }
115 
116   void emitInstructionAnnot(const Instruction *I,
117                             formatted_raw_ostream &OS) override {
118     if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
119       OS << "; " << *MA << "\n";
120   }
121 };
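
// Illustrative only (hypothetical IR below): for a small function, the
// annotator above interleaves MemorySSA accesses with the IR as comments,
// roughly like:
//
//   define void @f(i32* %p) {
//   entry:
//     ; 1 = MemoryDef(liveOnEntry)
//     store i32 0, i32* %p
//     ; MemoryUse(1)
//     %v = load i32, i32* %p
//     ret void
//   }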
122 
123 /// An assembly annotator class to print Memory SSA information in
124 /// comments, along with the clobbering access computed by a walker.
125 class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
126   MemorySSA *MSSA;
127   MemorySSAWalker *Walker;
128 
129 public:
130   MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
131       : MSSA(M), Walker(M->getWalker()) {}
132 
133   void emitInstructionAnnot(const Instruction *I,
134                             formatted_raw_ostream &OS) override {
135     if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
136       MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
137       OS << "; " << *MA;
138       if (Clobber) {
139         OS << " - clobbered by ";
140         if (MSSA->isLiveOnEntryDef(Clobber))
141           OS << LiveOnEntryStr;
142         else
143           OS << *Clobber;
144       }
145       OS << "\n";
146     }
147   }
148 };
149 
150 } // namespace
151 
152 namespace {
153 
154 /// Our current alias analysis API differentiates heavily between calls and
155 /// non-calls, and functions called on one usually assert on the other.
156 /// This class encapsulates the distinction to simplify other code that wants
157 /// "Memory affecting instructions and related data" to use as a key.
158 /// For example, this class is used as a DenseMap key in the use optimizer.
159 class MemoryLocOrCall {
160 public:
161   bool IsCall = false;
162 
163   MemoryLocOrCall(MemoryUseOrDef *MUD)
164       : MemoryLocOrCall(MUD->getMemoryInst()) {}
165   MemoryLocOrCall(const MemoryUseOrDef *MUD)
166       : MemoryLocOrCall(MUD->getMemoryInst()) {}
167 
168   MemoryLocOrCall(Instruction *Inst) {
169     if (auto *C = dyn_cast<CallBase>(Inst)) {
170       IsCall = true;
171       Call = C;
172     } else {
173       IsCall = false;
174       // There is no such thing as a MemoryLocation for a fence inst, and it is
175       // unique in that regard.
176       if (!isa<FenceInst>(Inst))
177         Loc = MemoryLocation::get(Inst);
178     }
179   }
180 
181   explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
182 
183   const CallBase *getCall() const {
184     assert(IsCall);
185     return Call;
186   }
187 
188   MemoryLocation getLoc() const {
189     assert(!IsCall);
190     return Loc;
191   }
192 
193   bool operator==(const MemoryLocOrCall &Other) const {
194     if (IsCall != Other.IsCall)
195       return false;
196 
197     if (!IsCall)
198       return Loc == Other.Loc;
199 
200     if (Call->getCalledOperand() != Other.Call->getCalledOperand())
201       return false;
202 
203     return Call->arg_size() == Other.Call->arg_size() &&
204            std::equal(Call->arg_begin(), Call->arg_end(),
205                       Other.Call->arg_begin());
206   }
207 
208 private:
209   union {
210     const CallBase *Call;
211     MemoryLocation Loc;
212   };
213 };
214 
215 } // end anonymous namespace
216 
217 namespace llvm {
218 
219 template <> struct DenseMapInfo<MemoryLocOrCall> {
220   static inline MemoryLocOrCall getEmptyKey() {
221     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
222   }
223 
224   static inline MemoryLocOrCall getTombstoneKey() {
225     return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
226   }
227 
228   static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
229     if (!MLOC.IsCall)
230       return hash_combine(
231           MLOC.IsCall,
232           DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));
233 
234     hash_code hash =
235         hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
236                                       MLOC.getCall()->getCalledOperand()));
237 
238     for (const Value *Arg : MLOC.getCall()->args())
239       hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
240     return hash;
241   }
242 
243   static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
244     return LHS == RHS;
245   }
246 };
247 
248 } // end namespace llvm
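
// A minimal consumer-side sketch (not part of this file; `LastSeen`, `MSSA`,
// and `BB` are hypothetical) of how the DenseMapInfo specialization above lets
// MemoryLocOrCall act as a map key:
//
//   DenseMap<MemoryLocOrCall, const MemoryAccess *> LastSeen;
//   for (const MemoryAccess &MA : *MSSA.getBlockAccesses(BB))
//     if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&MA))
//       LastSeen[MemoryLocOrCall(MUD)] = MUD;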
249 
250 /// This does one-way checks to see if Use could theoretically be hoisted above
251 /// MayClobber. This will not check the other way around.
252 ///
253 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
254 /// MayClobber, with no potentially clobbering operations in between them.
255 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
256 static bool areLoadsReorderable(const LoadInst *Use,
257                                 const LoadInst *MayClobber) {
258   bool VolatileUse = Use->isVolatile();
259   bool VolatileClobber = MayClobber->isVolatile();
260   // Volatile operations may never be reordered with other volatile operations.
261   if (VolatileUse && VolatileClobber)
262     return false;
263   // Otherwise, volatile doesn't matter here. From the language reference:
264   // 'optimizers may change the order of volatile operations relative to
265   // non-volatile operations.'
266 
267   // If a load is seq_cst, it cannot be moved above other loads. If its ordering
268   // is weaker, it can be moved above other loads. We just need to be sure that
269   // MayClobber isn't an acquire load, because loads can't be moved above
270   // acquire loads.
271   //
272   // Note that this explicitly *does* allow the free reordering of monotonic (or
273   // weaker) loads of the same address.
274   bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
275   bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
276                                                      AtomicOrdering::Acquire);
277   return !(SeqCstUse || MayClobberIsAcquire);
278 }
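
// Illustrative IR (hypothetical): given
//
//   %a = load atomic i32, i32* %p acquire, align 4   ; MayClobber
//   %b = load i32, i32* %q, align 4                  ; Use
//
// the Use cannot be hoisted above the acquire load, so this returns false.
// If %a were monotonic (or non-atomic) and %b were not seq_cst, the loads
// could be freely reordered and this would return true.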
279 
280 namespace {
281 
282 struct ClobberAlias {
283   bool IsClobber;
284   Optional<AliasResult> AR;
285 };
286 
287 } // end anonymous namespace
288 
289 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
290 // ignored if IsClobber = false.
291 template <typename AliasAnalysisType>
292 static ClobberAlias
293 instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
294                          const Instruction *UseInst, AliasAnalysisType &AA) {
295   Instruction *DefInst = MD->getMemoryInst();
296   assert(DefInst && "Defining instruction not actually an instruction");
297   Optional<AliasResult> AR;
298 
299   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
300     // These intrinsics will show up as affecting memory, but they are just
301     // markers, mostly.
302     //
303     // FIXME: We probably don't actually want MemorySSA to model these at all
304     // (including creating MemoryAccesses for them): we just end up inventing
305     // clobbers where they don't really exist at all. Please see D43269 for
306     // context.
307     switch (II->getIntrinsicID()) {
308     case Intrinsic::invariant_start:
309     case Intrinsic::invariant_end:
310     case Intrinsic::assume:
311     case Intrinsic::experimental_noalias_scope_decl:
312     case Intrinsic::pseudoprobe:
313       return {false, AliasResult(AliasResult::NoAlias)};
314     case Intrinsic::dbg_addr:
315     case Intrinsic::dbg_declare:
316     case Intrinsic::dbg_label:
317     case Intrinsic::dbg_value:
318       llvm_unreachable("debuginfo shouldn't have associated defs!");
319     default:
320       break;
321     }
322   }
323 
324   if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
325     ModRefInfo I = AA.getModRefInfo(DefInst, CB);
326     AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
327     return {isModOrRefSet(I), AR};
328   }
329 
330   if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
331     if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
332       return {!areLoadsReorderable(UseLoad, DefLoad),
333               AliasResult(AliasResult::MayAlias)};
334 
335   ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
336   AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
337   return {isModSet(I), AR};
338 }
339 
340 template <typename AliasAnalysisType>
341 static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
342                                              const MemoryUseOrDef *MU,
343                                              const MemoryLocOrCall &UseMLOC,
344                                              AliasAnalysisType &AA) {
345   // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
346   // to exist while MemoryLocOrCall is pushed through places.
347   if (UseMLOC.IsCall)
348     return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
349                                     AA);
350   return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
351                                   AA);
352 }
353 
354 // Return true when MD may clobber MU, return false otherwise.
355 bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
356                                         AliasAnalysis &AA) {
357   return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
358 }
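
// A minimal caller-side sketch (names hypothetical) of how updater-style code
// typically consumes the helper above:
//
//   if (MemorySSAUtil::defClobbersUseOrDef(SomeDef, SomeUseOrDef, AA))
//     ...; // SomeUseOrDef may observe memory written by SomeDef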
359 
360 namespace {
361 
362 struct UpwardsMemoryQuery {
363   // True if our original query started off as a call
364   bool IsCall = false;
365   // The pointer location we started the query with. This will be empty if
366   // IsCall is true.
367   MemoryLocation StartingLoc;
368   // This is the instruction we were querying about.
369   const Instruction *Inst = nullptr;
370   // The MemoryAccess we actually got called with, used to test local domination
371   const MemoryAccess *OriginalAccess = nullptr;
372   Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
373   bool SkipSelfAccess = false;
374 
375   UpwardsMemoryQuery() = default;
376 
377   UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
378       : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
379     if (!IsCall)
380       StartingLoc = MemoryLocation::get(Inst);
381   }
382 };
383 
384 } // end anonymous namespace
385 
386 template <typename AliasAnalysisType>
387 static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
388                                                    const Instruction *I) {
389   // If the memory can't be changed, then loads of the memory can't be
390   // clobbered.
391   if (auto *LI = dyn_cast<LoadInst>(I))
392     return I->hasMetadata(LLVMContext::MD_invariant_load) ||
393            AA.pointsToConstantMemory(MemoryLocation::get(LI));
394   return false;
395 }
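
// Illustrative IR (hypothetical): a load tagged !invariant.load, or a load
// from provably constant memory, can never be clobbered, so its access is
// optimized straight to liveOnEntry:
//
//   ; MemoryUse(liveOnEntry)
//   %v = load i32, i32* %p, align 4, !invariant.load !0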
396 
397 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
398 /// in between `Start` and `ClobberAt` clobbers `Start`.
399 ///
400 /// This is meant to be as simple and self-contained as possible. Because it
401 /// uses no cache, etc., it can be relatively expensive.
402 ///
403 /// \param Start     The MemoryAccess that we want to walk from.
404 /// \param ClobberAt A clobber for Start.
405 /// \param StartLoc  The MemoryLocation for Start.
406 /// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
407 /// \param Query     The UpwardsMemoryQuery we used for our search.
408 /// \param AA        The AliasAnalysis we used for our search.
409 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
410 
411 template <typename AliasAnalysisType>
412 LLVM_ATTRIBUTE_UNUSED static void
413 checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
414                    const MemoryLocation &StartLoc, const MemorySSA &MSSA,
415                    const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
416                    bool AllowImpreciseClobber = false) {
417   assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
418 
419   if (MSSA.isLiveOnEntryDef(Start)) {
420     assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
421            "liveOnEntry must clobber itself");
422     return;
423   }
424 
425   bool FoundClobber = false;
426   DenseSet<ConstMemoryAccessPair> VisitedPhis;
427   SmallVector<ConstMemoryAccessPair, 8> Worklist;
428   Worklist.emplace_back(Start, StartLoc);
429   // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
430   // is found, complain.
431   while (!Worklist.empty()) {
432     auto MAP = Worklist.pop_back_val();
433     // All we care about is that nothing from Start to ClobberAt clobbers Start.
434     // We learn nothing from revisiting nodes.
435     if (!VisitedPhis.insert(MAP).second)
436       continue;
437 
438     for (const auto *MA : def_chain(MAP.first)) {
439       if (MA == ClobberAt) {
440         if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
441           // instructionClobbersQuery isn't free, so don't use `|=`,
442           // since it won't let us short-circuit.
443           //
444           // Also, note that this can't be hoisted out of the `Worklist` loop,
445           // since MD may only act as a clobber for 1 of N MemoryLocations.
446           FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
447           if (!FoundClobber) {
448             ClobberAlias CA =
449                 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
450             if (CA.IsClobber) {
451               FoundClobber = true;
452               // Not used: CA.AR;
453             }
454           }
455         }
456         break;
457       }
458 
459       // We should never hit liveOnEntry, unless it's the clobber.
460       assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
461 
462       if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
463         // If Start is a Def, skip self.
464         if (MD == Start)
465           continue;
466 
467         assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
468                     .IsClobber &&
469                "Found clobber before reaching ClobberAt!");
470         continue;
471       }
472 
473       if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
474         (void)MU;
475         assert(MU == Start &&
476                "Can only find use in def chain if Start is a use");
477         continue;
478       }
479 
480       assert(isa<MemoryPhi>(MA));
481 
482       // Add reachable phi predecessors
483       for (auto ItB = upward_defs_begin(
484                     {const_cast<MemoryAccess *>(MA), MAP.second},
485                     MSSA.getDomTree()),
486                 ItE = upward_defs_end();
487            ItB != ItE; ++ItB)
488         if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
489           Worklist.emplace_back(*ItB);
490     }
491   }
492 
493   // If the verify is done following an optimization, it's possible that
494   // ClobberAt was a conservative clobbering, that we can now infer is not a
495   // true clobbering access. Don't fail the verify if that's the case.
496   // We do have accesses that claim they're optimized, but could be optimized
497   // further. Updating all these can be expensive, so allow it for now (FIXME).
498   if (AllowImpreciseClobber)
499     return;
500 
501   // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
502   // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
503   assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
504          "ClobberAt never acted as a clobber");
505 }
506 
507 namespace {
508 
509 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
510 /// in one class.
511 template <class AliasAnalysisType> class ClobberWalker {
512   /// Save a few bytes by using unsigned instead of size_t.
513   using ListIndex = unsigned;
514 
515   /// Represents a span of contiguous MemoryDefs, potentially ending in a
516   /// MemoryPhi.
517   struct DefPath {
518     MemoryLocation Loc;
519     // Note that, because we always walk in reverse, Last will always dominate
520     // First. Also note that First and Last are inclusive.
521     MemoryAccess *First;
522     MemoryAccess *Last;
523     Optional<ListIndex> Previous;
524 
525     DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
526             Optional<ListIndex> Previous)
527         : Loc(Loc), First(First), Last(Last), Previous(Previous) {}
528 
529     DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
530             Optional<ListIndex> Previous)
531         : DefPath(Loc, Init, Init, Previous) {}
532   };
533 
534   const MemorySSA &MSSA;
535   AliasAnalysisType &AA;
536   DominatorTree &DT;
537   UpwardsMemoryQuery *Query;
538   unsigned *UpwardWalkLimit;
539 
540   // Phi optimization bookkeeping:
541   // List of DefPath to process during the current phi optimization walk.
542   SmallVector<DefPath, 32> Paths;
543   // List of visited <Access, Location> pairs; we can skip paths already
544   // visited with the same memory location.
545   DenseSet<ConstMemoryAccessPair> VisitedPhis;
546   // Record if phi translation has been performed during the current phi
547   // optimization walk, as merging alias results after phi translation can
548   // yield incorrect results. Context in PR46156.
549   bool PerformedPhiTranslation = false;
550 
551   /// Find the nearest def or phi that `From` can legally be optimized to.
552   const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
553     assert(From->getNumOperands() && "Phi with no operands?");
554 
555     BasicBlock *BB = From->getBlock();
556     MemoryAccess *Result = MSSA.getLiveOnEntryDef();
557     DomTreeNode *Node = DT.getNode(BB);
558     while ((Node = Node->getIDom())) {
559       auto *Defs = MSSA.getBlockDefs(Node->getBlock());
560       if (Defs)
561         return &*Defs->rbegin();
562     }
563     return Result;
564   }
565 
566   /// Result of calling walkToPhiOrClobber.
567   struct UpwardsWalkResult {
568     /// The "Result" of the walk. Either a clobber, the last thing we walked, or
569     /// both. Includes alias info when a clobber is found.
570     MemoryAccess *Result;
571     bool IsKnownClobber;
572     Optional<AliasResult> AR;
573   };
574 
575   /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
576   /// This will update Desc.Last as it walks. It will (optionally) also stop at
577   /// StopAt.
578   ///
579   /// This does not test for whether StopAt is a clobber.
580   UpwardsWalkResult
581   walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
582                      const MemoryAccess *SkipStopAt = nullptr) const {
583     assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
584     assert(UpwardWalkLimit && "Need a valid walk limit");
585     bool LimitAlreadyReached = false;
586     // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
587     // it to 1. This will not do any alias() calls. It either returns in the
588     // first iteration in the loop below, or is set back to 0 if all def chains
589     // are free of MemoryDefs.
590     if (!*UpwardWalkLimit) {
591       *UpwardWalkLimit = 1;
592       LimitAlreadyReached = true;
593     }
594 
595     for (MemoryAccess *Current : def_chain(Desc.Last)) {
596       Desc.Last = Current;
597       if (Current == StopAt || Current == SkipStopAt)
598         return {Current, false, AliasResult(AliasResult::MayAlias)};
599 
600       if (auto *MD = dyn_cast<MemoryDef>(Current)) {
601         if (MSSA.isLiveOnEntryDef(MD))
602           return {MD, true, AliasResult(AliasResult::MustAlias)};
603 
604         if (!--*UpwardWalkLimit)
605           return {Current, true, AliasResult(AliasResult::MayAlias)};
606 
607         ClobberAlias CA =
608             instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
609         if (CA.IsClobber)
610           return {MD, true, CA.AR};
611       }
612     }
613 
614     if (LimitAlreadyReached)
615       *UpwardWalkLimit = 0;
616 
617     assert(isa<MemoryPhi>(Desc.Last) &&
618            "Ended at a non-clobber that's not a phi?");
619     return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
620   }
621 
622   void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
623                    ListIndex PriorNode) {
624     auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
625                                              &PerformedPhiTranslation);
626     auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
627     for (const MemoryAccessPair &P : UpwardDefs) {
628       PausedSearches.push_back(Paths.size());
629       Paths.emplace_back(P.second, P.first, PriorNode);
630     }
631   }
632 
633   /// Represents a search that terminated after finding a clobber. This clobber
634   /// may or may not be present in the path of defs from LastNode..SearchStart,
635   /// since it may have been retrieved from cache.
636   struct TerminatedPath {
637     MemoryAccess *Clobber;
638     ListIndex LastNode;
639   };
640 
641   /// Get an access that keeps us from optimizing to the given phi.
642   ///
643   /// PausedSearches is an array of indices into the Paths array. Its incoming
644   /// value is the indices of searches that stopped at the last phi optimization
645   /// target. It's left in an unspecified state.
646   ///
647   /// If this returns None, NewPaused is a vector of searches that terminated
648   /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
649   Optional<TerminatedPath>
650   getBlockingAccess(const MemoryAccess *StopWhere,
651                     SmallVectorImpl<ListIndex> &PausedSearches,
652                     SmallVectorImpl<ListIndex> &NewPaused,
653                     SmallVectorImpl<TerminatedPath> &Terminated) {
654     assert(!PausedSearches.empty() && "No searches to continue?");
655 
656     // BFS vs DFS really doesn't make a difference here, so just do a DFS with
657     // PausedSearches as our stack.
658     while (!PausedSearches.empty()) {
659       ListIndex PathIndex = PausedSearches.pop_back_val();
660       DefPath &Node = Paths[PathIndex];
661 
662       // If we've already visited this path with this MemoryLocation, we don't
663       // need to do so again.
664       //
665       // NOTE: That we just drop these paths on the ground makes caching
666       // behavior sporadic. e.g. given a diamond:
667       //  A
668       // B C
669       //  D
670       //
671       // ...If we walk D, B, A, C, we'll only cache the result of phi
672       // optimization for A, B, and D; C will be skipped because it dies here.
673       // This arguably isn't the worst thing ever, since:
674       //   - We generally query things in a top-down order, so if we got below D
675       //     without needing cache entries for {C, MemLoc}, then chances are
676       //     that those cache entries would end up ultimately unused.
677       //   - We still cache things for A, so C only needs to walk up a bit.
678       // If this behavior becomes problematic, we can fix without a ton of extra
679       // work.
680       if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
681         if (PerformedPhiTranslation) {
682           // If visiting this path performed Phi translation, don't continue,
683           // since it may not be correct to merge results from two paths if one
684           // relies on the phi translation.
685           TerminatedPath Term{Node.Last, PathIndex};
686           return Term;
687         }
688         continue;
689       }
690 
691       const MemoryAccess *SkipStopWhere = nullptr;
692       if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
693         assert(isa<MemoryDef>(Query->OriginalAccess));
694         SkipStopWhere = Query->OriginalAccess;
695       }
696 
697       UpwardsWalkResult Res = walkToPhiOrClobber(Node,
698                                                  /*StopAt=*/StopWhere,
699                                                  /*SkipStopAt=*/SkipStopWhere);
700       if (Res.IsKnownClobber) {
701         assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
702 
703         // If this wasn't a cache hit, we hit a clobber when walking. That's a
704         // failure.
705         TerminatedPath Term{Res.Result, PathIndex};
706         if (!MSSA.dominates(Res.Result, StopWhere))
707           return Term;
708 
709         // Otherwise, it's a valid thing to potentially optimize to.
710         Terminated.push_back(Term);
711         continue;
712       }
713 
714       if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
715         // We've hit our target. Save this path off for if we want to continue
716         // walking. If we are in the mode of skipping the OriginalAccess, and
717         // we've reached back to the OriginalAccess, do not save the path; we've
718         // just looped back to self.
719         if (Res.Result != SkipStopWhere)
720           NewPaused.push_back(PathIndex);
721         continue;
722       }
723 
724       assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
725       addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
726     }
727 
728     return None;
729   }
730 
731   template <typename T, typename Walker>
732   struct generic_def_path_iterator
733       : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
734                                     std::forward_iterator_tag, T *> {
735     generic_def_path_iterator() = default;
736     generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}
737 
738     T &operator*() const { return curNode(); }
739 
740     generic_def_path_iterator &operator++() {
741       N = curNode().Previous;
742       return *this;
743     }
744 
745     bool operator==(const generic_def_path_iterator &O) const {
746       if (N.hasValue() != O.N.hasValue())
747         return false;
748       return !N.hasValue() || *N == *O.N;
749     }
750 
751   private:
752     T &curNode() const { return W->Paths[*N]; }
753 
754     Walker *W = nullptr;
755     Optional<ListIndex> N = None;
756   };
757 
758   using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
759   using const_def_path_iterator =
760       generic_def_path_iterator<const DefPath, const ClobberWalker>;
761 
762   iterator_range<def_path_iterator> def_path(ListIndex From) {
763     return make_range(def_path_iterator(this, From), def_path_iterator());
764   }
765 
766   iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
767     return make_range(const_def_path_iterator(this, From),
768                       const_def_path_iterator());
769   }
770 
771   struct OptznResult {
772     /// The path that contains our result.
773     TerminatedPath PrimaryClobber;
774     /// The paths that we can legally cache back from, but that aren't
775     /// necessarily the result of the Phi optimization.
776     SmallVector<TerminatedPath, 4> OtherClobbers;
777   };
778 
779   ListIndex defPathIndex(const DefPath &N) const {
780     // The assert looks nicer if we don't need to do &N
781     const DefPath *NP = &N;
782     assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
783            "Out of bounds DefPath!");
784     return NP - &Paths.front();
785   }
786 
787   /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
788   /// that act as legal clobbers. Note that this won't return *all* clobbers.
789   ///
790   /// Phi optimization algorithm tl;dr:
791   ///   - Find the earliest def/phi, A, we can optimize to
792   ///   - Find if all paths from the starting memory access ultimately reach A
793   ///     - If not, optimization isn't possible.
794   ///     - Otherwise, walk from A to another clobber or phi, A'.
795   ///       - If A' is a def, we're done.
796   ///       - If A' is a phi, try to optimize it.
797   ///
798   /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
799   /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
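  ///
  /// A minimal worked example (hypothetical IR; %p and %q assumed NoAlias):
  ///
  ///   entry:  1 = MemoryDef(liveOnEntry)          ; store to %p
  ///   left:   (no defs)
  ///   right:  2 = MemoryDef(1)                    ; store to %q
  ///   merge:  3 = MemoryPhi({left,1},{right,2})
  ///           MemoryUse(3)                        ; load of %p
  ///
  /// Optimizing the phi for the location of %p: the walk target above the phi
  /// is 1. The path through left reaches 1 directly; the path through right
  /// walks past 2 (a store to %q, which cannot clobber %p) and also reaches 1.
  /// Every path converges on the target, so the phi is optimizable and 1 is
  /// returned as the clobber.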
800   OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
801                              const MemoryLocation &Loc) {
802     assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
803            "Reset the optimization state.");
804 
805     Paths.emplace_back(Loc, Start, Phi, None);
806     // Stores how many "valid" optimization nodes we had prior to calling
807     // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
808     auto PriorPathsSize = Paths.size();
809 
810     SmallVector<ListIndex, 16> PausedSearches;
811     SmallVector<ListIndex, 8> NewPaused;
812     SmallVector<TerminatedPath, 4> TerminatedPaths;
813 
814     addSearches(Phi, PausedSearches, 0);
815 
816     // Moves the TerminatedPath with the "most dominated" Clobber to the end of
817     // Paths.
818     auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
819       assert(!Paths.empty() && "Need a path to move");
820       auto Dom = Paths.begin();
821       for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
822         if (!MSSA.dominates(I->Clobber, Dom->Clobber))
823           Dom = I;
824       auto Last = Paths.end() - 1;
825       if (Last != Dom)
826         std::iter_swap(Last, Dom);
827     };
828 
829     MemoryPhi *Current = Phi;
830     while (true) {
831       assert(!MSSA.isLiveOnEntryDef(Current) &&
832              "liveOnEntry wasn't treated as a clobber?");
833 
834       const auto *Target = getWalkTarget(Current);
835       // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
836       // optimization for the prior phi.
837       assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
838         return MSSA.dominates(P.Clobber, Target);
839       }));
840 
841       // FIXME: This is broken, because the Blocker may be reported to be
842       // liveOnEntry, and we'll happily wait for that to disappear (read: never)
843       // For the moment, this is fine, since we do nothing with blocker info.
844       if (Optional<TerminatedPath> Blocker = getBlockingAccess(
845               Target, PausedSearches, NewPaused, TerminatedPaths)) {
846 
847         // Find the node we started at. We can't search based on N->Last, since
848         // we may have gone around a loop with a different MemoryLocation.
849         auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
850           return defPathIndex(N) < PriorPathsSize;
851         });
852         assert(Iter != def_path_iterator());
853 
854         DefPath &CurNode = *Iter;
855         assert(CurNode.Last == Current);
856 
857         // Two things:
858         // A. We can't reliably cache all of NewPaused back. Consider a case
859         //    where we have two paths in NewPaused; one of which can't optimize
860         //    above this phi, whereas the other can. If we cache the second path
861         //    back, we'll end up with suboptimal cache entries. We can handle
862         //    cases like this a bit better when we either try to find all
863         //    clobbers that block phi optimization, or when our cache starts
864         //    supporting unfinished searches.
865         // B. We can't reliably cache TerminatedPaths back here without doing
866         //    extra checks; consider a case like:
867         //       T
868         //      / \
869         //     D   C
870         //      \ /
871         //       S
872         //    Where T is our target, C is a node with a clobber on it, D is a
873         //    diamond (with a clobber *only* on the left or right node, N), and
874         //    S is our start. Say we walk to D, through the node opposite N
875         //    (read: ignoring the clobber), and see a cache entry in the top
876         //    node of D. That cache entry gets put into TerminatedPaths. We then
877         //    walk up to C (N is later in our worklist), find the clobber, and
878         //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
879         //    the bottom part of D to the cached clobber, ignoring the clobber
880         //    in N. Again, this problem goes away if we start tracking all
881         //    blockers for a given phi optimization.
882         TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
883         return {Result, {}};
884       }
885 
886       // If there's nothing left to search, then all paths led to valid clobbers
887       // that we got from our cache; pick the nearest to the start, and allow
888       // the rest to be cached back.
889       if (NewPaused.empty()) {
890         MoveDominatedPathToEnd(TerminatedPaths);
891         TerminatedPath Result = TerminatedPaths.pop_back_val();
892         return {Result, std::move(TerminatedPaths)};
893       }
894 
895       MemoryAccess *DefChainEnd = nullptr;
896       SmallVector<TerminatedPath, 4> Clobbers;
897       for (ListIndex Paused : NewPaused) {
898         UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
899         if (WR.IsKnownClobber)
900           Clobbers.push_back({WR.Result, Paused});
901         else
902           // Micro-opt: If we hit the end of the chain, save it.
903           DefChainEnd = WR.Result;
904       }
905 
906       if (!TerminatedPaths.empty()) {
907         // If we couldn't find the dominating phi/liveOnEntry in the above loop,
908         // do it now.
909         if (!DefChainEnd)
910           for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
911             DefChainEnd = MA;
912         assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
913 
914         // If any of the terminated paths don't dominate the phi we'll try to
915         // optimize, we need to figure out what they are and quit.
916         const BasicBlock *ChainBB = DefChainEnd->getBlock();
917         for (const TerminatedPath &TP : TerminatedPaths) {
918           // Because we know that DefChainEnd is as "high" as we can go, we
919           // don't need local dominance checks; BB dominance is sufficient.
920           if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
921             Clobbers.push_back(TP);
922         }
923       }
924 
925       // If we have clobbers in the def chain, find the one closest to Current
926       // and quit.
927       if (!Clobbers.empty()) {
928         MoveDominatedPathToEnd(Clobbers);
929         TerminatedPath Result = Clobbers.pop_back_val();
930         return {Result, std::move(Clobbers)};
931       }
932 
933       assert(all_of(NewPaused,
934                     [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
935 
936       // Because liveOnEntry is a clobber, this must be a phi.
937       auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
938 
939       PriorPathsSize = Paths.size();
940       PausedSearches.clear();
941       for (ListIndex I : NewPaused)
942         addSearches(DefChainPhi, PausedSearches, I);
943       NewPaused.clear();
944 
945       Current = DefChainPhi;
946     }
947   }
948 
949   void verifyOptResult(const OptznResult &R) const {
950     assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
951       return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
952     }));
953   }
954 
955   void resetPhiOptznState() {
956     Paths.clear();
957     VisitedPhis.clear();
958     PerformedPhiTranslation = false;
959   }
960 
961 public:
962   ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
963       : MSSA(MSSA), AA(AA), DT(DT) {}
964 
965   AliasAnalysisType *getAA() { return &AA; }
966   /// Finds the nearest clobber for the given query, optimizing phis if
967   /// possible.
968   MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
969                             unsigned &UpWalkLimit) {
970     Query = &Q;
971     UpwardWalkLimit = &UpWalkLimit;
972     // Starting limit must be > 0.
973     if (!UpWalkLimit)
974       UpWalkLimit++;
975 
976     MemoryAccess *Current = Start;
977     // This walker pretends uses don't exist. If we're handed one, silently grab
978     // its def. (This has the nice side-effect of ensuring we never cache uses)
979     if (auto *MU = dyn_cast<MemoryUse>(Start))
980       Current = MU->getDefiningAccess();
981 
982     DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
983     // Fast path for the overly-common case (no crazy phi optimization
984     // necessary)
985     UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
986     MemoryAccess *Result;
987     if (WalkResult.IsKnownClobber) {
988       Result = WalkResult.Result;
989       Q.AR = WalkResult.AR;
990     } else {
991       OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
992                                           Current, Q.StartingLoc);
993       verifyOptResult(OptRes);
994       resetPhiOptznState();
995       Result = OptRes.PrimaryClobber.Clobber;
996     }
997 
998 #ifdef EXPENSIVE_CHECKS
999     if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
1000       checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
1001 #endif
1002     return Result;
1003   }
1004 };
1005 
1006 struct RenamePassData {
1007   DomTreeNode *DTN;
1008   DomTreeNode::const_iterator ChildIt;
1009   MemoryAccess *IncomingVal;
1010 
1011   RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
1012                  MemoryAccess *M)
1013       : DTN(D), ChildIt(It), IncomingVal(M) {}
1014 
1015   void swap(RenamePassData &RHS) {
1016     std::swap(DTN, RHS.DTN);
1017     std::swap(ChildIt, RHS.ChildIt);
1018     std::swap(IncomingVal, RHS.IncomingVal);
1019   }
1020 };
1021 
1022 } // end anonymous namespace
1023 
1024 namespace llvm {
1025 
1026 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1027   ClobberWalker<AliasAnalysisType> Walker;
1028   MemorySSA *MSSA;
1029 
1030 public:
1031   ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
1032       : Walker(*M, *A, *D), MSSA(M) {}
1033 
1034   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1035                                               const MemoryLocation &,
1036                                               unsigned &);
1037   // The third argument (bool) defines whether the clobber search should skip the
1038   // original queried access. If true, there will be a follow-up query searching
1039   // for a clobber access past "self". Note that the Optimized access is not
1040   // updated if a new clobber is found by this SkipSelf search. If this
1041   // additional query becomes heavily used we may decide to cache the result.
1042   // Walker instantiations will decide how to set the SkipSelf bool.
1043   MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool,
1044                                               bool UseInvariantGroup = true);
1045 };
1046 
1047 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1048 /// longer does caching on its own, but the name has been retained for the
1049 /// moment.
1050 template <class AliasAnalysisType>
1051 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1052   ClobberWalkerBase<AliasAnalysisType> *Walker;
1053 
1054 public:
1055   CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1056       : MemorySSAWalker(M), Walker(W) {}
1057   ~CachingWalker() override = default;
1058 
1059   using MemorySSAWalker::getClobberingMemoryAccess;
1060 
1061   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1062     return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
1063   }
1064   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1065                                           const MemoryLocation &Loc,
1066                                           unsigned &UWL) {
1067     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1068   }
1069   // This method is not accessible outside of this file.
1070   MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA,
1071                                                                unsigned &UWL) {
1072     return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false);
1073   }
1074 
1075   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1076     unsigned UpwardWalkLimit = MaxCheckLimit;
1077     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1078   }
1079   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1080                                           const MemoryLocation &Loc) override {
1081     unsigned UpwardWalkLimit = MaxCheckLimit;
1082     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1083   }
1084 
1085   void invalidateInfo(MemoryAccess *MA) override {
1086     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1087       MUD->resetOptimized();
1088   }
1089 };
1090 
1091 template <class AliasAnalysisType>
1092 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1093   ClobberWalkerBase<AliasAnalysisType> *Walker;
1094 
1095 public:
1096   SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
1097       : MemorySSAWalker(M), Walker(W) {}
1098   ~SkipSelfWalker() override = default;
1099 
1100   using MemorySSAWalker::getClobberingMemoryAccess;
1101 
1102   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
1103     return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
1104   }
1105   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1106                                           const MemoryLocation &Loc,
1107                                           unsigned &UWL) {
1108     return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
1109   }
1110 
1111   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
1112     unsigned UpwardWalkLimit = MaxCheckLimit;
1113     return getClobberingMemoryAccess(MA, UpwardWalkLimit);
1114   }
1115   MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
1116                                           const MemoryLocation &Loc) override {
1117     unsigned UpwardWalkLimit = MaxCheckLimit;
1118     return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
1119   }
1120 
1121   void invalidateInfo(MemoryAccess *MA) override {
1122     if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1123       MUD->resetOptimized();
1124   }
1125 };
1126 
1127 } // end namespace llvm
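
// A minimal caller-side sketch (outside this file; `MSSA` and `LI` are
// hypothetical) of how the walkers above are typically driven by clients:
//
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *Clobber =
//       W->getClobberingMemoryAccess(MSSA.getMemoryAccess(LI));
//   if (MSSA.isLiveOnEntryDef(Clobber))
//     ...; // the load is not clobbered by anything in this function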
1128 
1129 void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
1130                                     bool RenameAllUses) {
1131   // Pass through values to our successors
1132   for (const BasicBlock *S : successors(BB)) {
1133     auto It = PerBlockAccesses.find(S);
1134     // Rename the phi nodes in our successor block
1135     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1136       continue;
1137     AccessList *Accesses = It->second.get();
1138     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1139     if (RenameAllUses) {
1140       bool ReplacementDone = false;
1141       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1142         if (Phi->getIncomingBlock(I) == BB) {
1143           Phi->setIncomingValue(I, IncomingVal);
1144           ReplacementDone = true;
1145         }
1146       (void) ReplacementDone;
1147       assert(ReplacementDone && "Incomplete phi during partial rename");
1148     } else
1149       Phi->addIncoming(IncomingVal, BB);
1150   }
1151 }
1152 
1153 /// Rename a single basic block into MemorySSA form.
1154 /// Uses the standard SSA renaming algorithm.
1155 /// \returns The new incoming value.
1156 MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
1157                                      bool RenameAllUses) {
1158   auto It = PerBlockAccesses.find(BB);
1159   // Skip most processing if the list is empty.
1160   if (It != PerBlockAccesses.end()) {
1161     AccessList *Accesses = It->second.get();
1162     for (MemoryAccess &L : *Accesses) {
1163       if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1164         if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1165           MUD->setDefiningAccess(IncomingVal);
1166         if (isa<MemoryDef>(&L))
1167           IncomingVal = &L;
1168       } else {
1169         IncomingVal = &L;
1170       }
1171     }
1172   }
1173   return IncomingVal;
1174 }
1175 
1176 /// This is the standard SSA renaming algorithm.
1177 ///
1178 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1179 /// in phi nodes in our successors.
1180 void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
1181                            SmallPtrSetImpl<BasicBlock *> &Visited,
1182                            bool SkipVisited, bool RenameAllUses) {
1183   assert(Root && "Trying to rename accesses in an unreachable block");
1184 
1185   SmallVector<RenamePassData, 32> WorkStack;
1186   // Skip everything if we already renamed this block and we are skipping.
1187   // Note: You can't sink this into the if, because we need it to occur
1188   // regardless of whether we skip blocks or not.
1189   bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1190   if (SkipVisited && AlreadyVisited)
1191     return;
1192 
1193   IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1194   renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1195   WorkStack.push_back({Root, Root->begin(), IncomingVal});
1196 
1197   while (!WorkStack.empty()) {
1198     DomTreeNode *Node = WorkStack.back().DTN;
1199     DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1200     IncomingVal = WorkStack.back().IncomingVal;
1201 
1202     if (ChildIt == Node->end()) {
1203       WorkStack.pop_back();
1204     } else {
1205       DomTreeNode *Child = *ChildIt;
1206       ++WorkStack.back().ChildIt;
1207       BasicBlock *BB = Child->getBlock();
1208       // Note: You can't sink this into the if, because we need it to occur
1209       // regardless of whether we skip blocks or not.
1210       AlreadyVisited = !Visited.insert(BB).second;
1211       if (SkipVisited && AlreadyVisited) {
1212         // We already visited this during our renaming, which can happen when
1213         // being asked to rename multiple blocks. Figure out the incoming val,
1214         // which is the last def.
1215         // Incoming value can only change if there is a block def, and in that
1216         // case, it's the last block def in the list.
1217         if (auto *BlockDefs = getWritableBlockDefs(BB))
1218           IncomingVal = &*BlockDefs->rbegin();
1219       } else
1220         IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1221       renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1222       WorkStack.push_back({Child, Child->begin(), IncomingVal});
1223     }
1224   }
1225 }
1226 
1227 /// This handles unreachable block accesses by deleting phi nodes in
1228 /// unreachable blocks, and marking all other unreachable MemoryAccesses as
1229 /// being uses of the live on entry definition.
1230 void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1231   assert(!DT->isReachableFromEntry(BB) &&
1232          "Reachable block found while handling unreachable blocks");
1233 
1234   // Make sure phi nodes in our reachable successors end up with a
1235   // LiveOnEntryDef for our incoming edge, even though our block is forward
1236   // unreachable.  We could just disconnect these blocks from the CFG fully,
1237   // but we do not right now.
1238   for (const BasicBlock *S : successors(BB)) {
1239     if (!DT->isReachableFromEntry(S))
1240       continue;
1241     auto It = PerBlockAccesses.find(S);
1242     // Rename the phi nodes in our successor block
1243     if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1244       continue;
1245     AccessList *Accesses = It->second.get();
1246     auto *Phi = cast<MemoryPhi>(&Accesses->front());
1247     Phi->addIncoming(LiveOnEntryDef.get(), BB);
1248   }
1249 
1250   auto It = PerBlockAccesses.find(BB);
1251   if (It == PerBlockAccesses.end())
1252     return;
1253 
1254   auto &Accesses = It->second;
1255   for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1256     auto Next = std::next(AI);
1257     // If we have a phi, just remove it. We are going to replace all
1258     // users with live on entry.
1259     if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1260       UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1261     else
1262       Accesses->erase(AI);
1263     AI = Next;
1264   }
1265 }
1266 
1267 MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
1268     : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
1269       SkipWalker(nullptr) {
1270   // Build MemorySSA using a batch alias analysis. This reuses the internal
1271   // state that AA collects during an alias()/getModRefInfo() call. This is
1272   // safe because there are no CFG changes while building MemorySSA, and it can
1273   // significantly reduce the time spent by the compiler in AA, because we will
1274   // make queries about all the instructions in the Function.
1275   assert(AA && "No alias analysis?");
1276   BatchAAResults BatchAA(*AA);
1277   buildMemorySSA(BatchAA);
1278   // Intentionally leave AA as nullptr while building so we don't accidentally
1279   // use non-batch AliasAnalysis.
1280   this->AA = AA;
1281   // Also create the walker here.
1282   getWalker();
1283 }
1284 
1285 MemorySSA::~MemorySSA() {
1286   // Drop all our references
1287   for (const auto &Pair : PerBlockAccesses)
1288     for (MemoryAccess &MA : *Pair.second)
1289       MA.dropAllReferences();
1290 }
1291 
1292 MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
1293   auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));
1294 
1295   if (Res.second)
1296     Res.first->second = std::make_unique<AccessList>();
1297   return Res.first->second.get();
1298 }
1299 
1300 MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
1301   auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));
1302 
1303   if (Res.second)
1304     Res.first->second = std::make_unique<DefsList>();
1305   return Res.first->second.get();
1306 }
1307 
1308 namespace llvm {
1309 
1310 /// This class is a batch walker of all MemoryUse's in the program, and points
1311 /// their defining access at the thing that actually clobbers them.  Because it
1312 /// is a batch walker that touches everything, it does not operate like the
1313 /// other walkers.  This walker is basically performing a top-down SSA renaming
1314 /// pass, where the version stack is used as the cache.  This enables it to be
1315 /// significantly more time and memory efficient than using the regular walker,
1316 /// which is walking bottom-up.
1317 class MemorySSA::OptimizeUses {
1318 public:
1319   OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
1320                BatchAAResults *BAA, DominatorTree *DT)
1321       : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1322 
1323   void optimizeUses();
1324 
1325 private:
1326   /// This represents where a given MemoryLocation is in the stack.
1327   struct MemlocStackInfo {
1328     // This essentially keeps track of versions of the stack. Whenever
1329     // the stack changes due to pushes or pops, these versions increase.
1330     unsigned long StackEpoch;
1331     unsigned long PopEpoch;
1332     // This is the lower bound of places on the stack to check. It is equal to
1333     // the place the last stack walk ended.
1334     // Note: Correctness depends on this being initialized to 0, which DenseMap
1335     // does.
1336     unsigned long LowerBound;
1337     const BasicBlock *LowerBoundBlock;
1338     // This is where the last walk for this memory location ended.
1339     unsigned long LastKill;
1340     bool LastKillValid;
1341     Optional<AliasResult> AR;
1342   };
1343 
1344   void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1345                            SmallVectorImpl<MemoryAccess *> &,
1346                            DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1347 
1348   MemorySSA *MSSA;
1349   CachingWalker<BatchAAResults> *Walker;
1350   BatchAAResults *AA;
1351   DominatorTree *DT;
1352 };
1353 
1354 } // end namespace llvm
1355 
/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses.  This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse.  The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// For each MemoryLocation, we track the stack positions it still needs to
/// check and where its last walk ended.  This is because we only want to check
/// the things that changed since last time.  The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness or
/// things like this, and if it ever does, we can modify MemoryLocOrCall to
/// include the relevant data).
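///
/// For example (illustrative only), for a block whose access list is
///   1 = MemoryDef(liveOnEntry)   ; store to %p
///   MemoryUse(?)                 ; load from %p
/// the def is pushed onto the version stack, and the use is then resolved by
/// scanning down from the top of the stack until a clobbering access is found
/// (here, the def numbered 1).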
1368 void MemorySSA::OptimizeUses::optimizeUsesInBlock(
1369     const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
1370     SmallVectorImpl<MemoryAccess *> &VersionStack,
1371     DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1372 
  // If no accesses, nothing to do.
1374   MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1375   if (Accesses == nullptr)
1376     return;
1377 
  // Pop everything that doesn't dominate the current block off the stack, and
  // increment the PopEpoch to account for this.
1380   while (true) {
1381     assert(
1382         !VersionStack.empty() &&
1383         "Version stack should have liveOnEntry sentinel dominating everything");
1384     BasicBlock *BackBlock = VersionStack.back()->getBlock();
1385     if (DT->dominates(BackBlock, BB))
1386       break;
1387     while (VersionStack.back()->getBlock() == BackBlock)
1388       VersionStack.pop_back();
1389     ++PopEpoch;
1390   }
1391 
1392   for (MemoryAccess &MA : *Accesses) {
1393     auto *MU = dyn_cast<MemoryUse>(&MA);
1394     if (!MU) {
1395       VersionStack.push_back(&MA);
1396       ++StackEpoch;
1397       continue;
1398     }
1399 
1400     if (MU->isOptimized())
1401       continue;
1402 
1403     if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1404       MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1405       continue;
1406     }
1407 
1408     MemoryLocOrCall UseMLOC(MU);
1409     auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from the top of
    // the stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
1413     if (LocInfo.PopEpoch != PopEpoch) {
1414       LocInfo.PopEpoch = PopEpoch;
1415       LocInfo.StackEpoch = StackEpoch;
1416       // If the lower bound was in something that no longer dominates us, we
1417       // have to reset it.
1418       // We can't simply track stack size, because the stack may have had
1419       // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees.  To get the optimal number of queries, we
      // would have to make the lower bound and last kill a per-location stack,
      // and pop it until the top of that stack dominates us.  This does not
      // seem worth it ATM.
      // A much cheaper optimization would be to always explore the deepest
      // branch of the dominator tree first. This would guarantee that this
      // reset happens on the smallest possible set of blocks.
1427       if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1428           !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1429         // Reset the lower bound of things to check.
1430         // TODO: Some day we should be able to reset to last kill, rather than
1431         // 0.
1432         LocInfo.LowerBound = 0;
1433         LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1434         LocInfo.LastKillValid = false;
1435       }
1436     } else if (LocInfo.StackEpoch != StackEpoch) {
1437       // If all that has changed is the StackEpoch, we only have to check the
1438       // new things on the stack, because we've checked everything before.  In
1439       // this case, the lower bound of things to check remains the same.
1440       LocInfo.PopEpoch = PopEpoch;
1441       LocInfo.StackEpoch = StackEpoch;
1442     }
1443     if (!LocInfo.LastKillValid) {
1444       LocInfo.LastKill = VersionStack.size() - 1;
1445       LocInfo.LastKillValid = true;
1446       LocInfo.AR = AliasResult::MayAlias;
1447     }
1448 
    // At this point, we should have corrected LastKill and LowerBound to be
    // in bounds.
1451     assert(LocInfo.LowerBound < VersionStack.size() &&
1452            "Lower bound out of range");
1453     assert(LocInfo.LastKill < VersionStack.size() &&
1454            "Last kill info out of range");
1455     // In any case, the new upper bound is the top of the stack.
1456     unsigned long UpperBound = VersionStack.size() - 1;
1457 
1458     if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1459       LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1460                         << *(MU->getMemoryInst()) << ")"
1461                         << " because there are "
1462                         << UpperBound - LocInfo.LowerBound
1463                         << " stores to disambiguate\n");
1464       // Because we did not walk, LastKill is no longer valid, as this may
1465       // have been a kill.
1466       LocInfo.LastKillValid = false;
1467       continue;
1468     }
1469     bool FoundClobberResult = false;
1470     unsigned UpwardWalkLimit = MaxCheckLimit;
1471     while (UpperBound > LocInfo.LowerBound) {
1472       if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1473         // For phis, use the walker, see where we ended up, go there.
1474         // The invariant.group handling in MemorySSA is ad-hoc and doesn't
1475         // support updates, so don't use it to optimize uses.
1476         MemoryAccess *Result =
1477             Walker->getClobberingMemoryAccessWithoutInvariantGroup(
1478                 MU, UpwardWalkLimit);
1479         // We are guaranteed to find it or something is wrong.
1480         while (VersionStack[UpperBound] != Result) {
1481           assert(UpperBound != 0);
1482           --UpperBound;
1483         }
1484         FoundClobberResult = true;
1485         break;
1486       }
1487 
1488       MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1489       ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1490       if (CA.IsClobber) {
1491         FoundClobberResult = true;
1492         LocInfo.AR = CA.AR;
1493         break;
1494       }
1495       --UpperBound;
1496     }
1497 
1498     // Note: Phis always have AliasResult AR set to MayAlias ATM.
1499 
    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound.  PHI walking may cause it to be < LowerBound, and in fact, even
    // < LastKill.
1502     if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed by the access we ended up at.
1504       if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1505         LocInfo.AR = None;
1506       MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1507       LocInfo.LastKill = UpperBound;
1508     } else {
1509       // Otherwise, we checked all the new ones, and now we know we can get to
1510       // LastKill.
1511       MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1512     }
1513     LocInfo.LowerBound = VersionStack.size() - 1;
1514     LocInfo.LowerBoundBlock = BB;
1515   }
1516 }
1517 
1518 /// Optimize uses to point to their actual clobbering definitions.
1519 void MemorySSA::OptimizeUses::optimizeUses() {
1520   SmallVector<MemoryAccess *, 16> VersionStack;
1521   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1522   VersionStack.push_back(MSSA->getLiveOnEntryDef());
1523 
1524   unsigned long StackEpoch = 1;
1525   unsigned long PopEpoch = 1;
1526   // We perform a non-recursive top-down dominator tree walk.
1527   for (const auto *DomNode : depth_first(DT->getRootNode()))
1528     optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1529                         LocStackInfo);
1530 }
1531 
1532 void MemorySSA::placePHINodes(
1533     const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhis should go.
1535   ForwardIDFCalculator IDFs(*DT);
1536   IDFs.setDefiningBlocks(DefiningBlocks);
1537   SmallVector<BasicBlock *, 32> IDFBlocks;
1538   IDFs.calculate(IDFBlocks);
1539 
1540   // Now place MemoryPhi nodes.
1541   for (auto &BB : IDFBlocks)
1542     createMemoryPhi(BB);
1543 }
1544 
1545 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1546   // We create an access to represent "live on entry", for things like
1547   // arguments or users of globals, where the memory they use is defined before
1548   // the beginning of the function. We do not actually insert it into the IR.
1549   // We do not define a live on exit for the immediate uses, and thus our
1550   // semantics do *not* imply that something with no immediate uses can simply
1551   // be removed.
1552   BasicBlock &StartingPoint = F.getEntryBlock();
1553   LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1554                                      &StartingPoint, NextID++));
1555 
1556   // We maintain lists of memory accesses per-block, trading memory for time. We
1557   // could just look up the memory access for every possible instruction in the
1558   // stream.
1559   SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1560   // Go through each block, figure out where defs occur, and chain together all
1561   // the accesses.
1562   for (BasicBlock &B : F) {
1563     bool InsertIntoDef = false;
1564     AccessList *Accesses = nullptr;
1565     DefsList *Defs = nullptr;
1566     for (Instruction &I : B) {
1567       MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1568       if (!MUD)
1569         continue;
1570 
1571       if (!Accesses)
1572         Accesses = getOrCreateAccessList(&B);
1573       Accesses->push_back(MUD);
1574       if (isa<MemoryDef>(MUD)) {
1575         InsertIntoDef = true;
1576         if (!Defs)
1577           Defs = getOrCreateDefsList(&B);
1578         Defs->push_back(*MUD);
1579       }
1580     }
1581     if (InsertIntoDef)
1582       DefiningBlocks.insert(&B);
1583   }
1584   placePHINodes(DefiningBlocks);
1585 
1586   // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1587   // filled in with all blocks.
1588   SmallPtrSet<BasicBlock *, 16> Visited;
1589   renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1590 
1591   // Mark the uses in unreachable blocks as live on entry, so that they go
1592   // somewhere.
1593   for (auto &BB : F)
1594     if (!Visited.count(&BB))
1595       markUnreachableAsLiveOnEntry(&BB);
1596 }
1597 
1598 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1599 
1600 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1601   if (Walker)
1602     return Walker.get();
1603 
1604   if (!WalkerBase)
1605     WalkerBase =
1606         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1607 
1608   Walker =
1609       std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1610   return Walker.get();
1611 }
1612 
1613 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1614   if (SkipWalker)
1615     return SkipWalker.get();
1616 
1617   if (!WalkerBase)
1618     WalkerBase =
1619         std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1620 
1621   SkipWalker =
1622       std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1623   return SkipWalker.get();
1624  }
1625 
1626 
1627 // This is a helper function used by the creation routines. It places NewAccess
1628 // into the access and defs lists for a given basic block, at the given
1629 // insertion point.
1630 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1631                                         const BasicBlock *BB,
1632                                         InsertionPlace Point) {
1633   auto *Accesses = getOrCreateAccessList(BB);
1634   if (Point == Beginning) {
    // If it's a phi node, it goes first; otherwise, it goes after any phi
    // nodes.
1637     if (isa<MemoryPhi>(NewAccess)) {
1638       Accesses->push_front(NewAccess);
1639       auto *Defs = getOrCreateDefsList(BB);
1640       Defs->push_front(*NewAccess);
1641     } else {
1642       auto AI = find_if_not(
1643           *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1644       Accesses->insert(AI, NewAccess);
1645       if (!isa<MemoryUse>(NewAccess)) {
1646         auto *Defs = getOrCreateDefsList(BB);
1647         auto DI = find_if_not(
1648             *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1649         Defs->insert(DI, *NewAccess);
1650       }
1651     }
1652   } else {
1653     Accesses->push_back(NewAccess);
1654     if (!isa<MemoryUse>(NewAccess)) {
1655       auto *Defs = getOrCreateDefsList(BB);
1656       Defs->push_back(*NewAccess);
1657     }
1658   }
1659   BlockNumberingValid.erase(BB);
1660 }
1661 
1662 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1663                                       AccessList::iterator InsertPt) {
1664   auto *Accesses = getWritableBlockAccesses(BB);
1665   bool WasEnd = InsertPt == Accesses->end();
1666   Accesses->insert(AccessList::iterator(InsertPt), What);
1667   if (!isa<MemoryUse>(What)) {
1668     auto *Defs = getOrCreateDefsList(BB);
1669     // If we got asked to insert at the end, we have an easy job, just shove it
1670     // at the end. If we got asked to insert before an existing def, we also get
1671     // an iterator. If we got asked to insert before a use, we have to hunt for
1672     // the next def.
1673     if (WasEnd) {
1674       Defs->push_back(*What);
1675     } else if (isa<MemoryDef>(InsertPt)) {
1676       Defs->insert(InsertPt->getDefsIterator(), *What);
1677     } else {
1678       while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1679         ++InsertPt;
1680       // Either we found a def, or we are inserting at the end
1681       if (InsertPt == Accesses->end())
1682         Defs->push_back(*What);
1683       else
1684         Defs->insert(InsertPt->getDefsIterator(), *What);
1685     }
1686   }
1687   BlockNumberingValid.erase(BB);
1688 }
1689 
1690 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, but remove it from the lists.
1692   removeFromLists(What, false);
1693 
  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef, so we reset it explicitly here.
1697   if (auto *MD = dyn_cast<MemoryDef>(What))
1698     MD->resetOptimized();
1699   What->setBlock(BB);
1700 }
1701 
// Move What before Where in the IR.  The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not be properly updated.
1706 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1707                        AccessList::iterator Where) {
1708   prepareForMoveTo(What, BB);
1709   insertIntoListsBefore(What, BB, Where);
1710 }
1711 
1712 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1713                        InsertionPlace Point) {
1714   if (isa<MemoryPhi>(What)) {
1715     assert(Point == Beginning &&
1716            "Can only move a Phi at the beginning of the block");
1717     // Update lookup table entry
1718     ValueToMemoryAccess.erase(What->getBlock());
1719     bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1720     (void)Inserted;
1721     assert(Inserted && "Cannot move a Phi to a block that already has one");
1722   }
1723 
1724   prepareForMoveTo(What, BB);
1725   insertIntoListsForBlock(What, BB, Point);
1726 }
1727 
1728 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1729   assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1730   MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
1732   insertIntoListsForBlock(Phi, BB, Beginning);
1733   ValueToMemoryAccess[BB] = Phi;
1734   return Phi;
1735 }
1736 
1737 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1738                                                MemoryAccess *Definition,
1739                                                const MemoryUseOrDef *Template,
1740                                                bool CreationMustSucceed) {
1741   assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1742   MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1743   if (CreationMustSucceed)
1744     assert(NewAccess != nullptr && "Tried to create a memory access for a "
1745                                    "non-memory touching instruction");
1746   if (NewAccess) {
1747     assert((!Definition || !isa<MemoryUse>(Definition)) &&
1748            "A use cannot be a defining access");
1749     NewAccess->setDefiningAccess(Definition);
1750   }
1751   return NewAccess;
1752 }
1753 
1754 // Return true if the instruction has ordering constraints.
1755 // Note specifically that this only considers stores and loads
1756 // because others are still considered ModRef by getModRefInfo.
1757 static inline bool isOrdered(const Instruction *I) {
1758   if (auto *SI = dyn_cast<StoreInst>(I)) {
1759     if (!SI->isUnordered())
1760       return true;
1761   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1762     if (!LI->isUnordered())
1763       return true;
1764   }
1765   return false;
1766 }
1767 
1768 /// Helper function to create new memory accesses
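///
/// An instruction that may modify memory (or is an ordered load/store) becomes
/// a MemoryDef, one that only reads memory becomes a MemoryUse, and an
/// instruction that does neither gets no access at all.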
1769 template <typename AliasAnalysisType>
1770 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1771                                            AliasAnalysisType *AAP,
1772                                            const MemoryUseOrDef *Template) {
1773   // The assume intrinsic has a control dependency which we model by claiming
1774   // that it writes arbitrarily. Debuginfo intrinsics may be considered
1775   // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1776   // dependencies here.
1777   // FIXME: Replace this special casing with a more accurate modelling of
1778   // assume's control dependency.
1779   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1780     switch (II->getIntrinsicID()) {
1781     default:
1782       break;
1783     case Intrinsic::assume:
1784     case Intrinsic::experimental_noalias_scope_decl:
1785     case Intrinsic::pseudoprobe:
1786       return nullptr;
1787     }
1788   }
1789 
  // Using a nonstandard AA pipeline might leave us with unexpected modref
  // results for I, so add a check to not model instructions that do not read
  // from or write to memory. This is necessary for correctness.
1793   if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1794     return nullptr;
1795 
1796   bool Def, Use;
1797   if (Template) {
1798     Def = isa<MemoryDef>(Template);
1799     Use = isa<MemoryUse>(Template);
1800 #if !defined(NDEBUG)
1801     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1802     bool DefCheck, UseCheck;
1803     DefCheck = isModSet(ModRef) || isOrdered(I);
1804     UseCheck = isRefSet(ModRef);
1805     assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1806 #endif
1807   } else {
    // Find out what effect this instruction has on memory.
1809     ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1810     // The isOrdered check is used to ensure that volatiles end up as defs
1811     // (atomics end up as ModRef right now anyway).  Until we separate the
1812     // ordering chain from the memory chain, this enables people to see at least
1813     // some relative ordering to volatiles.  Note that getClobberingMemoryAccess
1814     // will still give an answer that bypasses other volatile loads.  TODO:
1815     // Separate memory aliasing and ordering into two different chains so that
1816     // we can precisely represent both "what memory will this read/write/is
1817     // clobbered by" and "what instructions can I move this past".
1818     Def = isModSet(ModRef) || isOrdered(I);
1819     Use = isRefSet(ModRef);
1820   }
1821 
  // It's possible for an instruction to not access memory at all. During
  // construction, we ignore such instructions.
1824   if (!Def && !Use)
1825     return nullptr;
1826 
1827   MemoryUseOrDef *MUD;
1828   if (Def)
1829     MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1830   else
1831     MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1832   ValueToMemoryAccess[I] = MUD;
1833   return MUD;
1834 }
1835 
1836 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1837 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1838   assert(MA->use_empty() &&
1839          "Trying to remove memory access that still has uses");
1840   BlockNumbering.erase(MA);
1841   if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1842     MUD->setDefiningAccess(nullptr);
1843   // Invalidate our walker's cache if necessary
1844   if (!isa<MemoryUse>(MA))
1845     getWalker()->invalidateInfo(MA);
1846 
1847   Value *MemoryInst;
1848   if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1849     MemoryInst = MUD->getMemoryInst();
1850   else
1851     MemoryInst = MA->getBlock();
1852 
1853   auto VMA = ValueToMemoryAccess.find(MemoryInst);
1854   if (VMA->second == MA)
1855     ValueToMemoryAccess.erase(VMA);
1856 }
1857 
1858 /// Properly remove \p MA from all of MemorySSA's lists.
1859 ///
1860 /// Because of the way the intrusive list and use lists work, it is important to
1861 /// do removal in the right order.
1862 /// ShouldDelete defaults to true, and will cause the memory access to also be
1863 /// deleted, not just removed.
1864 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1865   BasicBlock *BB = MA->getBlock();
1866   // The access list owns the reference, so we erase it from the non-owning list
1867   // first.
1868   if (!isa<MemoryUse>(MA)) {
1869     auto DefsIt = PerBlockDefs.find(BB);
1870     std::unique_ptr<DefsList> &Defs = DefsIt->second;
1871     Defs->remove(*MA);
1872     if (Defs->empty())
1873       PerBlockDefs.erase(DefsIt);
1874   }
1875 
1876   // The erase call here will delete it. If we don't want it deleted, we call
1877   // remove instead.
1878   auto AccessIt = PerBlockAccesses.find(BB);
1879   std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1880   if (ShouldDelete)
1881     Accesses->erase(MA);
1882   else
1883     Accesses->remove(MA);
1884 
1885   if (Accesses->empty()) {
1886     PerBlockAccesses.erase(AccessIt);
1887     BlockNumberingValid.erase(BB);
1888   }
1889 }
1890 
1891 void MemorySSA::print(raw_ostream &OS) const {
1892   MemorySSAAnnotatedWriter Writer(this);
1893   F.print(OS, &Writer);
1894 }
1895 
1896 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1897 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1898 #endif
1899 
1900 void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
1901 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1902   VL = VerificationLevel::Full;
1903 #endif
1904 
1905 #ifndef NDEBUG
1906   verifyOrderingDominationAndDefUses(F, VL);
1907   verifyDominationNumbers(F);
1908   if (VL == VerificationLevel::Full)
1909     verifyPrevDefInPhis(F);
1910 #endif
  // Previously, the verification also checked that the clobbering access
  // cached by MemorySSA is the same as the clobbering access found by a later
  // query to AA. This does not hold true in general due to the current
  // fragility of BasicAA, which has arbitrary caps on the things it analyzes
  // before giving up. As a result, correct transformations can lead to BasicAA
  // returning different alias answers before and after that transformation.
  // Invalidating MemorySSA is not an option, because the results from BasicAA
  // can be so erratic that, in the worst case, we would need to rebuild
  // MemorySSA from scratch after every transformation, which defeats the
  // purpose of using it. For such an example, see test4 added in D51960.
1921 }
1922 
1923 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1924   for (const BasicBlock &BB : F) {
1925     if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1926       for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1927         auto *Pred = Phi->getIncomingBlock(I);
1928         auto *IncAcc = Phi->getIncomingValue(I);
        // If Pred has no unreachable predecessors, get the last def by looking
        // at IDoms. If, while walking IDoms, any of these has an unreachable
        // predecessor, then the incoming def can be any access.
1932         if (auto *DTNode = DT->getNode(Pred)) {
1933           while (DTNode) {
1934             if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1935               auto *LastAcc = &*(--DefList->end());
1936               assert(LastAcc == IncAcc &&
1937                      "Incorrect incoming access into phi.");
1938               (void)IncAcc;
1939               (void)LastAcc;
1940               break;
1941             }
1942             DTNode = DTNode->getIDom();
1943           }
1944         } else {
1945           // If Pred has unreachable predecessors, but has at least a Def, the
1946           // incoming access can be the last Def in Pred, or it could have been
1947           // optimized to LoE. After an update, though, the LoE may have been
1948           // replaced by another access, so IncAcc may be any access.
1949           // If Pred has unreachable predecessors and no Defs, incoming access
1950           // should be LoE; However, after an update, it may be any access.
1951         }
1952       }
1953     }
1954   }
1955 }
1956 
1957 /// Verify that all of the blocks we believe to have valid domination numbers
1958 /// actually have valid domination numbers.
1959 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1960   if (BlockNumberingValid.empty())
1961     return;
1962 
1963   SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1964   for (const BasicBlock &BB : F) {
1965     if (!ValidBlocks.count(&BB))
1966       continue;
1967 
1968     ValidBlocks.erase(&BB);
1969 
1970     const AccessList *Accesses = getBlockAccesses(&BB);
1971     // It's correct to say an empty block has valid numbering.
1972     if (!Accesses)
1973       continue;
1974 
1975     // Block numbering starts at 1.
1976     unsigned long LastNumber = 0;
1977     for (const MemoryAccess &MA : *Accesses) {
1978       auto ThisNumberIter = BlockNumbering.find(&MA);
1979       assert(ThisNumberIter != BlockNumbering.end() &&
1980              "MemoryAccess has no domination number in a valid block!");
1981 
1982       unsigned long ThisNumber = ThisNumberIter->second;
1983       assert(ThisNumber > LastNumber &&
1984              "Domination numbers should be strictly increasing!");
1985       (void)LastNumber;
1986       LastNumber = ThisNumber;
1987     }
1988   }
1989 
1990   assert(ValidBlocks.empty() &&
1991          "All valid BasicBlocks should exist in F -- dangling pointers?");
1992 }
1993 
1994 /// Verify ordering: the order and existence of MemoryAccesses matches the
1995 /// order and existence of memory affecting instructions.
1996 /// Verify domination: each definition dominates all of its uses.
/// Verify def-uses: the immediate use information - walk all the memory
/// accesses and verify that, for each use, it appears in the appropriate
/// def's use list.
2000 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
2001                                                    VerificationLevel VL) const {
2002   // Walk all the blocks, comparing what the lookups think and what the access
2003   // lists think, as well as the order in the blocks vs the order in the access
2004   // lists.
2005   SmallVector<MemoryAccess *, 32> ActualAccesses;
2006   SmallVector<MemoryAccess *, 32> ActualDefs;
2007   for (BasicBlock &B : F) {
2008     const AccessList *AL = getBlockAccesses(&B);
2009     const auto *DL = getBlockDefs(&B);
2010     MemoryPhi *Phi = getMemoryAccess(&B);
2011     if (Phi) {
2012       // Verify ordering.
2013       ActualAccesses.push_back(Phi);
2014       ActualDefs.push_back(Phi);
2015       // Verify domination
2016       for (const Use &U : Phi->uses()) {
        assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2018         (void)U;
2019       }
2020       // Verify def-uses for full verify.
2021       if (VL == VerificationLevel::Full) {
2022         assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2023                                             pred_begin(&B), pred_end(&B))) &&
2024                "Incomplete MemoryPhi Node");
2025         for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2026           verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2027           assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2028                  "Incoming phi block not a block predecessor");
2029         }
2030       }
2031     }
2032 
2033     for (Instruction &I : B) {
2034       MemoryUseOrDef *MA = getMemoryAccess(&I);
2035       assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2036              "We have memory affecting instructions "
2037              "in this block but they are not in the "
2038              "access list or defs list");
2039       if (MA) {
2040         // Verify ordering.
2041         ActualAccesses.push_back(MA);
2042         if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2043           // Verify ordering.
2044           ActualDefs.push_back(MA);
2045           // Verify domination.
2046           for (const Use &U : MD->uses()) {
            assert(dominates(MD, U) &&
                   "Memory Def does not dominate its uses");
2049             (void)U;
2050           }
2051         }
2052         // Verify def-uses for full verify.
2053         if (VL == VerificationLevel::Full)
2054           verifyUseInDefs(MA->getDefiningAccess(), MA);
2055       }
2056     }
2057     // Either we hit the assert, really have no accesses, or we have both
2058     // accesses and an access list. Same with defs.
2059     if (!AL && !DL)
2060       continue;
2061     // Verify ordering.
2062     assert(AL->size() == ActualAccesses.size() &&
2063            "We don't have the same number of accesses in the block as on the "
2064            "access list");
2065     assert((DL || ActualDefs.size() == 0) &&
2066            "Either we should have a defs list, or we should have no defs");
2067     assert((!DL || DL->size() == ActualDefs.size()) &&
2068            "We don't have the same number of defs in the block as on the "
2069            "def list");
2070     auto ALI = AL->begin();
2071     auto AAI = ActualAccesses.begin();
2072     while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2073       assert(&*ALI == *AAI && "Not the same accesses in the same order");
2074       ++ALI;
2075       ++AAI;
2076     }
2077     ActualAccesses.clear();
2078     if (DL) {
2079       auto DLI = DL->begin();
2080       auto ADI = ActualDefs.begin();
2081       while (DLI != DL->end() && ADI != ActualDefs.end()) {
2082         assert(&*DLI == *ADI && "Not the same defs in the same order");
2083         ++DLI;
2084         ++ADI;
2085       }
2086     }
2087     ActualDefs.clear();
2088   }
2089 }
2090 
2091 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2092 /// appears in the use list of \p Def.
2093 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
  // The live on entry use may cause us to get a null def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to live on entry def");
2098   else
2099     assert(is_contained(Def->users(), Use) &&
2100            "Did not find use in def's use list");
2101 }
2102 
2103 /// Perform a local numbering on blocks so that instruction ordering can be
2104 /// determined in constant time.
2105 /// TODO: We currently just number in order.  If we numbered by N, we could
2106 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2107 /// log2(N) sequences of mixed before and after) without needing to invalidate
2108 /// the numbering.
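///
/// For example (illustrative only), if a block's access list is
/// [MemoryPhi, MemoryDef, MemoryUse], they are numbered 1, 2 and 3, so the def
/// locally dominates the use because 2 < 3.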
2109 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2110   // The pre-increment ensures the numbers really start at 1.
2111   unsigned long CurrentNumber = 0;
2112   const AccessList *AL = getBlockAccesses(B);
2113   assert(AL != nullptr && "Asking to renumber an empty block");
2114   for (const auto &I : *AL)
2115     BlockNumbering[&I] = ++CurrentNumber;
2116   BlockNumberingValid.insert(B);
2117 }
2118 
2119 /// Determine, for two memory accesses in the same block,
2120 /// whether \p Dominator dominates \p Dominatee.
2121 /// \returns True if \p Dominator dominates \p Dominatee.
2122 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2123                                  const MemoryAccess *Dominatee) const {
2124   const BasicBlock *DominatorBlock = Dominator->getBlock();
2125 
2126   assert((DominatorBlock == Dominatee->getBlock()) &&
2127          "Asking for local domination when accesses are in different blocks!");
2128   // A node dominates itself.
2129   if (Dominatee == Dominator)
2130     return true;
2131 
2132   // When Dominatee is defined on function entry, it is not dominated by another
2133   // memory access.
2134   if (isLiveOnEntryDef(Dominatee))
2135     return false;
2136 
2137   // When Dominator is defined on function entry, it dominates the other memory
2138   // access.
2139   if (isLiveOnEntryDef(Dominator))
2140     return true;
2141 
2142   if (!BlockNumberingValid.count(DominatorBlock))
2143     renumberBlock(DominatorBlock);
2144 
2145   unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
2147   assert(DominatorNum != 0 && "Block was not numbered properly");
2148   unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2149   assert(DominateeNum != 0 && "Block was not numbered properly");
2150   return DominatorNum < DominateeNum;
2151 }
2152 
2153 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2154                           const MemoryAccess *Dominatee) const {
2155   if (Dominator == Dominatee)
2156     return true;
2157 
2158   if (isLiveOnEntryDef(Dominatee))
2159     return false;
2160 
2161   if (Dominator->getBlock() != Dominatee->getBlock())
2162     return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2163   return locallyDominates(Dominator, Dominatee);
2164 }
2165 
2166 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2167                           const Use &Dominatee) const {
2168   if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2169     BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2170     // The def must dominate the incoming block of the phi.
2171     if (UseBB != Dominator->getBlock())
2172       return DT->dominates(Dominator->getBlock(), UseBB);
2173     // If the UseBB and the DefBB are the same, compare locally.
2174     return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2175   }
2176   // If it's not a PHI node use, the normal dominates can already handle it.
2177   return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2178 }
2179 
2180 void MemorySSA::ensureOptimizedUses() {
2181   if (IsOptimized)
2182     return;
2183 
2184   BatchAAResults BatchAA(*AA);
2185   ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT);
2186   CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
2187   OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
2188   IsOptimized = true;
2189 }
2190 
2191 void MemoryAccess::print(raw_ostream &OS) const {
2192   switch (getValueID()) {
2193   case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2194   case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2195   case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2196   }
2197   llvm_unreachable("invalid value id");
2198 }
2199 
2200 void MemoryDef::print(raw_ostream &OS) const {
2201   MemoryAccess *UO = getDefiningAccess();
2202 
2203   auto printID = [&OS](MemoryAccess *A) {
2204     if (A && A->getID())
2205       OS << A->getID();
2206     else
2207       OS << LiveOnEntryStr;
2208   };
2209 
2210   OS << getID() << " = MemoryDef(";
2211   printID(UO);
2212   OS << ")";
2213 
2214   if (isOptimized()) {
2215     OS << "->";
2216     printID(getOptimized());
2217 
2218     if (Optional<AliasResult> AR = getOptimizedAccessType())
2219       OS << " " << *AR;
2220   }
2221 }
2222 
2223 void MemoryPhi::print(raw_ostream &OS) const {
2224   ListSeparator LS(",");
2225   OS << getID() << " = MemoryPhi(";
2226   for (const auto &Op : operands()) {
2227     BasicBlock *BB = getIncomingBlock(Op);
2228     MemoryAccess *MA = cast<MemoryAccess>(Op);
2229 
2230     OS << LS << '{';
2231     if (BB->hasName())
2232       OS << BB->getName();
2233     else
2234       BB->printAsOperand(OS, false);
2235     OS << ',';
2236     if (unsigned ID = MA->getID())
2237       OS << ID;
2238     else
2239       OS << LiveOnEntryStr;
2240     OS << '}';
2241   }
2242   OS << ')';
2243 }
2244 
2245 void MemoryUse::print(raw_ostream &OS) const {
2246   MemoryAccess *UO = getDefiningAccess();
2247   OS << "MemoryUse(";
2248   if (UO && UO->getID())
2249     OS << UO->getID();
2250   else
2251     OS << LiveOnEntryStr;
2252   OS << ')';
2253 
2254   if (Optional<AliasResult> AR = getOptimizedAccessType())
2255     OS << " " << *AR;
2256 }
2257 
2258 void MemoryAccess::dump() const {
2259 // Cannot completely remove virtual function even in release mode.
2260 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2261   print(dbgs());
2262   dbgs() << "\n";
2263 #endif
2264 }
2265 
2266 char MemorySSAPrinterLegacyPass::ID = 0;
2267 
2268 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2269   initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2270 }
2271 
2272 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2273   AU.setPreservesAll();
2274   AU.addRequired<MemorySSAWrapperPass>();
2275 }
2276 
2277 class DOTFuncMSSAInfo {
2278 private:
2279   const Function &F;
2280   MemorySSAAnnotatedWriter MSSAWriter;
2281 
2282 public:
2283   DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2284       : F(F), MSSAWriter(&MSSA) {}
2285 
2286   const Function *getFunction() { return &F; }
2287   MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2288 };
2289 
2290 namespace llvm {
2291 
2292 template <>
2293 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2294   static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2295     return &(CFGInfo->getFunction()->getEntryBlock());
2296   }
2297 
2298   // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2299   using nodes_iterator = pointer_iterator<Function::const_iterator>;
2300 
2301   static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2302     return nodes_iterator(CFGInfo->getFunction()->begin());
2303   }
2304 
2305   static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2306     return nodes_iterator(CFGInfo->getFunction()->end());
2307   }
2308 
2309   static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2310     return CFGInfo->getFunction()->size();
2311   }
2312 };
2313 
2314 template <>
2315 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2316 
2317   DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2318 
2319   static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2320     return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2321            "' function";
2322   }
2323 
2324   std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2325     return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2326         Node, nullptr,
2327         [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2328           BB.print(OS, &CFGInfo->getWriter(), true, true);
2329         },
2330         [](std::string &S, unsigned &I, unsigned Idx) -> void {
2331           std::string Str = S.substr(I, Idx - I);
2332           StringRef SR = Str;
2333           if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2334               SR.count("MemoryUse("))
2335             return;
2336           DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2337         });
2338   }
2339 
2340   static std::string getEdgeSourceLabel(const BasicBlock *Node,
2341                                         const_succ_iterator I) {
2342     return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2343   }
2344 
  /// No extra edge attributes (such as PGO branch weights) are displayed.
2346   std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2347                                 DOTFuncMSSAInfo *CFGInfo) {
2348     return "";
2349   }
2350 
2351   std::string getNodeAttributes(const BasicBlock *Node,
2352                                 DOTFuncMSSAInfo *CFGInfo) {
2353     return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2354                ? "style=filled, fillcolor=lightpink"
2355                : "";
2356   }
2357 };
2358 
2359 } // namespace llvm
2360 
2361 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2362   auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2363   MSSA.ensureOptimizedUses();
2364   if (DotCFGMSSA != "") {
2365     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2366     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2367   } else
2368     MSSA.print(dbgs());
2369 
2370   if (VerifyMemorySSA)
2371     MSSA.verifyMemorySSA();
2372   return false;
2373 }
2374 
2375 AnalysisKey MemorySSAAnalysis::Key;
2376 
2377 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2378                                                  FunctionAnalysisManager &AM) {
2379   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2380   auto &AA = AM.getResult<AAManager>(F);
2381   return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2382 }
2383 
2384 bool MemorySSAAnalysis::Result::invalidate(
2385     Function &F, const PreservedAnalyses &PA,
2386     FunctionAnalysisManager::Invalidator &Inv) {
2387   auto PAC = PA.getChecker<MemorySSAAnalysis>();
2388   return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2389          Inv.invalidate<AAManager>(F, PA) ||
2390          Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2391 }
2392 
2393 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2394                                             FunctionAnalysisManager &AM) {
2395   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2396   MSSA.ensureOptimizedUses();
2397   if (DotCFGMSSA != "") {
2398     DOTFuncMSSAInfo CFGInfo(F, MSSA);
2399     WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2400   } else {
2401     OS << "MemorySSA for function: " << F.getName() << "\n";
2402     MSSA.print(OS);
2403   }
2404 
2405   return PreservedAnalyses::all();
2406 }
2407 
2408 PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
2409                                                   FunctionAnalysisManager &AM) {
2410   auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2411   OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
2412   MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2413   F.print(OS, &Writer);
2414 
2415   return PreservedAnalyses::all();
2416 }
2417 
2418 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2419                                              FunctionAnalysisManager &AM) {
2420   AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2421 
2422   return PreservedAnalyses::all();
2423 }
2424 
2425 char MemorySSAWrapperPass::ID = 0;
2426 
2427 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2428   initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2429 }
2430 
2431 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2432 
2433 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2434   AU.setPreservesAll();
2435   AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2436   AU.addRequiredTransitive<AAResultsWrapperPass>();
2437 }
2438 
2439 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2440   auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2441   auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2442   MSSA.reset(new MemorySSA(F, &AA, &DT));
2443   return false;
2444 }
2445 
2446 void MemorySSAWrapperPass::verifyAnalysis() const {
2447   if (VerifyMemorySSA)
2448     MSSA->verifyMemorySSA();
2449 }
2450 
2451 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2452   MSSA->print(OS);
2453 }
2454 
2455 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2456 
2457 /// Walk the use-def chains starting at \p StartingAccess and find
2458 /// the MemoryAccess that actually clobbers Loc.
2459 ///
2460 /// \returns our clobbering memory access
2461 template <typename AliasAnalysisType>
2462 MemoryAccess *
2463 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2464     MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2465     unsigned &UpwardWalkLimit) {
2466   assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2467 
2468   Instruction *I = nullptr;
2469   if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2470     if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2471       return StartingUseOrDef;
2472 
2473     I = StartingUseOrDef->getMemoryInst();
2474 
2475     // Conservatively, fences are always clobbers, so don't perform the walk if
2476     // we hit a fence.
2477     if (!isa<CallBase>(I) && I->isFenceLike())
2478       return StartingUseOrDef;
2479   }
2480 
2481   UpwardsMemoryQuery Q;
2482   Q.OriginalAccess = StartingAccess;
2483   Q.StartingLoc = Loc;
2484   Q.Inst = nullptr;
2485   Q.IsCall = false;
2486 
2487   // Unlike the other function, do not walk to the def of a def, because we are
2488   // handed something we already believe is the clobbering access.
2489   // We never set SkipSelf to true in Q in this method.
2490   MemoryAccess *Clobber =
2491       Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2492   LLVM_DEBUG({
2493     dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2494     if (I)
2495       dbgs() << "  for instruction " << *I << "\n";
2496     dbgs() << "  is " << *Clobber << "\n";
2497   });
2498   return Clobber;
2499 }
2500 
2501 static const Instruction *
2502 getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
2503   if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
2504     return nullptr;
2505 
2506   // We consider bitcasts and zero GEPs to be the same pointer value. Start by
2507   // stripping bitcasts and zero GEPs, then we will recursively look at loads
2508   // and stores through bitcasts and zero GEPs.
2509   Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();
2510 
  // It's not safe to walk the use list of a global value because function
  // passes aren't allowed to look outside their functions.
  // FIXME: this could be fixed by filtering instructions from outside the
  // current function.
2515   if (isa<Constant>(PointerOperand))
2516     return nullptr;
2517 
2518   // Queue to process all pointers that are equivalent to load operand.
2519   SmallVector<const Value *, 8> PointerUsesQueue;
2520   PointerUsesQueue.push_back(PointerOperand);
2521 
2522   const Instruction *MostDominatingInstruction = &I;
2523 
2524   // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case
2525   // we will see all the instructions. It may not matter in practice. If it
2526   // does, we will have to support MemorySSA construction and updates.
2527   while (!PointerUsesQueue.empty()) {
2528     const Value *Ptr = PointerUsesQueue.pop_back_val();
2529     assert(Ptr && !isa<GlobalValue>(Ptr) &&
2530            "Null or GlobalValue should not be inserted");
2531 
2532     for (const User *Us : Ptr->users()) {
2533       auto *U = dyn_cast<Instruction>(Us);
2534       if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
2535         continue;
2536 
2537       // Add bitcasts and zero GEPs to queue.
2538       if (isa<BitCastInst>(U)) {
2539         PointerUsesQueue.push_back(U);
2540         continue;
2541       }
2542       if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
2543         if (GEP->hasAllZeroIndices())
2544           PointerUsesQueue.push_back(U);
2545         continue;
2546       }
2547 
      // If we hit a load/store with invariant.group metadata and the same
      // pointer operand, we can assume that the value pointed to by the
      // pointer operand didn't change.
2551       if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
2552           getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) {
2553         MostDominatingInstruction = U;
2554       }
2555     }
2556   }
2557   return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2558 }
2559 
2560 template <typename AliasAnalysisType>
2561 MemoryAccess *
2562 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2563     MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf,
2564     bool UseInvariantGroup) {
2565   auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2566   // If this is a MemoryPhi, we can't do anything.
2567   if (!StartingAccess)
2568     return MA;
2569 
2570   if (UseInvariantGroup) {
2571     if (auto *I = getInvariantGroupClobberingInstruction(
2572             *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
2573       assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2574 
2575       auto *ClobberMA = MSSA->getMemoryAccess(I);
2576       assert(ClobberMA);
2577       if (isa<MemoryUse>(ClobberMA))
2578         return ClobberMA->getDefiningAccess();
2579       return ClobberMA;
2580     }
2581   }
2582 
2583   bool IsOptimized = false;
2584 
2585   // If this is an already optimized use or def, return the optimized result.
2586   // Note: Currently, we store the optimized def result in a separate field,
2587   // since we can't use the defining access.
2588   if (StartingAccess->isOptimized()) {
2589     if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2590       return StartingAccess->getOptimized();
2591     IsOptimized = true;
2592   }
2593 
2594   const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with fences, since they conservatively clobber
  // all memory and have no locations to get pointers from to try to
  // disambiguate.
2598   if (!isa<CallBase>(I) && I->isFenceLike())
2599     return StartingAccess;
2600 
2601   UpwardsMemoryQuery Q(I, StartingAccess);
2602 
2603   if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2604     MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2605     StartingAccess->setOptimized(LiveOnEntry);
2606     StartingAccess->setOptimizedAccessType(None);
2607     return LiveOnEntry;
2608   }
2609 
2610   MemoryAccess *OptimizedAccess;
2611   if (!IsOptimized) {
2612     // Start with the thing we already think clobbers this location
2613     MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2614 
2615     // At this point, DefiningAccess may be the live on entry def.
2616     // If it is, we will not get a better result.
2617     if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2618       StartingAccess->setOptimized(DefiningAccess);
2619       StartingAccess->setOptimizedAccessType(None);
2620       return DefiningAccess;
2621     }
2622 
2623     OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2624     StartingAccess->setOptimized(OptimizedAccess);
2625     if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2626       StartingAccess->setOptimizedAccessType(None);
2627     else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2628       StartingAccess->setOptimizedAccessType(
2629           AliasResult(AliasResult::MustAlias));
2630   } else
2631     OptimizedAccess = StartingAccess->getOptimized();
2632 
2633   LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2634   LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2635   LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2636   LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2637 
2638   MemoryAccess *Result;
2639   if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2640       isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2641     assert(isa<MemoryDef>(Q.OriginalAccess));
2642     Q.SkipSelfAccess = true;
2643     Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2644   } else
2645     Result = OptimizedAccess;
2646 
2647   LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2648   LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2649 
2650   return Result;
2651 }
2652 
2653 MemoryAccess *
2654 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2655   if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2656     return Use->getDefiningAccess();
2657   return MA;
2658 }
2659 
2660 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2661     MemoryAccess *StartingAccess, const MemoryLocation &) {
2662   if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2663     return Use->getDefiningAccess();
2664   return StartingAccess;
2665 }
2666 
2667 void MemoryPhi::deleteMe(DerivedUser *Self) {
2668   delete static_cast<MemoryPhi *>(Self);
2669 }
2670 
2671 void MemoryDef::deleteMe(DerivedUser *Self) {
2672   delete static_cast<MemoryDef *>(Self);
2673 }
2674 
2675 void MemoryUse::deleteMe(DerivedUser *Self) {
2676   delete static_cast<MemoryUse *>(Self);
2677 }
2678 
2679 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
2680   auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2681     Ptr = Ptr->stripPointerCasts();
2682     if (!isa<Instruction>(Ptr))
2683       return true;
2684     return isa<AllocaInst>(Ptr);
2685   };
2686 
2687   Ptr = Ptr->stripPointerCasts();
2688   if (auto *I = dyn_cast<Instruction>(Ptr)) {
2689     if (I->getParent()->isEntryBlock())
2690       return true;
2691   }
2692   if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2693     return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2694            GEP->hasAllConstantIndices();
2695   }
2696   return IsGuaranteedLoopInvariantBase(Ptr);
2697 }
2698