1 //===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the MemorySSA class.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/Analysis/MemorySSA.h"
14 #include "llvm/ADT/DenseMap.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/DenseSet.h"
17 #include "llvm/ADT/DepthFirstIterator.h"
18 #include "llvm/ADT/Hashing.h"
19 #include "llvm/ADT/None.h"
20 #include "llvm/ADT/Optional.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallPtrSet.h"
23 #include "llvm/ADT/SmallVector.h"
24 #include "llvm/ADT/StringExtras.h"
25 #include "llvm/ADT/iterator.h"
26 #include "llvm/ADT/iterator_range.h"
27 #include "llvm/Analysis/AliasAnalysis.h"
28 #include "llvm/Analysis/CFGPrinter.h"
29 #include "llvm/Analysis/IteratedDominanceFrontier.h"
30 #include "llvm/Analysis/MemoryLocation.h"
31 #include "llvm/Config/llvm-config.h"
32 #include "llvm/IR/AssemblyAnnotationWriter.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Dominators.h"
35 #include "llvm/IR/Function.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/Use.h"
43 #include "llvm/InitializePasses.h"
44 #include "llvm/Pass.h"
45 #include "llvm/Support/AtomicOrdering.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/CommandLine.h"
48 #include "llvm/Support/Compiler.h"
49 #include "llvm/Support/Debug.h"
50 #include "llvm/Support/ErrorHandling.h"
51 #include "llvm/Support/FormattedStream.h"
52 #include "llvm/Support/GraphWriter.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include <algorithm>
55 #include <cassert>
56 #include <iterator>
57 #include <memory>
58 #include <utility>
59
60 using namespace llvm;
61
62 #define DEBUG_TYPE "memoryssa"
63
64 static cl::opt<std::string>
65 DotCFGMSSA("dot-cfg-mssa",
66 cl::value_desc("file name for generated dot file"),
67 cl::desc("file name for generated dot file"), cl::init(""));
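// For example, passing -dot-cfg-mssa=mssa.dot (the file name here is purely
// illustrative) is intended to make the MemorySSA printer emit the annotated
// CFG as a dot file rather than plain text.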
68
69 INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
70 true)
71 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
72 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
73 INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
74 true)
75
76 INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
77 "Memory SSA Printer", false, false)
78 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
79 INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
80 "Memory SSA Printer", false, false)
81
82 static cl::opt<unsigned> MaxCheckLimit(
83 "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));
86
87 // Always verify MemorySSA if expensive checking is enabled.
88 #ifdef EXPENSIVE_CHECKS
89 bool llvm::VerifyMemorySSA = true;
90 #else
91 bool llvm::VerifyMemorySSA = false;
92 #endif
93
94 static cl::opt<bool, true>
95 VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
96 cl::Hidden, cl::desc("Enable verification of MemorySSA."));
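// The flag can be enabled from tools such as opt via -verify-memoryssa; it
// simply sets VerifyMemorySSA above through cl::location.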
97
98 const static char LiveOnEntryStr[] = "liveOnEntry";
99
100 namespace {
101
102 /// An assembly annotator class to print Memory SSA information in
103 /// comments.
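/// The printed form interleaves the accesses with the IR, e.g.:
///   ; 1 = MemoryDef(liveOnEntry)
///   store i32 0, i32* %p
///   ; MemoryUse(1)
///   %v = load i32, i32* %p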
104 class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
105 const MemorySSA *MSSA;
106
107 public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}
109
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }
115
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
121 };
122
123 /// An assembly annotator class to print Memory SSA information in
124 /// comments.
125 class MemorySSAWalkerAnnotatedWriter : public AssemblyAnnotationWriter {
126 MemorySSA *MSSA;
127 MemorySSAWalker *Walker;
128
129 public:
  MemorySSAWalkerAnnotatedWriter(MemorySSA *M)
      : MSSA(M), Walker(M->getWalker()) {}
132
  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }
138
  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I)) {
      MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(MA);
      OS << "; " << *MA;
      if (Clobber) {
        OS << " - clobbered by ";
        if (MSSA->isLiveOnEntryDef(Clobber))
          OS << LiveOnEntryStr;
        else
          OS << *Clobber;
      }
      OS << "\n";
    }
  }
154 };
155
156 } // namespace
157
158 namespace {
159
160 /// Our current alias analysis API differentiates heavily between calls and
161 /// non-calls, and functions called on one usually assert on the other.
162 /// This class encapsulates the distinction to simplify other code that wants
163 /// "Memory affecting instructions and related data" to use as a key.
164 /// For example, this class is used as a densemap key in the use optimizer.
165 class MemoryLocOrCall {
166 public:
167 bool IsCall = false;
168
  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memorylocation for a fence inst, and it is
      // unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }
186
  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}
188
  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }
198
  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledOperand() != Other.Call->getCalledOperand())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }
213
214 private:
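  // Only one union member below is meaningful at a time: Call when IsCall is
  // true, Loc otherwise (for a fence instruction, neither is set).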
215 union {
216 const CallBase *Call;
217 MemoryLocation Loc;
218 };
219 };
220
221 } // end anonymous namespace
222
223 namespace llvm {
224
225 template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledOperand()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
252 };
253
254 } // end namespace llvm
255
256 /// This does one-way checks to see if Use could theoretically be hoisted above
257 /// MayClobber. This will not check the other way around.
258 ///
259 /// This assumes that, for the purposes of MemorySSA, Use comes directly after
260 /// MayClobber, with no potentially clobbering operations in between them.
261 /// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
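/// For example, a monotonic load may be hoisted above another monotonic load
/// of the same address, but no load may be hoisted above an acquire load, and
/// a seq_cst load may not be hoisted above any other load.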
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
264 bool VolatileUse = Use->isVolatile();
265 bool VolatileClobber = MayClobber->isVolatile();
266 // Volatile operations may never be reordered with other volatile operations.
267 if (VolatileUse && VolatileClobber)
268 return false;
269 // Otherwise, volatile doesn't matter here. From the language reference:
270 // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'
272
273 // If a load is seq_cst, it cannot be moved above other loads. If its ordering
274 // is weaker, it can be moved above other loads. We just need to be sure that
275 // MayClobber isn't an acquire load, because loads can't be moved above
276 // acquire loads.
277 //
278 // Note that this explicitly *does* allow the free reordering of monotonic (or
279 // weaker) loads of the same address.
280 bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
281 bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
282 AtomicOrdering::Acquire);
283 return !(SeqCstUse || MayClobberIsAcquire);
284 }
285
286 namespace {
287
288 struct ClobberAlias {
289 bool IsClobber;
290 Optional<AliasResult> AR;
291 };
292
293 } // end anonymous namespace
294
295 // Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
296 // ignored if IsClobber = false.
template <typename AliasAnalysisType>
static ClobberAlias
instructionClobbersQuery(const MemoryDef *MD, const MemoryLocation &UseLoc,
                         const Instruction *UseInst, AliasAnalysisType &AA) {
301 Instruction *DefInst = MD->getMemoryInst();
302 assert(DefInst && "Defining instruction not actually an instruction");
303 Optional<AliasResult> AR;
304
305 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
306 // These intrinsics will show up as affecting memory, but they are just
307 // markers, mostly.
308 //
309 // FIXME: We probably don't actually want MemorySSA to model these at all
310 // (including creating MemoryAccesses for them): we just end up inventing
311 // clobbers where they don't really exist at all. Please see D43269 for
312 // context.
313 switch (II->getIntrinsicID()) {
314 case Intrinsic::invariant_start:
315 case Intrinsic::invariant_end:
316 case Intrinsic::assume:
317 case Intrinsic::experimental_noalias_scope_decl:
318 case Intrinsic::pseudoprobe:
319 return {false, AliasResult(AliasResult::NoAlias)};
320 case Intrinsic::dbg_addr:
321 case Intrinsic::dbg_declare:
322 case Intrinsic::dbg_label:
323 case Intrinsic::dbg_value:
324 llvm_unreachable("debuginfo shouldn't have associated defs!");
325 default:
326 break;
327 }
328 }
329
330 if (auto *CB = dyn_cast_or_null<CallBase>(UseInst)) {
331 ModRefInfo I = AA.getModRefInfo(DefInst, CB);
332 AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
333 return {isModOrRefSet(I), AR};
334 }
335
336 if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
337 if (auto *UseLoad = dyn_cast_or_null<LoadInst>(UseInst))
338 return {!areLoadsReorderable(UseLoad, DefLoad),
339 AliasResult(AliasResult::MayAlias)};
340
341 ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
342 AR = isMustSet(I) ? AliasResult::MustAlias : AliasResult::MayAlias;
343 return {isModSet(I), AR};
344 }
345
template <typename AliasAnalysisType>
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysisType &AA) {
351 // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
352 // to exist while MemoryLocOrCall is pushed through places.
353 if (UseMLOC.IsCall)
354 return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
355 AA);
356 return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
357 AA);
358 }
359
360 // Return true when MD may alias MU, return false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
363 return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
364 }
365
366 namespace {
367
368 struct UpwardsMemoryQuery {
369 // True if our original query started off as a call
370 bool IsCall = false;
371 // The pointer location we started the query with. This will be empty if
372 // IsCall is true.
373 MemoryLocation StartingLoc;
374 // This is the instruction we were querying about.
375 const Instruction *Inst = nullptr;
376 // The MemoryAccess we actually got called with, used to test local domination
377 const MemoryAccess *OriginalAccess = nullptr;
378 Optional<AliasResult> AR = AliasResult(AliasResult::MayAlias);
379 bool SkipSelfAccess = false;
380
381 UpwardsMemoryQuery() = default;
382
  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
385 if (!IsCall)
386 StartingLoc = MemoryLocation::get(Inst);
387 }
388 };
389
390 } // end anonymous namespace
391
template <typename AliasAnalysisType>
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysisType &AA,
                                                   const Instruction *I) {
395 // If the memory can't be changed, then loads of the memory can't be
396 // clobbered.
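  // For example, a load tagged with !invariant.load metadata, or a load from
  // memory that AA can prove is constant (e.g. a constant global), is only
  // ever "clobbered" by liveOnEntry.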
397 if (auto *LI = dyn_cast<LoadInst>(I))
398 return I->hasMetadata(LLVMContext::MD_invariant_load) ||
399 AA.pointsToConstantMemory(MemoryLocation::get(LI));
400 return false;
401 }
402
403 /// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
405 ///
406 /// This is meant to be as simple and self-contained as possible. Because it
407 /// uses no cache, etc., it can be relatively expensive.
408 ///
409 /// \param Start The MemoryAccess that we want to walk from.
410 /// \param ClobberAt A clobber for Start.
411 /// \param StartLoc The MemoryLocation for Start.
412 /// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
413 /// \param Query The UpwardsMemoryQuery we used for our search.
414 /// \param AA The AliasAnalysis we used for our search.
415 /// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
416
template <typename AliasAnalysisType>
LLVM_ATTRIBUTE_UNUSED static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysisType &AA,
                   bool AllowImpreciseClobber = false) {
423 assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");
424
425 if (MSSA.isLiveOnEntryDef(Start)) {
426 assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
427 "liveOnEntry must clobber itself");
428 return;
429 }
430
431 bool FoundClobber = false;
432 DenseSet<ConstMemoryAccessPair> VisitedPhis;
433 SmallVector<ConstMemoryAccessPair, 8> Worklist;
434 Worklist.emplace_back(Start, StartLoc);
435 // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
436 // is found, complain.
437 while (!Worklist.empty()) {
438 auto MAP = Worklist.pop_back_val();
439 // All we care about is that nothing from Start to ClobberAt clobbers Start.
440 // We learn nothing from revisiting nodes.
441 if (!VisitedPhis.insert(MAP).second)
442 continue;
443
444 for (const auto *MA : def_chain(MAP.first)) {
445 if (MA == ClobberAt) {
446 if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
447 // instructionClobbersQuery isn't essentially free, so don't use `|=`,
448 // since it won't let us short-circuit.
449 //
450 // Also, note that this can't be hoisted out of the `Worklist` loop,
451 // since MD may only act as a clobber for 1 of N MemoryLocations.
452 FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
453 if (!FoundClobber) {
454 ClobberAlias CA =
455 instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
456 if (CA.IsClobber) {
457 FoundClobber = true;
458 // Not used: CA.AR;
459 }
460 }
461 }
462 break;
463 }
464
465 // We should never hit liveOnEntry, unless it's the clobber.
466 assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");
467
468 if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
469 // If Start is a Def, skip self.
470 if (MD == Start)
471 continue;
472
473 assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
474 .IsClobber &&
475 "Found clobber before reaching ClobberAt!");
476 continue;
477 }
478
479 if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
480 (void)MU;
481 assert (MU == Start &&
482 "Can only find use in def chain if Start is a use");
483 continue;
484 }
485
486 assert(isa<MemoryPhi>(MA));
487
488 // Add reachable phi predecessors
489 for (auto ItB = upward_defs_begin(
490 {const_cast<MemoryAccess *>(MA), MAP.second},
491 MSSA.getDomTree()),
492 ItE = upward_defs_end();
493 ItB != ItE; ++ItB)
494 if (MSSA.getDomTree().isReachableFromEntry(ItB.getPhiArgBlock()))
495 Worklist.emplace_back(*ItB);
496 }
497 }
498
499 // If the verify is done following an optimization, it's possible that
500 // ClobberAt was a conservative clobbering, that we can now infer is not a
501 // true clobbering access. Don't fail the verify if that's the case.
502 // We do have accesses that claim they're optimized, but could be optimized
503 // further. Updating all these can be expensive, so allow it for now (FIXME).
504 if (AllowImpreciseClobber)
505 return;
506
507 // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
508 // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
509 assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
510 "ClobberAt never acted as a clobber");
511 }
512
513 namespace {
514
515 /// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
516 /// in one class.
517 template <class AliasAnalysisType> class ClobberWalker {
518 /// Save a few bytes by using unsigned instead of size_t.
519 using ListIndex = unsigned;
520
521 /// Represents a span of contiguous MemoryDefs, potentially ending in a
522 /// MemoryPhi.
523 struct DefPath {
524 MemoryLocation Loc;
525 // Note that, because we always walk in reverse, Last will always dominate
526 // First. Also note that First and Last are inclusive.
527 MemoryAccess *First;
528 MemoryAccess *Last;
529 Optional<ListIndex> Previous;
530
    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
538 };
539
540 const MemorySSA &MSSA;
541 AliasAnalysisType &AA;
542 DominatorTree &DT;
543 UpwardsMemoryQuery *Query;
544 unsigned *UpwardWalkLimit;
545
546 // Phi optimization bookkeeping:
547 // List of DefPath to process during the current phi optimization walk.
548 SmallVector<DefPath, 32> Paths;
549 // List of visited <Access, Location> pairs; we can skip paths already
550 // visited with the same memory location.
551 DenseSet<ConstMemoryAccessPair> VisitedPhis;
552 // Record if phi translation has been performed during the current phi
553 // optimization walk, as merging alias results after phi translation can
554 // yield incorrect results. Context in PR46156.
555 bool PerformedPhiTranslation = false;
556
557 /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
559 assert(From->getNumOperands() && "Phi with no operands?");
560
561 BasicBlock *BB = From->getBlock();
562 MemoryAccess *Result = MSSA.getLiveOnEntryDef();
563 DomTreeNode *Node = DT.getNode(BB);
564 while ((Node = Node->getIDom())) {
565 auto *Defs = MSSA.getBlockDefs(Node->getBlock());
566 if (Defs)
567 return &*Defs->rbegin();
568 }
569 return Result;
570 }
571
572 /// Result of calling walkToPhiOrClobber.
573 struct UpwardsWalkResult {
574 /// The "Result" of the walk. Either a clobber, the last thing we walked, or
575 /// both. Include alias info when clobber found.
576 MemoryAccess *Result;
577 bool IsKnownClobber;
578 Optional<AliasResult> AR;
579 };
580
581 /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
582 /// This will update Desc.Last as it walks. It will (optionally) also stop at
583 /// StopAt.
584 ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
589 assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");
590 assert(UpwardWalkLimit && "Need a valid walk limit");
591 bool LimitAlreadyReached = false;
592 // (*UpwardWalkLimit) may be 0 here, due to the loop in tryOptimizePhi. Set
593 // it to 1. This will not do any alias() calls. It either returns in the
594 // first iteration in the loop below, or is set back to 0 if all def chains
595 // are free of MemoryDefs.
596 if (!*UpwardWalkLimit) {
597 *UpwardWalkLimit = 1;
598 LimitAlreadyReached = true;
599 }
600
601 for (MemoryAccess *Current : def_chain(Desc.Last)) {
602 Desc.Last = Current;
603 if (Current == StopAt || Current == SkipStopAt)
604 return {Current, false, AliasResult(AliasResult::MayAlias)};
605
606 if (auto *MD = dyn_cast<MemoryDef>(Current)) {
607 if (MSSA.isLiveOnEntryDef(MD))
608 return {MD, true, AliasResult(AliasResult::MustAlias)};
609
610 if (!--*UpwardWalkLimit)
611 return {Current, true, AliasResult(AliasResult::MayAlias)};
612
613 ClobberAlias CA =
614 instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
615 if (CA.IsClobber)
616 return {MD, true, CA.AR};
617 }
618 }
619
620 if (LimitAlreadyReached)
621 *UpwardWalkLimit = 0;
622
623 assert(isa<MemoryPhi>(Desc.Last) &&
624 "Ended at a non-clobber that's not a phi?");
625 return {Desc.Last, false, AliasResult(AliasResult::MayAlias)};
626 }
627
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
630 auto UpwardDefsBegin = upward_defs_begin({Phi, Paths[PriorNode].Loc}, DT,
631 &PerformedPhiTranslation);
632 auto UpwardDefs = make_range(UpwardDefsBegin, upward_defs_end());
633 for (const MemoryAccessPair &P : UpwardDefs) {
634 PausedSearches.push_back(Paths.size());
635 Paths.emplace_back(P.second, P.first, PriorNode);
636 }
637 }
638
639 /// Represents a search that terminated after finding a clobber. This clobber
640 /// may or may not be present in the path of defs from LastNode..SearchStart,
641 /// since it may have been retrieved from cache.
642 struct TerminatedPath {
643 MemoryAccess *Clobber;
644 ListIndex LastNode;
645 };
646
647 /// Get an access that keeps us from optimizing to the given phi.
648 ///
649 /// PausedSearches is an array of indices into the Paths array. Its incoming
650 /// value is the indices of searches that stopped at the last phi optimization
651 /// target. It's left in an unspecified state.
652 ///
653 /// If this returns None, NewPaused is a vector of searches that terminated
654 /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
660 assert(!PausedSearches.empty() && "No searches to continue?");
661
662 // BFS vs DFS really doesn't make a difference here, so just do a DFS with
663 // PausedSearches as our stack.
664 while (!PausedSearches.empty()) {
665 ListIndex PathIndex = PausedSearches.pop_back_val();
666 DefPath &Node = Paths[PathIndex];
667
668 // If we've already visited this path with this MemoryLocation, we don't
669 // need to do so again.
670 //
671 // NOTE: That we just drop these paths on the ground makes caching
672 // behavior sporadic. e.g. given a diamond:
673 // A
674 // B C
675 // D
676 //
677 // ...If we walk D, B, A, C, we'll only cache the result of phi
678 // optimization for A, B, and D; C will be skipped because it dies here.
679 // This arguably isn't the worst thing ever, since:
680 // - We generally query things in a top-down order, so if we got below D
681 // without needing cache entries for {C, MemLoc}, then chances are
682 // that those cache entries would end up ultimately unused.
683 // - We still cache things for A, so C only needs to walk up a bit.
684 // If this behavior becomes problematic, we can fix without a ton of extra
685 // work.
686 if (!VisitedPhis.insert({Node.Last, Node.Loc}).second) {
687 if (PerformedPhiTranslation) {
688 // If visiting this path performed Phi translation, don't continue,
689 // since it may not be correct to merge results from two paths if one
690 // relies on the phi translation.
691 TerminatedPath Term{Node.Last, PathIndex};
692 return Term;
693 }
694 continue;
695 }
696
697 const MemoryAccess *SkipStopWhere = nullptr;
698 if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
699 assert(isa<MemoryDef>(Query->OriginalAccess));
700 SkipStopWhere = Query->OriginalAccess;
701 }
702
703 UpwardsWalkResult Res = walkToPhiOrClobber(Node,
704 /*StopAt=*/StopWhere,
705 /*SkipStopAt=*/SkipStopWhere);
706 if (Res.IsKnownClobber) {
707 assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
708
709 // If this wasn't a cache hit, we hit a clobber when walking. That's a
710 // failure.
711 TerminatedPath Term{Res.Result, PathIndex};
712 if (!MSSA.dominates(Res.Result, StopWhere))
713 return Term;
714
715 // Otherwise, it's a valid thing to potentially optimize to.
716 Terminated.push_back(Term);
717 continue;
718 }
719
720 if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
721 // We've hit our target. Save this path off for if we want to continue
722 // walking. If we are in the mode of skipping the OriginalAccess, and
723 // we've reached back to the OriginalAccess, do not save path, we've
724 // just looped back to self.
725 if (Res.Result != SkipStopWhere)
726 NewPaused.push_back(PathIndex);
727 continue;
728 }
729
730 assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
731 addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
732 }
733
734 return None;
735 }
736
737 template <typename T, typename Walker>
738 struct generic_def_path_iterator
739 : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
740 std::forward_iterator_tag, T *> {
741 generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.has_value() != O.N.has_value())
        return false;
      return !N || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }
759
760 Walker *W = nullptr;
761 Optional<ListIndex> N = None;
762 };
763
764 using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
765 using const_def_path_iterator =
766 generic_def_path_iterator<const DefPath, const ClobberWalker>;
767
  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator> const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }
776
777 struct OptznResult {
778 /// The path that contains our result.
779 TerminatedPath PrimaryClobber;
780 /// The paths that we can legally cache back from, but that aren't
781 /// necessarily the result of the Phi optimization.
782 SmallVector<TerminatedPath, 4> OtherClobbers;
783 };
784
  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }
792
793 /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
794 /// that act as legal clobbers. Note that this won't return *all* clobbers.
795 ///
796 /// Phi optimization algorithm tl;dr:
797 /// - Find the earliest def/phi, A, we can optimize to
798 /// - Find if all paths from the starting memory access ultimately reach A
799 /// - If not, optimization isn't possible.
800 /// - Otherwise, walk from A to another clobber or phi, A'.
801 /// - If A' is a def, we're done.
802 /// - If A' is a phi, try to optimize it.
803 ///
804 /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
805 /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
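  ///
  /// As a rough illustration, for the diamond below (S is where the query
  /// starts, A is the phi's walk target):
  ///        A
  ///       / \
  ///      B   C
  ///       \ /
  ///        S
  /// we first check that every upward path from S reaches A without being
  /// blocked by a clobber of Loc; if some path is blocked, the optimization
  /// stops there, and otherwise the search continues upward from A.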
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
808 assert(Paths.empty() && VisitedPhis.empty() && !PerformedPhiTranslation &&
809 "Reset the optimization state.");
810
811 Paths.emplace_back(Loc, Start, Phi, None);
812 // Stores how many "valid" optimization nodes we had prior to calling
813 // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
814 auto PriorPathsSize = Paths.size();
815
816 SmallVector<ListIndex, 16> PausedSearches;
817 SmallVector<ListIndex, 8> NewPaused;
818 SmallVector<TerminatedPath, 4> TerminatedPaths;
819
820 addSearches(Phi, PausedSearches, 0);
821
822 // Moves the TerminatedPath with the "most dominated" Clobber to the end of
823 // Paths.
824 auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
825 assert(!Paths.empty() && "Need a path to move");
826 auto Dom = Paths.begin();
827 for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
828 if (!MSSA.dominates(I->Clobber, Dom->Clobber))
829 Dom = I;
830 auto Last = Paths.end() - 1;
831 if (Last != Dom)
832 std::iter_swap(Last, Dom);
833 };
834
835 MemoryPhi *Current = Phi;
836 while (true) {
837 assert(!MSSA.isLiveOnEntryDef(Current) &&
838 "liveOnEntry wasn't treated as a clobber?");
839
840 const auto *Target = getWalkTarget(Current);
841 // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
842 // optimization for the prior phi.
843 assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
844 return MSSA.dominates(P.Clobber, Target);
845 }));
846
847 // FIXME: This is broken, because the Blocker may be reported to be
848 // liveOnEntry, and we'll happily wait for that to disappear (read: never)
849 // For the moment, this is fine, since we do nothing with blocker info.
850 if (Optional<TerminatedPath> Blocker = getBlockingAccess(
851 Target, PausedSearches, NewPaused, TerminatedPaths)) {
852
853 // Find the node we started at. We can't search based on N->Last, since
854 // we may have gone around a loop with a different MemoryLocation.
855 auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
856 return defPathIndex(N) < PriorPathsSize;
857 });
858 assert(Iter != def_path_iterator());
859
860 DefPath &CurNode = *Iter;
861 assert(CurNode.Last == Current);
862
863 // Two things:
864 // A. We can't reliably cache all of NewPaused back. Consider a case
865 // where we have two paths in NewPaused; one of which can't optimize
866 // above this phi, whereas the other can. If we cache the second path
867 // back, we'll end up with suboptimal cache entries. We can handle
868 // cases like this a bit better when we either try to find all
869 // clobbers that block phi optimization, or when our cache starts
870 // supporting unfinished searches.
871 // B. We can't reliably cache TerminatedPaths back here without doing
872 // extra checks; consider a case like:
873 // T
874 // / \
875 // D C
876 // \ /
877 // S
878 // Where T is our target, C is a node with a clobber on it, D is a
879 // diamond (with a clobber *only* on the left or right node, N), and
880 // S is our start. Say we walk to D, through the node opposite N
881 // (read: ignoring the clobber), and see a cache entry in the top
882 // node of D. That cache entry gets put into TerminatedPaths. We then
883 // walk up to C (N is later in our worklist), find the clobber, and
884 // quit. If we append TerminatedPaths to OtherClobbers, we'll cache
885 // the bottom part of D to the cached clobber, ignoring the clobber
886 // in N. Again, this problem goes away if we start tracking all
887 // blockers for a given phi optimization.
888 TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
889 return {Result, {}};
890 }
891
892 // If there's nothing left to search, then all paths led to valid clobbers
893 // that we got from our cache; pick the nearest to the start, and allow
894 // the rest to be cached back.
895 if (NewPaused.empty()) {
896 MoveDominatedPathToEnd(TerminatedPaths);
897 TerminatedPath Result = TerminatedPaths.pop_back_val();
898 return {Result, std::move(TerminatedPaths)};
899 }
900
901 MemoryAccess *DefChainEnd = nullptr;
902 SmallVector<TerminatedPath, 4> Clobbers;
903 for (ListIndex Paused : NewPaused) {
904 UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
905 if (WR.IsKnownClobber)
906 Clobbers.push_back({WR.Result, Paused});
907 else
908 // Micro-opt: If we hit the end of the chain, save it.
909 DefChainEnd = WR.Result;
910 }
911
912 if (!TerminatedPaths.empty()) {
913 // If we couldn't find the dominating phi/liveOnEntry in the above loop,
914 // do it now.
915 if (!DefChainEnd)
916 for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
917 DefChainEnd = MA;
918 assert(DefChainEnd && "Failed to find dominating phi/liveOnEntry");
919
920 // If any of the terminated paths don't dominate the phi we'll try to
921 // optimize, we need to figure out what they are and quit.
922 const BasicBlock *ChainBB = DefChainEnd->getBlock();
923 for (const TerminatedPath &TP : TerminatedPaths) {
924 // Because we know that DefChainEnd is as "high" as we can go, we
925 // don't need local dominance checks; BB dominance is sufficient.
926 if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
927 Clobbers.push_back(TP);
928 }
929 }
930
931 // If we have clobbers in the def chain, find the one closest to Current
932 // and quit.
933 if (!Clobbers.empty()) {
934 MoveDominatedPathToEnd(Clobbers);
935 TerminatedPath Result = Clobbers.pop_back_val();
936 return {Result, std::move(Clobbers)};
937 }
938
939 assert(all_of(NewPaused,
940 [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));
941
942 // Because liveOnEntry is a clobber, this must be a phi.
943 auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);
944
945 PriorPathsSize = Paths.size();
946 PausedSearches.clear();
947 for (ListIndex I : NewPaused)
948 addSearches(DefChainPhi, PausedSearches, I);
949 NewPaused.clear();
950
951 Current = DefChainPhi;
952 }
953 }
954
  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
    PerformedPhiTranslation = false;
  }
966
967 public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysisType &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  AliasAnalysisType *getAA() { return &AA; }
972 /// Finds the nearest clobber for the given query, optimizing phis if
973 /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q,
                            unsigned &UpWalkLimit) {
976 Query = &Q;
977 UpwardWalkLimit = &UpWalkLimit;
978 // Starting limit must be > 0.
979 if (!UpWalkLimit)
980 UpWalkLimit++;
981
982 MemoryAccess *Current = Start;
983 // This walker pretends uses don't exist. If we're handed one, silently grab
984 // its def. (This has the nice side-effect of ensuring we never cache uses)
985 if (auto *MU = dyn_cast<MemoryUse>(Start))
986 Current = MU->getDefiningAccess();
987
988 DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
989 // Fast path for the overly-common case (no crazy phi optimization
990 // necessary)
991 UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
992 MemoryAccess *Result;
993 if (WalkResult.IsKnownClobber) {
994 Result = WalkResult.Result;
995 Q.AR = WalkResult.AR;
996 } else {
997 OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
998 Current, Q.StartingLoc);
999 verifyOptResult(OptRes);
1000 resetPhiOptznState();
1001 Result = OptRes.PrimaryClobber.Clobber;
1002 }
1003
1004 #ifdef EXPENSIVE_CHECKS
1005 if (!Q.SkipSelfAccess && *UpwardWalkLimit > 0)
1006 checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
1007 #endif
1008 return Result;
1009 }
1010 };
1011
1012 struct RenamePassData {
1013 DomTreeNode *DTN;
1014 DomTreeNode::const_iterator ChildIt;
1015 MemoryAccess *IncomingVal;
1016
  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
1026 };
1027
1028 } // end anonymous namespace
1029
1030 namespace llvm {
1031
1032 template <class AliasAnalysisType> class MemorySSA::ClobberWalkerBase {
1033 ClobberWalker<AliasAnalysisType> Walker;
1034 MemorySSA *MSSA;
1035
1036 public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysisType *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}
1039
1040 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
1041 const MemoryLocation &,
1042 unsigned &);
1043 // Third argument (bool), defines whether the clobber search should skip the
1044 // original queried access. If true, there will be a follow-up query searching
1045 // for a clobber access past "self". Note that the Optimized access is not
1046 // updated if a new clobber is found by this SkipSelf search. If this
1047 // additional query becomes heavily used we may decide to cache the result.
1048 // Walker instantiations will decide how to set the SkipSelf bool.
1049 MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, unsigned &, bool,
1050 bool UseInvariantGroup = true);
1051 };
1052
1053 /// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
1054 /// longer does caching on its own, but the name has been retained for the
1055 /// moment.
1056 template <class AliasAnalysisType>
1057 class MemorySSA::CachingWalker final : public MemorySSAWalker {
1058 ClobberWalkerBase<AliasAnalysisType> *Walker;
1059
1060 public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
1063 ~CachingWalker() override = default;
1064
1065 using MemorySSAWalker::getClobberingMemoryAccess;
1066
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }
  // This method is not accessible outside of this file.
  MemoryAccess *getClobberingMemoryAccessWithoutInvariantGroup(MemoryAccess *MA,
                                                               unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, false, false);
  }
1080
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
1095 };
1096
1097 template <class AliasAnalysisType>
1098 class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
1099 ClobberWalkerBase<AliasAnalysisType> *Walker;
1100
1101 public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase<AliasAnalysisType> *W)
      : MemorySSAWalker(M), Walker(W) {}
1104 ~SkipSelfWalker() override = default;
1105
1106 using MemorySSAWalker::getClobberingMemoryAccess;
1107
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA, unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, UWL, true);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc,
                                          unsigned &UWL) {
    return Walker->getClobberingMemoryAccessBase(MA, Loc, UWL);
  }
1116
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, UpwardWalkLimit);
  }
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override {
    unsigned UpwardWalkLimit = MaxCheckLimit;
    return getClobberingMemoryAccess(MA, Loc, UpwardWalkLimit);
  }

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }
1131 };
1132
1133 } // end namespace llvm
1134
void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
1137 // Pass through values to our successors
1138 for (const BasicBlock *S : successors(BB)) {
1139 auto It = PerBlockAccesses.find(S);
1140 // Rename the phi nodes in our successor block
1141 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1142 continue;
1143 AccessList *Accesses = It->second.get();
1144 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1145 if (RenameAllUses) {
1146 bool ReplacementDone = false;
1147 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I)
1148 if (Phi->getIncomingBlock(I) == BB) {
1149 Phi->setIncomingValue(I, IncomingVal);
1150 ReplacementDone = true;
1151 }
1152 (void) ReplacementDone;
1153 assert(ReplacementDone && "Incomplete phi during partial rename");
1154 } else
1155 Phi->addIncoming(IncomingVal, BB);
1156 }
1157 }
1158
1159 /// Rename a single basic block into MemorySSA form.
1160 /// Uses the standard SSA renaming algorithm.
1161 /// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
1164 auto It = PerBlockAccesses.find(BB);
1165 // Skip most processing if the list is empty.
1166 if (It != PerBlockAccesses.end()) {
1167 AccessList *Accesses = It->second.get();
1168 for (MemoryAccess &L : *Accesses) {
1169 if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
1170 if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
1171 MUD->setDefiningAccess(IncomingVal);
1172 if (isa<MemoryDef>(&L))
1173 IncomingVal = &L;
1174 } else {
1175 IncomingVal = &L;
1176 }
1177 }
1178 }
1179 return IncomingVal;
1180 }
1181
1182 /// This is the standard SSA renaming algorithm.
1183 ///
1184 /// We walk the dominator tree in preorder, renaming accesses, and then filling
1185 /// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
1189 assert(Root && "Trying to rename accesses in an unreachable block");
1190
1191 SmallVector<RenamePassData, 32> WorkStack;
1192 // Skip everything if we already renamed this block and we are skipping.
1193 // Note: You can't sink this into the if, because we need it to occur
1194 // regardless of whether we skip blocks or not.
1195 bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
1196 if (SkipVisited && AlreadyVisited)
1197 return;
1198
1199 IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
1200 renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
1201 WorkStack.push_back({Root, Root->begin(), IncomingVal});
1202
1203 while (!WorkStack.empty()) {
1204 DomTreeNode *Node = WorkStack.back().DTN;
1205 DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
1206 IncomingVal = WorkStack.back().IncomingVal;
1207
1208 if (ChildIt == Node->end()) {
1209 WorkStack.pop_back();
1210 } else {
1211 DomTreeNode *Child = *ChildIt;
1212 ++WorkStack.back().ChildIt;
1213 BasicBlock *BB = Child->getBlock();
1214 // Note: You can't sink this into the if, because we need it to occur
1215 // regardless of whether we skip blocks or not.
1216 AlreadyVisited = !Visited.insert(BB).second;
1217 if (SkipVisited && AlreadyVisited) {
1218 // We already visited this during our renaming, which can happen when
1219 // being asked to rename multiple blocks. Figure out the incoming val,
1220 // which is the last def.
1221 // Incoming value can only change if there is a block def, and in that
1222 // case, it's the last block def in the list.
1223 if (auto *BlockDefs = getWritableBlockDefs(BB))
1224 IncomingVal = &*BlockDefs->rbegin();
1225 } else
1226 IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
1227 renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
1228 WorkStack.push_back({Child, Child->begin(), IncomingVal});
1229 }
1230 }
1231 }
1232
1233 /// This handles unreachable block accesses by deleting phi nodes in
1234 /// unreachable blocks, and marking all other unreachable MemoryAccess's as
1235 /// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
1237 assert(!DT->isReachableFromEntry(BB) &&
1238 "Reachable block found while handling unreachable blocks");
1239
1240 // Make sure phi nodes in our reachable successors end up with a
1241 // LiveOnEntryDef for our incoming edge, even though our block is forward
1242 // unreachable. We could just disconnect these blocks from the CFG fully,
1243 // but we do not right now.
1244 for (const BasicBlock *S : successors(BB)) {
1245 if (!DT->isReachableFromEntry(S))
1246 continue;
1247 auto It = PerBlockAccesses.find(S);
1248 // Rename the phi nodes in our successor block
1249 if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
1250 continue;
1251 AccessList *Accesses = It->second.get();
1252 auto *Phi = cast<MemoryPhi>(&Accesses->front());
1253 Phi->addIncoming(LiveOnEntryDef.get(), BB);
1254 }
1255
1256 auto It = PerBlockAccesses.find(BB);
1257 if (It == PerBlockAccesses.end())
1258 return;
1259
1260 auto &Accesses = It->second;
1261 for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
1262 auto Next = std::next(AI);
1263 // If we have a phi, just remove it. We are going to replace all
1264 // users with live on entry.
1265 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
1266 UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
1267 else
1268 Accesses->erase(AI);
1269 AI = Next;
1270 }
1271 }
1272
MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr) {
1276 // Build MemorySSA using a batch alias analysis. This reuses the internal
1277 // state that AA collects during an alias()/getModRefInfo() call. This is
1278 // safe because there are no CFG changes while building MemorySSA and can
1279 // significantly reduce the time spent by the compiler in AA, because we will
1280 // make queries about all the instructions in the Function.
1281 assert(AA && "No alias analysis?");
1282 BatchAAResults BatchAA(*AA);
1283 buildMemorySSA(BatchAA);
  // Intentionally leave AA as nullptr while building so we don't accidentally
  // use non-batch AliasAnalysis.
1286 this->AA = AA;
1287 // Also create the walker here.
1288 getWalker();
1289 }
1290
MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}
1297
MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = std::make_unique<DefsList>();
  return Res.first->second.get();
}
1313
1314 namespace llvm {
1315
1316 /// This class is a batch walker of all MemoryUse's in the program, and points
1317 /// their defining access at the thing that actually clobbers them. Because it
1318 /// is a batch walker that touches everything, it does not operate like the
1319 /// other walkers. This walker is basically performing a top-down SSA renaming
1320 /// pass, where the version stack is used as the cache. This enables it to be
1321 /// significantly more time and memory efficient than using the regular walker,
1322 /// which is walking bottom-up.
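/// In practice it walks the dominator tree in pre-order, keeps a stack of the
/// defs that dominate the current block, and resolves each MemoryUse against
/// that stack rather than issuing a separate upwards walk per use.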
1323 class MemorySSA::OptimizeUses {
1324 public:
  OptimizeUses(MemorySSA *MSSA, CachingWalker<BatchAAResults> *Walker,
               BatchAAResults *BAA, DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(BAA), DT(DT) {}
1328
1329 void optimizeUses();
1330
1331 private:
1332 /// This represents where a given memorylocation is in the stack.
1333 struct MemlocStackInfo {
1334 // This essentially is keeping track of versions of the stack. Whenever
1335 // the stack changes due to pushes or pops, these versions increase.
1336 unsigned long StackEpoch;
1337 unsigned long PopEpoch;
1338 // This is the lower bound of places on the stack to check. It is equal to
1339 // the place the last stack walk ended.
1340 // Note: Correctness depends on this being initialized to 0, which densemap
1341 // does
1342 unsigned long LowerBound;
1343 const BasicBlock *LowerBoundBlock;
1344 // This is where the last walk for this memory location ended.
1345 unsigned long LastKill;
1346 bool LastKillValid;
1347 Optional<AliasResult> AR;
1348 };
1349
1350 void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
1351 SmallVectorImpl<MemoryAccess *> &,
1352 DenseMap<MemoryLocOrCall, MemlocStackInfo> &);
1353
1354 MemorySSA *MSSA;
1355 CachingWalker<BatchAAResults> *Walker;
1356 BatchAAResults *AA;
1357 DominatorTree *DT;
1358 };
1359
1360 } // end namespace llvm
1361
/// Optimize the uses in a given block. This is basically the SSA renaming
1363 /// algorithm, with one caveat: We are able to use a single stack for all
1364 /// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
1365 /// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
1366 /// going to be some position in that stack of possible ones.
1367 ///
1368 /// We track the stack positions that each MemoryLocation needs
1369 /// to check, and last ended at. This is because we only want to check the
1370 /// things that changed since last time. The same MemoryLocation should
/// get clobbered by the same store (getModRefInfo does not use invariantness
/// or similar properties; if it starts to, we can modify MemoryLocOrCall to
/// include the relevant data).
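///
/// Roughly, VersionStack holds liveOnEntry plus every MemoryDef/MemoryPhi that
/// dominates the block being processed, and for each MemoryUse we only examine
/// the slice of that stack pushed since this location was last checked.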
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {
1378
1379 /// If no accesses, nothing to do.
1380 MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
1381 if (Accesses == nullptr)
1382 return;
1383
1384 // Pop everything that doesn't dominate the current block off the stack,
1385 // increment the PopEpoch to account for this.
1386 while (true) {
1387 assert(
1388 !VersionStack.empty() &&
1389 "Version stack should have liveOnEntry sentinel dominating everything");
1390 BasicBlock *BackBlock = VersionStack.back()->getBlock();
1391 if (DT->dominates(BackBlock, BB))
1392 break;
1393 while (VersionStack.back()->getBlock() == BackBlock)
1394 VersionStack.pop_back();
1395 ++PopEpoch;
1396 }
1397
1398 for (MemoryAccess &MA : *Accesses) {
1399 auto *MU = dyn_cast<MemoryUse>(&MA);
1400 if (!MU) {
1401 VersionStack.push_back(&MA);
1402 ++StackEpoch;
1403 continue;
1404 }
1405
1406 if (MU->isOptimized())
1407 continue;
1408
1409 if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
1410 MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
1411 continue;
1412 }
1413
1414 MemoryLocOrCall UseMLOC(MU);
1415 auto &LocInfo = LocStackInfo[UseMLOC];
1416 // If the pop epoch changed, it means we've removed stuff from top of
1417 // stack due to changing blocks. We may have to reset the lower bound or
1418 // last kill info.
1419 if (LocInfo.PopEpoch != PopEpoch) {
1420 LocInfo.PopEpoch = PopEpoch;
1421 LocInfo.StackEpoch = StackEpoch;
1422 // If the lower bound was in something that no longer dominates us, we
1423 // have to reset it.
1424 // We can't simply track stack size, because the stack may have had
1425 // pushes/pops in the meantime.
1426 // XXX: This is non-optimal, but is only slower in cases with heavily
1427 // branching dominator trees. To get the optimal number of queries would
1428 // be to make lowerbound and lastkill a per-loc stack, and pop it until
1429 // the top of that stack dominates us. This does not seem worth it ATM.
1430 // A much cheaper optimization would be to always explore the deepest
1431 // branch of the dominator tree first. This will guarantee this resets on
1432 // the smallest set of blocks.
1433 if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
1434 !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
1435 // Reset the lower bound of things to check.
1436 // TODO: Some day we should be able to reset to last kill, rather than
1437 // 0.
1438 LocInfo.LowerBound = 0;
1439 LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
1440 LocInfo.LastKillValid = false;
1441 }
1442 } else if (LocInfo.StackEpoch != StackEpoch) {
1443 // If all that has changed is the StackEpoch, we only have to check the
1444 // new things on the stack, because we've checked everything before. In
1445 // this case, the lower bound of things to check remains the same.
1446 LocInfo.PopEpoch = PopEpoch;
1447 LocInfo.StackEpoch = StackEpoch;
1448 }
1449 if (!LocInfo.LastKillValid) {
1450 LocInfo.LastKill = VersionStack.size() - 1;
1451 LocInfo.LastKillValid = true;
1452 LocInfo.AR = AliasResult::MayAlias;
1453 }
1454
1455 // At this point, we should have corrected last kill and LowerBound to be
1456 // in bounds.
1457 assert(LocInfo.LowerBound < VersionStack.size() &&
1458 "Lower bound out of range");
1459 assert(LocInfo.LastKill < VersionStack.size() &&
1460 "Last kill info out of range");
1461 // In any case, the new upper bound is the top of the stack.
1462 unsigned long UpperBound = VersionStack.size() - 1;
1463
1464 if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
1465 LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
1466 << *(MU->getMemoryInst()) << ")"
1467 << " because there are "
1468 << UpperBound - LocInfo.LowerBound
1469 << " stores to disambiguate\n");
1470 // Because we did not walk, LastKill is no longer valid, as this may
1471 // have been a kill.
1472 LocInfo.LastKillValid = false;
1473 continue;
1474 }
1475 bool FoundClobberResult = false;
1476 unsigned UpwardWalkLimit = MaxCheckLimit;
1477 while (UpperBound > LocInfo.LowerBound) {
1478 if (isa<MemoryPhi>(VersionStack[UpperBound])) {
1479 // For phis, use the walker, see where we ended up, go there.
1480 // The invariant.group handling in MemorySSA is ad-hoc and doesn't
1481 // support updates, so don't use it to optimize uses.
1482 MemoryAccess *Result =
1483 Walker->getClobberingMemoryAccessWithoutInvariantGroup(
1484 MU, UpwardWalkLimit);
1485 // We are guaranteed to find it or something is wrong.
1486 while (VersionStack[UpperBound] != Result) {
1487 assert(UpperBound != 0);
1488 --UpperBound;
1489 }
1490 FoundClobberResult = true;
1491 break;
1492 }
1493
1494 MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
1495 ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
1496 if (CA.IsClobber) {
1497 FoundClobberResult = true;
1498 LocInfo.AR = CA.AR;
1499 break;
1500 }
1501 --UpperBound;
1502 }
1503
1504 // Note: Phis always have AliasResult AR set to MayAlias ATM.
1505
1506 // At the end of this loop, UpperBound is either a clobber or the lower
1507 // bound. PHI walking may cause it to be < LowerBound, and in fact, < LastKill.
1508 if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
1509 // We were last killed now by where we got to
1510 if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
1511 LocInfo.AR = None;
1512 MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
1513 LocInfo.LastKill = UpperBound;
1514 } else {
1515 // Otherwise, we checked all the new ones, and now we know we can get to
1516 // LastKill.
1517 MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
1518 }
1519 LocInfo.LowerBound = VersionStack.size() - 1;
1520 LocInfo.LowerBoundBlock = BB;
1521 }
1522 }
1523
1524 /// Optimize uses to point to their actual clobbering definitions.
1525 void MemorySSA::OptimizeUses::optimizeUses() {
1526 SmallVector<MemoryAccess *, 16> VersionStack;
1527 DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
1528 VersionStack.push_back(MSSA->getLiveOnEntryDef());
1529
1530 unsigned long StackEpoch = 1;
1531 unsigned long PopEpoch = 1;
1532 // We perform a non-recursive top-down dominator tree walk.
1533 for (const auto *DomNode : depth_first(DT->getRootNode()))
1534 optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
1535 LocStackInfo);
1536 }
1537
1538 void MemorySSA::placePHINodes(
1539 const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
1540 // Determine where our MemoryPhi's should go
1541 ForwardIDFCalculator IDFs(*DT);
1542 IDFs.setDefiningBlocks(DefiningBlocks);
1543 SmallVector<BasicBlock *, 32> IDFBlocks;
1544 IDFs.calculate(IDFBlocks);
1545
1546 // Now place MemoryPhi nodes.
1547 for (auto &BB : IDFBlocks)
1548 createMemoryPhi(BB);
1549 }
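// Editorial note: for example, in a diamond CFG with a store in each arm of
// the branch, the iterated dominance frontier of the two defining blocks is
// the join block, so a MemoryPhi merging the two MemoryDefs is placed there.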
1550
1551 void MemorySSA::buildMemorySSA(BatchAAResults &BAA) {
1552 // We create an access to represent "live on entry", for things like
1553 // arguments or users of globals, where the memory they use is defined before
1554 // the beginning of the function. We do not actually insert it into the IR.
1555 // We do not define a live on exit for the immediate uses, and thus our
1556 // semantics do *not* imply that something with no immediate uses can simply
1557 // be removed.
1558 BasicBlock &StartingPoint = F.getEntryBlock();
1559 LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
1560 &StartingPoint, NextID++));
1561
1562 // We maintain lists of memory accesses per-block, trading memory for time. We
1563 // could just look up the memory access for every possible instruction in the
1564 // stream.
1565 SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
1566 // Go through each block, figure out where defs occur, and chain together all
1567 // the accesses.
1568 for (BasicBlock &B : F) {
1569 bool InsertIntoDef = false;
1570 AccessList *Accesses = nullptr;
1571 DefsList *Defs = nullptr;
1572 for (Instruction &I : B) {
1573 MemoryUseOrDef *MUD = createNewAccess(&I, &BAA);
1574 if (!MUD)
1575 continue;
1576
1577 if (!Accesses)
1578 Accesses = getOrCreateAccessList(&B);
1579 Accesses->push_back(MUD);
1580 if (isa<MemoryDef>(MUD)) {
1581 InsertIntoDef = true;
1582 if (!Defs)
1583 Defs = getOrCreateDefsList(&B);
1584 Defs->push_back(*MUD);
1585 }
1586 }
1587 if (InsertIntoDef)
1588 DefiningBlocks.insert(&B);
1589 }
1590 placePHINodes(DefiningBlocks);
1591
1592 // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
1593 // filled in with all blocks.
1594 SmallPtrSet<BasicBlock *, 16> Visited;
1595 renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);
1596
1597 // Mark the uses in unreachable blocks as live on entry, so that they go
1598 // somewhere.
1599 for (auto &BB : F)
1600 if (!Visited.count(&BB))
1601 markUnreachableAsLiveOnEntry(&BB);
1602 }
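// Editorial sketch of what construction yields when printed with the
// annotated writer (IR names such as %p and %v are placeholders):
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, ptr %p
//   ; MemoryUse(1)
//   %v = load i32, ptr %p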
1603
1604 MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
1605
1606 MemorySSA::CachingWalker<AliasAnalysis> *MemorySSA::getWalkerImpl() {
1607 if (Walker)
1608 return Walker.get();
1609
1610 if (!WalkerBase)
1611 WalkerBase =
1612 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1613
1614 Walker =
1615 std::make_unique<CachingWalker<AliasAnalysis>>(this, WalkerBase.get());
1616 return Walker.get();
1617 }
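// Editorial example of typical walker use by a client, assuming MSSA is a
// built MemorySSA and LoadI is some load instruction in F:
//   MemorySSAWalker *W = MSSA.getWalker();
//   MemoryAccess *MA = MSSA.getMemoryAccess(&LoadI);
//   MemoryAccess *Clobber = W->getClobberingMemoryAccess(MA);
// The result is a MemoryDef or MemoryPhi that may clobber the load, or the
// live-on-entry def if nothing in the function can.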
1618
1619 MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
1620 if (SkipWalker)
1621 return SkipWalker.get();
1622
1623 if (!WalkerBase)
1624 WalkerBase =
1625 std::make_unique<ClobberWalkerBase<AliasAnalysis>>(this, AA, DT);
1626
1627 SkipWalker =
1628 std::make_unique<SkipSelfWalker<AliasAnalysis>>(this, WalkerBase.get());
1629 return SkipWalker.get();
1630 }
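// Editorial note: the skip-self walker shares the same ClobberWalkerBase; it
// differs only in how queries on MemoryDefs are answered, via the SkipSelf
// parameter of getClobberingMemoryAccessBase below.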
1631
1632
1633 // This is a helper function used by the creation routines. It places NewAccess
1634 // into the access and defs lists for a given basic block, at the given
1635 // insertion point.
1636 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1637 const BasicBlock *BB,
1638 InsertionPlace Point) {
1639 auto *Accesses = getOrCreateAccessList(BB);
1640 if (Point == Beginning) {
1641 // If it's a phi node, it goes first, otherwise, it goes after any phi
1642 // nodes.
1643 if (isa<MemoryPhi>(NewAccess)) {
1644 Accesses->push_front(NewAccess);
1645 auto *Defs = getOrCreateDefsList(BB);
1646 Defs->push_front(*NewAccess);
1647 } else {
1648 auto AI = find_if_not(
1649 *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1650 Accesses->insert(AI, NewAccess);
1651 if (!isa<MemoryUse>(NewAccess)) {
1652 auto *Defs = getOrCreateDefsList(BB);
1653 auto DI = find_if_not(
1654 *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
1655 Defs->insert(DI, *NewAccess);
1656 }
1657 }
1658 } else {
1659 Accesses->push_back(NewAccess);
1660 if (!isa<MemoryUse>(NewAccess)) {
1661 auto *Defs = getOrCreateDefsList(BB);
1662 Defs->push_back(*NewAccess);
1663 }
1664 }
1665 BlockNumberingValid.erase(BB);
1666 }
1667
1668 void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
1669 AccessList::iterator InsertPt) {
1670 auto *Accesses = getWritableBlockAccesses(BB);
1671 bool WasEnd = InsertPt == Accesses->end();
1672 Accesses->insert(AccessList::iterator(InsertPt), What);
1673 if (!isa<MemoryUse>(What)) {
1674 auto *Defs = getOrCreateDefsList(BB);
1675 // If we got asked to insert at the end, we have an easy job, just shove it
1676 // at the end. If we got asked to insert before an existing def, we also get
1677 // an iterator. If we got asked to insert before a use, we have to hunt for
1678 // the next def.
1679 if (WasEnd) {
1680 Defs->push_back(*What);
1681 } else if (isa<MemoryDef>(InsertPt)) {
1682 Defs->insert(InsertPt->getDefsIterator(), *What);
1683 } else {
1684 while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
1685 ++InsertPt;
1686 // Either we found a def, or we are inserting at the end
1687 if (InsertPt == Accesses->end())
1688 Defs->push_back(*What);
1689 else
1690 Defs->insert(InsertPt->getDefsIterator(), *What);
1691 }
1692 }
1693 BlockNumberingValid.erase(BB);
1694 }
1695
1696 void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
1697 // Keep it in the lookup tables, remove from the lists
1698 removeFromLists(What, false);
1699
1700 // Note that moving should implicitly invalidate the optimized state of a
1701 // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
1702 // MemoryDef.
1703 if (auto *MD = dyn_cast<MemoryDef>(What))
1704 MD->resetOptimized();
1705 What->setBlock(BB);
1706 }
1707
1708 // Move What before Where in the IR. The end result is that What will belong to
1709 // the right lists and have the right Block set, but will not otherwise be
1710 // correct. It will not have the right defining access, and if it is a def,
1711 // things below it will not properly be updated.
1712 void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
1713 AccessList::iterator Where) {
1714 prepareForMoveTo(What, BB);
1715 insertIntoListsBefore(What, BB, Where);
1716 }
1717
1718 void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
1719 InsertionPlace Point) {
1720 if (isa<MemoryPhi>(What)) {
1721 assert(Point == Beginning &&
1722 "Can only move a Phi at the beginning of the block");
1723 // Update lookup table entry
1724 ValueToMemoryAccess.erase(What->getBlock());
1725 bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
1726 (void)Inserted;
1727 assert(Inserted && "Cannot move a Phi to a block that already has one");
1728 }
1729
1730 prepareForMoveTo(What, BB);
1731 insertIntoListsForBlock(What, BB, Point);
1732 }
1733
1734 MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
1735 assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
1736 MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
1737 // Phis are always placed at the front of the block.
1738 insertIntoListsForBlock(Phi, BB, Beginning);
1739 ValueToMemoryAccess[BB] = Phi;
1740 return Phi;
1741 }
1742
1743 MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
1744 MemoryAccess *Definition,
1745 const MemoryUseOrDef *Template,
1746 bool CreationMustSucceed) {
1747 assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
1748 MemoryUseOrDef *NewAccess = createNewAccess(I, AA, Template);
1749 if (CreationMustSucceed)
1750 assert(NewAccess != nullptr && "Tried to create a memory access for a "
1751 "non-memory touching instruction");
1752 if (NewAccess) {
1753 assert((!Definition || !isa<MemoryUse>(Definition)) &&
1754 "A use cannot be a defining access");
1755 NewAccess->setDefiningAccess(Definition);
1756 }
1757 return NewAccess;
1758 }
1759
1760 // Return true if the instruction has ordering constraints.
1761 // Note specifically that this only considers stores and loads
1762 // because others are still considered ModRef by getModRefInfo.
1763 static inline bool isOrdered(const Instruction *I) {
1764 if (auto *SI = dyn_cast<StoreInst>(I)) {
1765 if (!SI->isUnordered())
1766 return true;
1767 } else if (auto *LI = dyn_cast<LoadInst>(I)) {
1768 if (!LI->isUnordered())
1769 return true;
1770 }
1771 return false;
1772 }
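// Editorial examples: a volatile load/store, or an atomic one with ordering
// stronger than unordered, is "ordered" and is modeled as a MemoryDef by
// createNewAccess below; a plain `load i32, ptr %p` is not (%p is a
// placeholder name).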
1773
1774 /// Helper function to create new memory accesses
1775 template <typename AliasAnalysisType>
1776 MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
1777 AliasAnalysisType *AAP,
1778 const MemoryUseOrDef *Template) {
1779 // The assume intrinsic has a control dependency which we model by claiming
1780 // that it writes arbitrarily. Debuginfo intrinsics may be considered
1781 // clobbers when we have a nonstandard AA pipeline. Ignore these fake memory
1782 // dependencies here.
1783 // FIXME: Replace this special casing with a more accurate modelling of
1784 // assume's control dependency.
1785 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
1786 switch (II->getIntrinsicID()) {
1787 default:
1788 break;
1789 case Intrinsic::assume:
1790 case Intrinsic::experimental_noalias_scope_decl:
1791 case Intrinsic::pseudoprobe:
1792 return nullptr;
1793 }
1794 }
1795
1796 // Using a nonstandard AA pipeline might leave us with unexpected modref
1797 // results for I, so add a check to not model instructions that may not read
1798 // from or write to memory. This is necessary for correctness.
1799 if (!I->mayReadFromMemory() && !I->mayWriteToMemory())
1800 return nullptr;
1801
1802 bool Def, Use;
1803 if (Template) {
1804 Def = isa<MemoryDef>(Template);
1805 Use = isa<MemoryUse>(Template);
1806 #if !defined(NDEBUG)
1807 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1808 bool DefCheck, UseCheck;
1809 DefCheck = isModSet(ModRef) || isOrdered(I);
1810 UseCheck = isRefSet(ModRef);
1811 assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
1812 #endif
1813 } else {
1814 // Find out what effect this instruction has on memory.
1815 ModRefInfo ModRef = AAP->getModRefInfo(I, None);
1816 // The isOrdered check is used to ensure that volatiles end up as defs
1817 // (atomics end up as ModRef right now anyway). Until we separate the
1818 // ordering chain from the memory chain, this enables people to see at least
1819 // some relative ordering to volatiles. Note that getClobberingMemoryAccess
1820 // will still give an answer that bypasses other volatile loads. TODO:
1821 // Separate memory aliasing and ordering into two different chains so that
1822 // we can precisely represent both "what memory will this read/write/is
1823 // clobbered by" and "what instructions can I move this past".
1824 Def = isModSet(ModRef) || isOrdered(I);
1825 Use = isRefSet(ModRef);
1826 }
1827
1828 // It's possible for an instruction to not modify memory at all. During
1829 // construction, we ignore them.
1830 if (!Def && !Use)
1831 return nullptr;
1832
1833 MemoryUseOrDef *MUD;
1834 if (Def)
1835 MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
1836 else
1837 MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
1838 ValueToMemoryAccess[I] = MUD;
1839 return MUD;
1840 }
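// Editorial examples (IR names are placeholders): `store i32 1, ptr %p` gets
// a MemoryDef, a simple `%v = load i32, ptr %p` gets a MemoryUse, and an
// instruction that neither reads nor writes memory gets no access (nullptr).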
1841
1842 /// Properly remove \p MA from all of MemorySSA's lookup tables.
1843 void MemorySSA::removeFromLookups(MemoryAccess *MA) {
1844 assert(MA->use_empty() &&
1845 "Trying to remove memory access that still has uses");
1846 BlockNumbering.erase(MA);
1847 if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1848 MUD->setDefiningAccess(nullptr);
1849 // Invalidate our walker's cache if necessary
1850 if (!isa<MemoryUse>(MA))
1851 getWalker()->invalidateInfo(MA);
1852
1853 Value *MemoryInst;
1854 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
1855 MemoryInst = MUD->getMemoryInst();
1856 else
1857 MemoryInst = MA->getBlock();
1858
1859 auto VMA = ValueToMemoryAccess.find(MemoryInst);
1860 if (VMA->second == MA)
1861 ValueToMemoryAccess.erase(VMA);
1862 }
1863
1864 /// Properly remove \p MA from all of MemorySSA's lists.
1865 ///
1866 /// Because of the way the intrusive list and use lists work, it is important to
1867 /// do removal in the right order.
1868 /// ShouldDelete defaults to true, and will cause the memory access to also be
1869 /// deleted, not just removed.
1870 void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
1871 BasicBlock *BB = MA->getBlock();
1872 // The access list owns the reference, so we erase it from the non-owning list
1873 // first.
1874 if (!isa<MemoryUse>(MA)) {
1875 auto DefsIt = PerBlockDefs.find(BB);
1876 std::unique_ptr<DefsList> &Defs = DefsIt->second;
1877 Defs->remove(*MA);
1878 if (Defs->empty())
1879 PerBlockDefs.erase(DefsIt);
1880 }
1881
1882 // The erase call here will delete it. If we don't want it deleted, we call
1883 // remove instead.
1884 auto AccessIt = PerBlockAccesses.find(BB);
1885 std::unique_ptr<AccessList> &Accesses = AccessIt->second;
1886 if (ShouldDelete)
1887 Accesses->erase(MA);
1888 else
1889 Accesses->remove(MA);
1890
1891 if (Accesses->empty()) {
1892 PerBlockAccesses.erase(AccessIt);
1893 BlockNumberingValid.erase(BB);
1894 }
1895 }
1896
1897 void MemorySSA::print(raw_ostream &OS) const {
1898 MemorySSAAnnotatedWriter Writer(this);
1899 F.print(OS, &Writer);
1900 }
1901
1902 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1903 LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
1904 #endif
1905
1906 void MemorySSA::verifyMemorySSA(VerificationLevel VL) const {
1907 #if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
1908 VL = VerificationLevel::Full;
1909 #endif
1910
1911 #ifndef NDEBUG
1912 verifyOrderingDominationAndDefUses(F, VL);
1913 verifyDominationNumbers(F);
1914 if (VL == VerificationLevel::Full)
1915 verifyPrevDefInPhis(F);
1916 #endif
1917 // Previously, the verification used to also verify that the clobberingAccess
1918 // cached by MemorySSA is the same as the clobberingAccess found at a later
1919 // query to AA. This does not hold true in general due to the current fragility
1920 // of BasicAA which has arbitrary caps on the things it analyzes before giving
1921 // up. As a result, transformations that are correct will lead to BasicAA
1922 // returning different Alias answers before and after that transformation.
1923 // Invalidating MemorySSA is not an option, as the results in BasicAA can be so
1924 // random, in the worst case we'd need to rebuild MemorySSA from scratch after
1925 // every transformation, which defeats the purpose of using it. For such an
1926 // example, see test4 added in D51960.
1927 }
1928
1929 void MemorySSA::verifyPrevDefInPhis(Function &F) const {
1930 for (const BasicBlock &BB : F) {
1931 if (MemoryPhi *Phi = getMemoryAccess(&BB)) {
1932 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
1933 auto *Pred = Phi->getIncomingBlock(I);
1934 auto *IncAcc = Phi->getIncomingValue(I);
1935 // If Pred has no unreachable predecessors, get last def looking at
1936 // IDoms. If, while walking IDoms, any of these has an unreachable
1937 // predecessor, then the incoming def can be any access.
1938 if (auto *DTNode = DT->getNode(Pred)) {
1939 while (DTNode) {
1940 if (auto *DefList = getBlockDefs(DTNode->getBlock())) {
1941 auto *LastAcc = &*(--DefList->end());
1942 assert(LastAcc == IncAcc &&
1943 "Incorrect incoming access into phi.");
1944 (void)IncAcc;
1945 (void)LastAcc;
1946 break;
1947 }
1948 DTNode = DTNode->getIDom();
1949 }
1950 } else {
1951 // If Pred has unreachable predecessors, but has at least a Def, the
1952 // incoming access can be the last Def in Pred, or it could have been
1953 // optimized to LoE. After an update, though, the LoE may have been
1954 // replaced by another access, so IncAcc may be any access.
1955 // If Pred has unreachable predecessors and no Defs, incoming access
1956 // should be LoE; However, after an update, it may be any access.
1957 }
1958 }
1959 }
1960 }
1961 }
1962
1963 /// Verify that all of the blocks we believe to have valid domination numbers
1964 /// actually have valid domination numbers.
1965 void MemorySSA::verifyDominationNumbers(const Function &F) const {
1966 if (BlockNumberingValid.empty())
1967 return;
1968
1969 SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
1970 for (const BasicBlock &BB : F) {
1971 if (!ValidBlocks.count(&BB))
1972 continue;
1973
1974 ValidBlocks.erase(&BB);
1975
1976 const AccessList *Accesses = getBlockAccesses(&BB);
1977 // It's correct to say an empty block has valid numbering.
1978 if (!Accesses)
1979 continue;
1980
1981 // Block numbering starts at 1.
1982 unsigned long LastNumber = 0;
1983 for (const MemoryAccess &MA : *Accesses) {
1984 auto ThisNumberIter = BlockNumbering.find(&MA);
1985 assert(ThisNumberIter != BlockNumbering.end() &&
1986 "MemoryAccess has no domination number in a valid block!");
1987
1988 unsigned long ThisNumber = ThisNumberIter->second;
1989 assert(ThisNumber > LastNumber &&
1990 "Domination numbers should be strictly increasing!");
1991 (void)LastNumber;
1992 LastNumber = ThisNumber;
1993 }
1994 }
1995
1996 assert(ValidBlocks.empty() &&
1997 "All valid BasicBlocks should exist in F -- dangling pointers?");
1998 }
1999
2000 /// Verify ordering: the order and existence of MemoryAccesses matches the
2001 /// order and existence of memory affecting instructions.
2002 /// Verify domination: each definition dominates all of its uses.
2003 /// Verify def-uses: the immediate use information - walk all the memory
2004 /// accesses and verify that, for each use, it appears in the appropriate
2005 /// def's use list
2006 void MemorySSA::verifyOrderingDominationAndDefUses(Function &F,
2007 VerificationLevel VL) const {
2008 // Walk all the blocks, comparing what the lookups think and what the access
2009 // lists think, as well as the order in the blocks vs the order in the access
2010 // lists.
2011 SmallVector<MemoryAccess *, 32> ActualAccesses;
2012 SmallVector<MemoryAccess *, 32> ActualDefs;
2013 for (BasicBlock &B : F) {
2014 const AccessList *AL = getBlockAccesses(&B);
2015 const auto *DL = getBlockDefs(&B);
2016 MemoryPhi *Phi = getMemoryAccess(&B);
2017 if (Phi) {
2018 // Verify ordering.
2019 ActualAccesses.push_back(Phi);
2020 ActualDefs.push_back(Phi);
2021 // Verify domination
2022 for (const Use &U : Phi->uses()) {
2023 assert(dominates(Phi, U) && "Memory PHI does not dominate its uses");
2024 (void)U;
2025 }
2026 // Verify def-uses for full verify.
2027 if (VL == VerificationLevel::Full) {
2028 assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
2029 pred_begin(&B), pred_end(&B))) &&
2030 "Incomplete MemoryPhi Node");
2031 for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
2032 verifyUseInDefs(Phi->getIncomingValue(I), Phi);
2033 assert(is_contained(predecessors(&B), Phi->getIncomingBlock(I)) &&
2034 "Incoming phi block not a block predecessor");
2035 }
2036 }
2037 }
2038
2039 for (Instruction &I : B) {
2040 MemoryUseOrDef *MA = getMemoryAccess(&I);
2041 assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
2042 "We have memory affecting instructions "
2043 "in this block but they are not in the "
2044 "access list or defs list");
2045 if (MA) {
2046 // Verify ordering.
2047 ActualAccesses.push_back(MA);
2048 if (MemoryAccess *MD = dyn_cast<MemoryDef>(MA)) {
2049 // Verify ordering.
2050 ActualDefs.push_back(MA);
2051 // Verify domination.
2052 for (const Use &U : MD->uses()) {
2053 assert(dominates(MD, U) &&
2054 "Memory Def does not dominate it's uses");
2055 (void)U;
2056 }
2057 }
2058 // Verify def-uses for full verify.
2059 if (VL == VerificationLevel::Full)
2060 verifyUseInDefs(MA->getDefiningAccess(), MA);
2061 }
2062 }
2063 // Either we hit the assert, really have no accesses, or we have both
2064 // accesses and an access list. Same with defs.
2065 if (!AL && !DL)
2066 continue;
2067 // Verify ordering.
2068 assert(AL->size() == ActualAccesses.size() &&
2069 "We don't have the same number of accesses in the block as on the "
2070 "access list");
2071 assert((DL || ActualDefs.size() == 0) &&
2072 "Either we should have a defs list, or we should have no defs");
2073 assert((!DL || DL->size() == ActualDefs.size()) &&
2074 "We don't have the same number of defs in the block as on the "
2075 "def list");
2076 auto ALI = AL->begin();
2077 auto AAI = ActualAccesses.begin();
2078 while (ALI != AL->end() && AAI != ActualAccesses.end()) {
2079 assert(&*ALI == *AAI && "Not the same accesses in the same order");
2080 ++ALI;
2081 ++AAI;
2082 }
2083 ActualAccesses.clear();
2084 if (DL) {
2085 auto DLI = DL->begin();
2086 auto ADI = ActualDefs.begin();
2087 while (DLI != DL->end() && ADI != ActualDefs.end()) {
2088 assert(&*DLI == *ADI && "Not the same defs in the same order");
2089 ++DLI;
2090 ++ADI;
2091 }
2092 }
2093 ActualDefs.clear();
2094 }
2095 }
2096
2097 /// Verify the def-use lists in MemorySSA, by verifying that \p Use
2098 /// appears in the use list of \p Def.
2099 void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
2100 // The live on entry use may cause us to get a NULL def here
2101 if (!Def)
2102 assert(isLiveOnEntryDef(Use) &&
2103 "Null def but use not point to live on entry def");
2104 else
2105 assert(is_contained(Def->users(), Use) &&
2106 "Did not find use in def's use list");
2107 }
2108
2109 /// Perform a local numbering on blocks so that instruction ordering can be
2110 /// determined in constant time.
2111 /// TODO: We currently just number in order. If we numbered by N, we could
2112 /// allow at least N-1 sequences of insertBefore or insertAfter (and at least
2113 /// log2(N) sequences of mixed before and after) without needing to invalidate
2114 /// the numbering.
2115 void MemorySSA::renumberBlock(const BasicBlock *B) const {
2116 // The pre-increment ensures the numbers really start at 1.
2117 unsigned long CurrentNumber = 0;
2118 const AccessList *AL = getBlockAccesses(B);
2119 assert(AL != nullptr && "Asking to renumber an empty block");
2120 for (const auto &I : *AL)
2121 BlockNumbering[&I] = ++CurrentNumber;
2122 BlockNumberingValid.insert(B);
2123 }
2124
2125 /// Determine, for two memory accesses in the same block,
2126 /// whether \p Dominator dominates \p Dominatee.
2127 /// \returns True if \p Dominator dominates \p Dominatee.
2128 bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
2129 const MemoryAccess *Dominatee) const {
2130 const BasicBlock *DominatorBlock = Dominator->getBlock();
2131
2132 assert((DominatorBlock == Dominatee->getBlock()) &&
2133 "Asking for local domination when accesses are in different blocks!");
2134 // A node dominates itself.
2135 if (Dominatee == Dominator)
2136 return true;
2137
2138 // When Dominatee is defined on function entry, it is not dominated by another
2139 // memory access.
2140 if (isLiveOnEntryDef(Dominatee))
2141 return false;
2142
2143 // When Dominator is defined on function entry, it dominates the other memory
2144 // access.
2145 if (isLiveOnEntryDef(Dominator))
2146 return true;
2147
2148 if (!BlockNumberingValid.count(DominatorBlock))
2149 renumberBlock(DominatorBlock);
2150
2151 unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
2152 // All numbers start with 1
2153 assert(DominatorNum != 0 && "Block was not numbered properly");
2154 unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
2155 assert(DominateeNum != 0 && "Block was not numbered properly");
2156 return DominatorNum < DominateeNum;
2157 }
2158
2159 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2160 const MemoryAccess *Dominatee) const {
2161 if (Dominator == Dominatee)
2162 return true;
2163
2164 if (isLiveOnEntryDef(Dominatee))
2165 return false;
2166
2167 if (Dominator->getBlock() != Dominatee->getBlock())
2168 return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
2169 return locallyDominates(Dominator, Dominatee);
2170 }
2171
2172 bool MemorySSA::dominates(const MemoryAccess *Dominator,
2173 const Use &Dominatee) const {
2174 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
2175 BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
2176 // The def must dominate the incoming block of the phi.
2177 if (UseBB != Dominator->getBlock())
2178 return DT->dominates(Dominator->getBlock(), UseBB);
2179 // If the UseBB and the DefBB are the same, compare locally.
2180 return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
2181 }
2182 // If it's not a PHI node use, the normal dominates can already handle it.
2183 return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
2184 }
2185
2186 void MemorySSA::ensureOptimizedUses() {
2187 if (IsOptimized)
2188 return;
2189
2190 BatchAAResults BatchAA(*AA);
2191 ClobberWalkerBase<BatchAAResults> WalkerBase(this, &BatchAA, DT);
2192 CachingWalker<BatchAAResults> WalkerLocal(this, &WalkerBase);
2193 OptimizeUses(this, &WalkerLocal, &BatchAA, DT).optimizeUses();
2194 IsOptimized = true;
2195 }
2196
2197 void MemoryAccess::print(raw_ostream &OS) const {
2198 switch (getValueID()) {
2199 case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
2200 case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
2201 case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
2202 }
2203 llvm_unreachable("invalid value id");
2204 }
2205
2206 void MemoryDef::print(raw_ostream &OS) const {
2207 MemoryAccess *UO = getDefiningAccess();
2208
2209 auto printID = [&OS](MemoryAccess *A) {
2210 if (A && A->getID())
2211 OS << A->getID();
2212 else
2213 OS << LiveOnEntryStr;
2214 };
2215
2216 OS << getID() << " = MemoryDef(";
2217 printID(UO);
2218 OS << ")";
2219
2220 if (isOptimized()) {
2221 OS << "->";
2222 printID(getOptimized());
2223
2224 if (Optional<AliasResult> AR = getOptimizedAccessType())
2225 OS << " " << *AR;
2226 }
2227 }
2228
2229 void MemoryPhi::print(raw_ostream &OS) const {
2230 ListSeparator LS(",");
2231 OS << getID() << " = MemoryPhi(";
2232 for (const auto &Op : operands()) {
2233 BasicBlock *BB = getIncomingBlock(Op);
2234 MemoryAccess *MA = cast<MemoryAccess>(Op);
2235
2236 OS << LS << '{';
2237 if (BB->hasName())
2238 OS << BB->getName();
2239 else
2240 BB->printAsOperand(OS, false);
2241 OS << ',';
2242 if (unsigned ID = MA->getID())
2243 OS << ID;
2244 else
2245 OS << LiveOnEntryStr;
2246 OS << '}';
2247 }
2248 OS << ')';
2249 }
2250
2251 void MemoryUse::print(raw_ostream &OS) const {
2252 MemoryAccess *UO = getDefiningAccess();
2253 OS << "MemoryUse(";
2254 if (UO && UO->getID())
2255 OS << UO->getID();
2256 else
2257 OS << LiveOnEntryStr;
2258 OS << ')';
2259
2260 if (Optional<AliasResult> AR = getOptimizedAccessType())
2261 OS << " " << *AR;
2262 }
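// Editorial reference: the printers above produce text of the form
//   2 = MemoryDef(1)
//   3 = MemoryPhi({entry,liveOnEntry},{if.then,2})
//   MemoryUse(3) MayAlias
// where block names such as "entry" and "if.then" are placeholders.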
2263
2264 void MemoryAccess::dump() const {
2265 // Cannot completely remove virtual function even in release mode.
2266 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2267 print(dbgs());
2268 dbgs() << "\n";
2269 #endif
2270 }
2271
2272 char MemorySSAPrinterLegacyPass::ID = 0;
2273
2274 MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
2275 initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
2276 }
2277
2278 void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
2279 AU.setPreservesAll();
2280 AU.addRequired<MemorySSAWrapperPass>();
2281 }
2282
2283 class DOTFuncMSSAInfo {
2284 private:
2285 const Function &F;
2286 MemorySSAAnnotatedWriter MSSAWriter;
2287
2288 public:
2289 DOTFuncMSSAInfo(const Function &F, MemorySSA &MSSA)
2290 : F(F), MSSAWriter(&MSSA) {}
2291
2292 const Function *getFunction() { return &F; }
2293 MemorySSAAnnotatedWriter &getWriter() { return MSSAWriter; }
2294 };
2295
2296 namespace llvm {
2297
2298 template <>
2299 struct GraphTraits<DOTFuncMSSAInfo *> : public GraphTraits<const BasicBlock *> {
2300 static NodeRef getEntryNode(DOTFuncMSSAInfo *CFGInfo) {
2301 return &(CFGInfo->getFunction()->getEntryBlock());
2302 }
2303
2304 // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
2305 using nodes_iterator = pointer_iterator<Function::const_iterator>;
2306
2307 static nodes_iterator nodes_begin(DOTFuncMSSAInfo *CFGInfo) {
2308 return nodes_iterator(CFGInfo->getFunction()->begin());
2309 }
2310
2311 static nodes_iterator nodes_end(DOTFuncMSSAInfo *CFGInfo) {
2312 return nodes_iterator(CFGInfo->getFunction()->end());
2313 }
2314
2315 static size_t size(DOTFuncMSSAInfo *CFGInfo) {
2316 return CFGInfo->getFunction()->size();
2317 }
2318 };
2319
2320 template <>
2321 struct DOTGraphTraits<DOTFuncMSSAInfo *> : public DefaultDOTGraphTraits {
2322
2323 DOTGraphTraits(bool IsSimple = false) : DefaultDOTGraphTraits(IsSimple) {}
2324
2325 static std::string getGraphName(DOTFuncMSSAInfo *CFGInfo) {
2326 return "MSSA CFG for '" + CFGInfo->getFunction()->getName().str() +
2327 "' function";
2328 }
2329
2330 std::string getNodeLabel(const BasicBlock *Node, DOTFuncMSSAInfo *CFGInfo) {
2331 return DOTGraphTraits<DOTFuncInfo *>::getCompleteNodeLabel(
2332 Node, nullptr,
2333 [CFGInfo](raw_string_ostream &OS, const BasicBlock &BB) -> void {
2334 BB.print(OS, &CFGInfo->getWriter(), true, true);
2335 },
2336 [](std::string &S, unsigned &I, unsigned Idx) -> void {
2337 std::string Str = S.substr(I, Idx - I);
2338 StringRef SR = Str;
2339 if (SR.count(" = MemoryDef(") || SR.count(" = MemoryPhi(") ||
2340 SR.count("MemoryUse("))
2341 return;
2342 DOTGraphTraits<DOTFuncInfo *>::eraseComment(S, I, Idx);
2343 });
2344 }
2345
2346 static std::string getEdgeSourceLabel(const BasicBlock *Node,
2347 const_succ_iterator I) {
2348 return DOTGraphTraits<DOTFuncInfo *>::getEdgeSourceLabel(Node, I);
2349 }
2350
2351 /// Display the raw branch weights from PGO.
2352 std::string getEdgeAttributes(const BasicBlock *Node, const_succ_iterator I,
2353 DOTFuncMSSAInfo *CFGInfo) {
2354 return "";
2355 }
2356
2357 std::string getNodeAttributes(const BasicBlock *Node,
2358 DOTFuncMSSAInfo *CFGInfo) {
2359 return getNodeLabel(Node, CFGInfo).find(';') != std::string::npos
2360 ? "style=filled, fillcolor=lightpink"
2361 : "";
2362 }
2363 };
2364
2365 } // namespace llvm
2366
2367 bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
2368 auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2369 MSSA.ensureOptimizedUses();
2370 if (DotCFGMSSA != "") {
2371 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2372 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2373 } else
2374 MSSA.print(dbgs());
2375
2376 if (VerifyMemorySSA)
2377 MSSA.verifyMemorySSA();
2378 return false;
2379 }
2380
2381 AnalysisKey MemorySSAAnalysis::Key;
2382
2383 MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
2384 FunctionAnalysisManager &AM) {
2385 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
2386 auto &AA = AM.getResult<AAManager>(F);
2387 return MemorySSAAnalysis::Result(std::make_unique<MemorySSA>(F, &AA, &DT));
2388 }
2389
2390 bool MemorySSAAnalysis::Result::invalidate(
2391 Function &F, const PreservedAnalyses &PA,
2392 FunctionAnalysisManager::Invalidator &Inv) {
2393 auto PAC = PA.getChecker<MemorySSAAnalysis>();
2394 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2395 Inv.invalidate<AAManager>(F, PA) ||
2396 Inv.invalidate<DominatorTreeAnalysis>(F, PA);
2397 }
2398
2399 PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
2400 FunctionAnalysisManager &AM) {
2401 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2402 MSSA.ensureOptimizedUses();
2403 if (DotCFGMSSA != "") {
2404 DOTFuncMSSAInfo CFGInfo(F, MSSA);
2405 WriteGraph(&CFGInfo, "", false, "MSSA", DotCFGMSSA);
2406 } else {
2407 OS << "MemorySSA for function: " << F.getName() << "\n";
2408 MSSA.print(OS);
2409 }
2410
2411 return PreservedAnalyses::all();
2412 }
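// Editorial note: with the standard pass registration this printer is
// typically reached via something like
//   opt -passes='print<memoryssa>' -disable-output input.ll
// and -dot-cfg-mssa=<file> switches the output to a DOT graph instead.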
2413
2414 PreservedAnalyses MemorySSAWalkerPrinterPass::run(Function &F,
2415 FunctionAnalysisManager &AM) {
2416 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2417 OS << "MemorySSA (walker) for function: " << F.getName() << "\n";
2418 MemorySSAWalkerAnnotatedWriter Writer(&MSSA);
2419 F.print(OS, &Writer);
2420
2421 return PreservedAnalyses::all();
2422 }
2423
2424 PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
2425 FunctionAnalysisManager &AM) {
2426 AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();
2427
2428 return PreservedAnalyses::all();
2429 }
2430
2431 char MemorySSAWrapperPass::ID = 0;
2432
2433 MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
2434 initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
2435 }
2436
2437 void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }
2438
2439 void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2440 AU.setPreservesAll();
2441 AU.addRequiredTransitive<DominatorTreeWrapperPass>();
2442 AU.addRequiredTransitive<AAResultsWrapperPass>();
2443 }
2444
2445 bool MemorySSAWrapperPass::runOnFunction(Function &F) {
2446 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2447 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2448 MSSA.reset(new MemorySSA(F, &AA, &DT));
2449 return false;
2450 }
2451
2452 void MemorySSAWrapperPass::verifyAnalysis() const {
2453 if (VerifyMemorySSA)
2454 MSSA->verifyMemorySSA();
2455 }
2456
2457 void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
2458 MSSA->print(OS);
2459 }
2460
2461 MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}
2462
2463 /// Walk the use-def chains starting at \p StartingAccess and find
2464 /// the MemoryAccess that actually clobbers Loc.
2465 ///
2466 /// \returns our clobbering memory access
2467 template <typename AliasAnalysisType>
2468 MemoryAccess *
2469 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2470 MemoryAccess *StartingAccess, const MemoryLocation &Loc,
2471 unsigned &UpwardWalkLimit) {
2472 assert(!isa<MemoryUse>(StartingAccess) && "Use cannot be defining access");
2473
2474 Instruction *I = nullptr;
2475 if (auto *StartingUseOrDef = dyn_cast<MemoryUseOrDef>(StartingAccess)) {
2476 if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
2477 return StartingUseOrDef;
2478
2479 I = StartingUseOrDef->getMemoryInst();
2480
2481 // Conservatively, fences are always clobbers, so don't perform the walk if
2482 // we hit a fence.
2483 if (!isa<CallBase>(I) && I->isFenceLike())
2484 return StartingUseOrDef;
2485 }
2486
2487 UpwardsMemoryQuery Q;
2488 Q.OriginalAccess = StartingAccess;
2489 Q.StartingLoc = Loc;
2490 Q.Inst = nullptr;
2491 Q.IsCall = false;
2492
2493 // Unlike the other function, do not walk to the def of a def, because we are
2494 // handed something we already believe is the clobbering access.
2495 // We never set SkipSelf to true in Q in this method.
2496 MemoryAccess *Clobber =
2497 Walker.findClobber(StartingAccess, Q, UpwardWalkLimit);
2498 LLVM_DEBUG({
2499 dbgs() << "Clobber starting at access " << *StartingAccess << "\n";
2500 if (I)
2501 dbgs() << " for instruction " << *I << "\n";
2502 dbgs() << " is " << *Clobber << "\n";
2503 });
2504 return Clobber;
2505 }
2506
2507 static const Instruction *
2508 getInvariantGroupClobberingInstruction(Instruction &I, DominatorTree &DT) {
2509 if (!I.hasMetadata(LLVMContext::MD_invariant_group) || I.isVolatile())
2510 return nullptr;
2511
2512 // We consider bitcasts and zero GEPs to be the same pointer value. Start by
2513 // stripping bitcasts and zero GEPs, then we will recursively look at loads
2514 // and stores through bitcasts and zero GEPs.
2515 Value *PointerOperand = getLoadStorePointerOperand(&I)->stripPointerCasts();
2516
2517 // It's not safe to walk the use list of a global value because function
2518 // passes aren't allowed to look outside their functions.
2519 // FIXME: this could be fixed by filtering instructions from outside of
2520 // current function.
2521 if (isa<Constant>(PointerOperand))
2522 return nullptr;
2523
2524 // Queue to process all pointers that are equivalent to load operand.
2525 SmallVector<const Value *, 8> PointerUsesQueue;
2526 PointerUsesQueue.push_back(PointerOperand);
2527
2528 const Instruction *MostDominatingInstruction = &I;
2529
2530 // FIXME: This loop is O(n^2) because dominates can be O(n) and in worst case
2531 // we will see all the instructions. It may not matter in practice. If it
2532 // does, we will have to support MemorySSA construction and updates.
2533 while (!PointerUsesQueue.empty()) {
2534 const Value *Ptr = PointerUsesQueue.pop_back_val();
2535 assert(Ptr && !isa<GlobalValue>(Ptr) &&
2536 "Null or GlobalValue should not be inserted");
2537
2538 for (const User *Us : Ptr->users()) {
2539 auto *U = dyn_cast<Instruction>(Us);
2540 if (!U || U == &I || !DT.dominates(U, MostDominatingInstruction))
2541 continue;
2542
2543 // Add bitcasts and zero GEPs to queue.
2544 if (isa<BitCastInst>(U)) {
2545 PointerUsesQueue.push_back(U);
2546 continue;
2547 }
2548 if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
2549 if (GEP->hasAllZeroIndices())
2550 PointerUsesQueue.push_back(U);
2551 continue;
2552 }
2553
2554 // If we hit a load/store with an invariant.group metadata and the same
2555 // pointer operand, we can assume that value pointed to by the pointer
2556 // operand didn't change.
2557 if (U->hasMetadata(LLVMContext::MD_invariant_group) &&
2558 getLoadStorePointerOperand(U) == Ptr && !U->isVolatile()) {
2559 MostDominatingInstruction = U;
2560 }
2561 }
2562 }
2563 return MostDominatingInstruction == &I ? nullptr : MostDominatingInstruction;
2564 }
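// Editorial sketch of the pattern handled above (IR names are placeholders):
//   store i32 0, ptr %p, !invariant.group !0
//   ...
//   %v = load i32, ptr %p, !invariant.group !0
// For the load, the dominating store on the same invariant.group pointer is
// returned, so its MemoryDef can be used as the clobber without a full walk.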
2565
2566 template <typename AliasAnalysisType>
2567 MemoryAccess *
2568 MemorySSA::ClobberWalkerBase<AliasAnalysisType>::getClobberingMemoryAccessBase(
2569 MemoryAccess *MA, unsigned &UpwardWalkLimit, bool SkipSelf,
2570 bool UseInvariantGroup) {
2571 auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
2572 // If this is a MemoryPhi, we can't do anything.
2573 if (!StartingAccess)
2574 return MA;
2575
2576 if (UseInvariantGroup) {
2577 if (auto *I = getInvariantGroupClobberingInstruction(
2578 *StartingAccess->getMemoryInst(), MSSA->getDomTree())) {
2579 assert(isa<LoadInst>(I) || isa<StoreInst>(I));
2580
2581 auto *ClobberMA = MSSA->getMemoryAccess(I);
2582 assert(ClobberMA);
2583 if (isa<MemoryUse>(ClobberMA))
2584 return ClobberMA->getDefiningAccess();
2585 return ClobberMA;
2586 }
2587 }
2588
2589 bool IsOptimized = false;
2590
2591 // If this is an already optimized use or def, return the optimized result.
2592 // Note: Currently, we store the optimized def result in a separate field,
2593 // since we can't use the defining access.
2594 if (StartingAccess->isOptimized()) {
2595 if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
2596 return StartingAccess->getOptimized();
2597 IsOptimized = true;
2598 }
2599
2600 const Instruction *I = StartingAccess->getMemoryInst();
2601 // We can't sanely do anything with fences, since they conservatively clobber
2602 // all memory, and have no locations to get pointers from to try to
2603 // disambiguate.
2604 if (!isa<CallBase>(I) && I->isFenceLike())
2605 return StartingAccess;
2606
2607 UpwardsMemoryQuery Q(I, StartingAccess);
2608
2609 if (isUseTriviallyOptimizableToLiveOnEntry(*Walker.getAA(), I)) {
2610 MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
2611 StartingAccess->setOptimized(LiveOnEntry);
2612 StartingAccess->setOptimizedAccessType(None);
2613 return LiveOnEntry;
2614 }
2615
2616 MemoryAccess *OptimizedAccess;
2617 if (!IsOptimized) {
2618 // Start with the thing we already think clobbers this location
2619 MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
2620
2621 // At this point, DefiningAccess may be the live on entry def.
2622 // If it is, we will not get a better result.
2623 if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
2624 StartingAccess->setOptimized(DefiningAccess);
2625 StartingAccess->setOptimizedAccessType(None);
2626 return DefiningAccess;
2627 }
2628
2629 OptimizedAccess = Walker.findClobber(DefiningAccess, Q, UpwardWalkLimit);
2630 StartingAccess->setOptimized(OptimizedAccess);
2631 if (MSSA->isLiveOnEntryDef(OptimizedAccess))
2632 StartingAccess->setOptimizedAccessType(None);
2633 else if (Q.AR && *Q.AR == AliasResult::MustAlias)
2634 StartingAccess->setOptimizedAccessType(
2635 AliasResult(AliasResult::MustAlias));
2636 } else
2637 OptimizedAccess = StartingAccess->getOptimized();
2638
2639 LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
2640 LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
2641 LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
2642 LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");
2643
2644 MemoryAccess *Result;
2645 if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
2646 isa<MemoryDef>(StartingAccess) && UpwardWalkLimit) {
2647 assert(isa<MemoryDef>(Q.OriginalAccess));
2648 Q.SkipSelfAccess = true;
2649 Result = Walker.findClobber(OptimizedAccess, Q, UpwardWalkLimit);
2650 } else
2651 Result = OptimizedAccess;
2652
2653 LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
2654 LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");
2655
2656 return Result;
2657 }
2658
2659 MemoryAccess *
2660 DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
2661 if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
2662 return Use->getDefiningAccess();
2663 return MA;
2664 }
2665
2666 MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
2667 MemoryAccess *StartingAccess, const MemoryLocation &) {
2668 if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
2669 return Use->getDefiningAccess();
2670 return StartingAccess;
2671 }
2672
2673 void MemoryPhi::deleteMe(DerivedUser *Self) {
2674 delete static_cast<MemoryPhi *>(Self);
2675 }
2676
2677 void MemoryDef::deleteMe(DerivedUser *Self) {
2678 delete static_cast<MemoryDef *>(Self);
2679 }
2680
2681 void MemoryUse::deleteMe(DerivedUser *Self) {
2682 delete static_cast<MemoryUse *>(Self);
2683 }
2684
2685 bool upward_defs_iterator::IsGuaranteedLoopInvariant(Value *Ptr) const {
2686 auto IsGuaranteedLoopInvariantBase = [](Value *Ptr) {
2687 Ptr = Ptr->stripPointerCasts();
2688 if (!isa<Instruction>(Ptr))
2689 return true;
2690 return isa<AllocaInst>(Ptr);
2691 };
2692
2693 Ptr = Ptr->stripPointerCasts();
2694 if (auto *I = dyn_cast<Instruction>(Ptr)) {
2695 if (I->getParent()->isEntryBlock())
2696 return true;
2697 }
2698 if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
2699 return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
2700 GEP->hasAllConstantIndices();
2701 }
2702 return IsGuaranteedLoopInvariantBase(Ptr);
2703 }
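// Editorial examples: a non-instruction pointer (argument, global, constant),
// an alloca, an instruction defined in the entry block, or a GEP with
// all-constant indices whose base is itself a non-instruction or an alloca
// counts as guaranteed loop-invariant; a pointer computed inside a loop body
// does not.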
2704