//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

// Always verify MemorySSA if expensive checking is enabled.
#ifdef EXPENSIVE_CHECKS
bool llvm::VerifyMemorySSA = true;
#else
bool llvm::VerifyMemorySSA = false;
#endif
static cl::opt<bool, true>
    VerifyMemorySSAX("verify-memoryssa", cl::location(VerifyMemorySSA),
                     cl::Hidden, cl::desc("Enable verification of MemorySSA."));
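
// Usage sketch (illustrative): even in builds without EXPENSIVE_CHECKS,
// verification can be forced from the command line by passing
// -verify-memoryssa to opt.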

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
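
// For example (illustrative): when a function is printed with this annotator,
// MemorySSA information is interleaved as comments, along the lines of:
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 0, i32* %p
//   ; MemoryUse(1)
//   %v = load i32, i32* %p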

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a DenseMap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (auto *C = dyn_cast<CallBase>(Inst)) {
      IsCall = true;
      Call = C;
    } else {
      IsCall = false;
      // There is no such thing as a memory location for a fence inst, and it
      // is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  const CallBase *getCall() const {
    assert(IsCall);
    return Call;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (Call->getCalledValue() != Other.Call->getCalledValue())
      return false;

    return Call->arg_size() == Other.Call->arg_size() &&
           std::equal(Call->arg_begin(), Call->arg_end(),
                      Other.Call->arg_begin());
  }

private:
  union {
    const CallBase *Call;
    MemoryLocation Loc;
  };
};
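
// Usage sketch (illustrative; this mirrors what OptimizeUses does below):
//   DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
//   MemoryLocOrCall UseMLOC(MU); // MU is a MemoryUse or MemoryDef
//   auto &LocInfo = LocStackInfo[UseMLOC];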

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCall()->getCalledValue()));

    for (const Value *Arg : MLOC.getCall()->args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
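///
/// For example (illustrative): a plain load may not be hoisted above an
/// acquire load, so if MayClobber is "load atomic i32, i32* %p acquire" this
/// returns false; if both loads are monotonic or weaker, it returns true
/// regardless of their addresses.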
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
static ClobberAlias instructionClobbersQuery(const MemoryDef *MD,
                                             const MemoryLocation &UseLoc,
                                             const Instruction *UseInst,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  const auto *UseCall = dyn_cast<CallBase>(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCall)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCall) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCall);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD clobbers MU, false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call.
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local
  // domination.
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;
  bool SkipSelfAccess = false;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(isa<CallBase>(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

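// For example (illustrative): a MemoryDef for
//   call void @llvm.lifetime.end.p0i8(i64 8, i8* %buf)
// ends the lifetime of any location that must-aliases %buf, so a later use of
// that location can be treated as live on entry.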
static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(
                                  cast<LoadInst>(I)->getPointerOperand()));
}
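
// For example (illustrative): a load tagged with !invariant.load metadata,
//   %v = load i32, i32* %p, !invariant.load !0
// can never be clobbered within the function, so its defining access can be
// set straight to liveOnEntry without any walking.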

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start     The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc  The MemoryLocation for Start.
/// \param MSSA      The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query     The UpwardsMemoryQuery we used for our search.
/// \param AA        The AliasAnalysis we used for our search.
/// \param AllowImpreciseClobber Always false, unless we do relaxed verify.
static void
checkClobberSanity(const MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA,
                   bool AllowImpreciseClobber = false) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;
  SmallVector<ConstMemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    auto MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (const auto *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (const auto *MD = dyn_cast<MemoryDef>(MA)) {
        // If Start is a Def, skip self.
        if (MD == Start)
          continue;

        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      if (const auto *MU = dyn_cast<MemoryUse>(MA)) {
        (void)MU;
        assert(MU == Start &&
               "Can only find use in def chain if Start is a use");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(
          upward_defs_begin({const_cast<MemoryAccess *>(MA), MAP.second}),
          upward_defs_end());
    }
  }

  // If the verify is done following an optimization, it's possible that
  // ClobberAt was a conservative clobbering, that we can now infer is not a
  // true clobbering access. Don't fail the verify if that's the case.
  // We do have accesses that claim they're optimized, but could be optimized
  // further. Updating all these can be expensive, so allow it for now (FIXME).
  if (AllowImpreciseClobber)
    return;

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc, const MemoryAccess *StopAt = nullptr,
                     const MemoryAccess *SkipStopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt || Current == SkipStopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};
        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //   A
      //  B C
      //   D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      const MemoryAccess *SkipStopWhere = nullptr;
      if (Query->SkipSelfAccess && Node.Loc == Query->StartingLoc) {
        assert(isa<MemoryDef>(Query->OriginalAccess));
        SkipStopWhere = Query->OriginalAccess;
      }

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere,
                                                 /*SkipStopAt=*/SkipStopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere && Res.Result != SkipStopWhere);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere || Res.Result == SkipStopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking. If we are in the mode of skipping the OriginalAccess, and
        // we've reached back to the OriginalAccess, do not save path, we've
        // just looped back to self.
        if (Res.Result != SkipStopWhere)
          NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator>
  const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N.
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to.
  ///   - Find if all paths from the starting memory access ultimately reach A.
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
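  ///
  /// For example (illustrative): starting below a diamond whose two arms
  /// contain only non-clobbering defs, both searches reach the phi's walk
  /// target A; the walk then resumes from A as a single search, letting the
  /// query be optimized past the entire diamond.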
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    if (!Q.SkipSelfAccess)
      checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }

  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

class MemorySSA::ClobberWalkerBase {
  ClobberWalker Walker;
  MemorySSA *MSSA;

public:
  ClobberWalkerBase(MemorySSA *M, AliasAnalysis *A, DominatorTree *D)
      : Walker(*M, *A, *D), MSSA(M) {}

  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *,
                                              const MemoryLocation &);
  // The second argument (bool) defines whether the clobber search should skip
  // the original queried access. If true, there will be a follow-up query
  // searching for a clobber access past "self". Note that the Optimized access
  // is not updated if a new clobber is found by this SkipSelf search. If this
  // additional query becomes heavily used we may decide to cache the result.
  // Walker instantiations will decide how to set the SkipSelf bool.
  MemoryAccess *getClobberingMemoryAccessBase(MemoryAccess *, bool);
  void verify(const MemorySSA *MSSA) { Walker.verify(MSSA); }
};

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  CachingWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override;

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker->verify(MSSA);
  }
};

class MemorySSA::SkipSelfWalker final : public MemorySSAWalker {
  ClobberWalkerBase *Walker;

public:
  SkipSelfWalker(MemorySSA *M, ClobberWalkerBase *W)
      : MemorySSAWalker(M), Walker(W) {}
  ~SkipSelfWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *MA,
                                          const MemoryLocation &Loc) override;

  void invalidateInfo(MemoryAccess *MA) override {
    if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
      MUD->resetOptimized();
  }

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker->verify(MSSA);
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors.
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block.
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      SkipWalker(nullptr), NextID(0) {
  buildMemorySSA();
}

MemorySSA::~MemorySSA() {
  // Drop all our references.
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
    Walker = MSSA->getWalker();
  }

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which DenseMap
    // does.
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  AliasAnalysis *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track the stack positions that each MemoryLocation needs
/// to check, and where its last walk ended. This is because we only want to
/// check the things that changed since last time. The same MemoryLocation
/// should get clobbered by the same store (getModRefInfo does not use
/// invariantness or things like this, and if they start, we can modify
/// MemoryLocOrCall to include relevant data).
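///
/// For example (illustrative sketch, assuming %a and %b do not alias): for a
/// block containing
///   1 = MemoryDef(liveOnEntry)  ; store to %a
///   2 = MemoryDef(1)            ; store to %b
///   MemoryUse(2)                ; load of %a
/// the version stack is [liveOnEntry, 1, 2]. Walking down from the top, the
/// load of %a is first clobbered at stack position 1 (the store to %a), so its
/// defining access is optimized from 2 to 1.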
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries we
      // would have to make LowerBound and LastKill a per-loc stack, and pop
      // it until the top of that stack dominates us. This does not seem worth
      // it ATM. A much cheaper optimization would be to always explore the
      // deepest branch of the dominator tree first. This will guarantee this
      // resets on the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there.
        Instruction *UseInst = MU->getMemoryInst();
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong.
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack.
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber, or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact, <
    // LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to.
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go.
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}
1425
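// Construction proceeds in phases: build per-block access and defs lists,
// place MemoryPhis on the iterated dominance frontier of the defining blocks,
// rename use-def chains with a dominator-tree walk, optimize the uses, and
// finally mark accesses in unreachable blocks as live on entry.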
void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per block, trading memory for time.
  // The alternative would be to look up the memory access for every possible
  // instruction in the stream on demand.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  CachingWalker *Walker = getWalkerImpl();

  OptimizeUses(this, Walker, AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }

MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
  if (Walker)
    return Walker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  Walker = llvm::make_unique<CachingWalker>(this, WalkerBase.get());
  return Walker.get();
}

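// The skip-self walker shares the lazily created ClobberWalkerBase with the
// caching walker above; the only difference is that a query rooted at a
// MemoryDef walks past the def itself (see getClobberingMemoryAccessBase with
// SkipSelf == true below).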
MemorySSAWalker *MemorySSA::getSkipSelfWalker() {
  if (SkipWalker)
    return SkipWalker.get();

  if (!WalkerBase)
    WalkerBase = llvm::make_unique<ClobberWalkerBase>(this, AA, DT);

  SkipWalker = llvm::make_unique<SkipSelfWalker>(this, WalkerBase.get());
  return SkipWalker.get();
}

// This is a helper function used by the creation routines. It places NewAccess
// into the access and defs lists for a given basic block, at the given
// insertion point.
void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
                                        const BasicBlock *BB,
                                        InsertionPlace Point) {
  auto *Accesses = getOrCreateAccessList(BB);
  if (Point == Beginning) {
    // If it's a phi node, it goes first, otherwise, it goes after any phi
    // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
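
// Note: both insertion helpers (above and below) invalidate the block's local
// numbering (BlockNumberingValid); renumberBlock() rebuilds it lazily on the
// next locallyDominates() query involving that block.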

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

void MemorySSA::prepareForMoveTo(MemoryAccess *What, BasicBlock *BB) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);

  // Note that moving should implicitly invalidate the optimized state of a
  // MemoryUse (and Phis can't be optimized). However, it doesn't do so for a
  // MemoryDef.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    MD->resetOptimized();
  What->setBlock(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  prepareForMoveTo(What, BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  prepareForMoveTo(What, BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition,
                                               const MemoryUseOrDef *Template) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I, Template);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
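// For example, volatile and atomic (non-unordered) loads are "ordered" in
// this sense, so createNewAccess below models them as MemoryDefs even though
// they only read memory.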
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}

/// Helper function to create new memory accesses.
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I,
                                           const MemoryUseOrDef *Template) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  bool Def, Use;
  if (Template) {
    Def = isa<MemoryDef>(Template);
    Use = isa<MemoryUse>(Template);
#if !defined(NDEBUG)
    ModRefInfo ModRef = AA->getModRefInfo(I, None);
    bool DefCheck, UseCheck;
    DefCheck = isModSet(ModRef) || isOrdered(I);
    UseCheck = isRefSet(ModRef);
    assert(Def == DefCheck && (Def || Use == UseCheck) && "Invalid template");
#endif
  } else {
    // Find out what effect this instruction has on memory.
    ModRefInfo ModRef = AA->getModRefInfo(I, None);
    // The isOrdered check is used to ensure that volatiles end up as defs
    // (atomics end up as ModRef right now anyway). Until we separate the
    // ordering chain from the memory chain, this enables people to see at least
    // some relative ordering to volatiles. Note that getClobberingMemoryAccess
    // will still give an answer that bypasses other volatile loads. TODO:
    // Separate memory aliasing and ordering into two different chains so that
    // we can precisely represent both "what memory will this read/write/is
    // clobbered by" and "what instructions can I move this past".
    Def = isModSet(ModRef) || isOrdered(I);
    Use = isRefSet(ModRef);
  }
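
  // To summarize the classification (assuming a typical alias-analysis
  // result): a plain store is Mod, hence a MemoryDef; a plain load is Ref,
  // hence a MemoryUse; a volatile load is only Ref but ordered, so it is
  // promoted to a MemoryDef; a memcpy is ModRef, hence a MemoryDef.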

  // It's possible for an instruction to not touch memory at all. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may appear multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates every operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  Walker->verify(this);
  verifyClobberSanity(F);
}

/// Check sanity of the clobbering instruction for access MA.
void MemorySSA::checkClobberSanityAccess(const MemoryAccess *MA) const {
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA)) {
    if (!MUD->isOptimized())
      return;
    auto *I = MUD->getMemoryInst();
    auto Loc = MemoryLocation::getOrNone(I);
    if (Loc == None)
      return;
    auto *Clobber = MUD->getOptimized();
    UpwardsMemoryQuery Q(I, MUD);
    checkClobberSanity(MUD, Clobber, *Loc, *this, Q, *AA, true);
  }
}

void MemorySSA::verifyClobberSanity(const Function &F) const {
#if !defined(NDEBUG) && defined(EXPENSIVE_CHECKS)
  for (const BasicBlock &BB : F) {
    const AccessList *Accesses = getBlockAccesses(&BB);
    if (!Accesses)
      continue;
    for (const MemoryAccess &MA : *Accesses)
      checkClobberSanityAccess(&MA);
  }
#endif
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}

/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
#ifndef NDEBUG
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list. Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
#endif
}

/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
    }
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to the live-on-entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
#endif
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

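// Example renderings (IDs are illustrative): "2 = MemoryDef(1)" for an
// unoptimized def whose defining access is def 1, or
// "2 = MemoryDef(1)->3 MustAlias" once the def has been optimized to def 3
// with a recorded MustAlias access type.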
void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}

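// Example rendering: "3 = MemoryPhi({entry,liveOnEntry},{if.then,2})", where
// each {block,ID} pair names an incoming block and its incoming access.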
void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}

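// Example rendering: "MemoryUse(1)", followed by e.g. " MustAlias" when an
// optimized access type has been recorded for the use.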
void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}

void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}
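
// A minimal usage sketch (hypothetical client code, not part of this file):
// inside a new-pass-manager function pass, one could write
//   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(&SomeMemInst);
// where SomeMemInst is an assumed placeholder for some memory instruction.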

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

/// Walk the use-def chains starting at \p StartingAccess and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  // We never set SkipSelf to true in Q in this method.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = Walker.findClobber(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}

MemoryAccess *
MemorySSA::ClobberWalkerBase::getClobberingMemoryAccessBase(MemoryAccess *MA,
                                                            bool SkipSelf) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  bool IsOptimized = false;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized()) {
    if (!SkipSelf || !isa<MemoryDef>(StartingAccess))
      return StartingAccess->getOptimized();
    IsOptimized = true;
  }

  const Instruction *I = StartingAccess->getMemoryInst();
  // We can't sanely do anything with a fence, since fences conservatively
  // clobber all memory and have no locations to get pointers from to try to
  // disambiguate.
  if (!isa<CallBase>(I) && I->isFenceLike())
    return StartingAccess;

  UpwardsMemoryQuery Q(I, StartingAccess);

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  MemoryAccess *OptimizedAccess;
  if (!IsOptimized) {
    // Start with the thing we already think clobbers this location.
    MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

    // At this point, DefiningAccess may be the live on entry def.
    // If it is, we will not get a better result.
    if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
      StartingAccess->setOptimized(DefiningAccess);
      StartingAccess->setOptimizedAccessType(None);
      return DefiningAccess;
    }

    OptimizedAccess = Walker.findClobber(DefiningAccess, Q);
    StartingAccess->setOptimized(OptimizedAccess);
    if (MSSA->isLiveOnEntryDef(OptimizedAccess))
      StartingAccess->setOptimizedAccessType(None);
    else if (Q.AR == MustAlias)
      StartingAccess->setOptimizedAccessType(MustAlias);
  } else
    OptimizedAccess = StartingAccess->getOptimized();

  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingAccess << "\n");
  LLVM_DEBUG(dbgs() << "Optimized Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *OptimizedAccess << "\n");

  MemoryAccess *Result;
  if (SkipSelf && isa<MemoryPhi>(OptimizedAccess) &&
      isa<MemoryDef>(StartingAccess)) {
    assert(isa<MemoryDef>(Q.OriginalAccess));
    Q.SkipSelfAccess = true;
    Result = Walker.findClobber(OptimizedAccess, Q);
  } else
    Result = OptimizedAccess;

  LLVM_DEBUG(dbgs() << "Result Memory SSA clobber [SkipSelf = " << SkipSelf);
  LLVM_DEBUG(dbgs() << "] for " << *I << " is " << *Result << "\n");

  return Result;
}

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  return Walker->getClobberingMemoryAccessBase(MA, false);
}

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                    const MemoryLocation &Loc) {
  return Walker->getClobberingMemoryAccessBase(MA, Loc);
}

MemoryAccess *
MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  return Walker->getClobberingMemoryAccessBase(MA, true);
}

MemoryAccess *
MemorySSA::SkipSelfWalker::getClobberingMemoryAccess(MemoryAccess *MA,
                                                     const MemoryLocation &Loc) {
  return Walker->getClobberingMemoryAccessBase(MA, Loc);
}

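// The do-nothing walker below performs no alias queries and no walking; it
// simply returns the links as the builder constructed them, for clients that
// want MemorySSA's structure without the cost of clobber walks.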
MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}