//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join point.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (i.e., we recursively visit
// ourselves again), or if we discover, while getting the value of the
// operands, that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
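// For example, in a diamond CFG where block A branches to B and C, which
// both join at D, the marker placed at D becomes a real MemoryPhi only if
// the definitions reaching D from B and C differ; if both paths carry the
// same definition, tryRemoveTrivialPhi folds the marker away.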
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case: just recurse, since we can only have one
    // definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle; we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we haven't created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of
// the access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, we have to walk the all-access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of block BB. If the block has any defs, the last
// one is the previous definition; otherwise, recurse through the
// predecessors.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument, e.g., phi(a, a) or b = phi(a, b) or c = phi(a, a, c).
// We recursively try to remove them.
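// For example, removing b = phi(a, b) replaces every use of b with a; that
// may make phis that used b trivial in turn, which recursePhi then revisits.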
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}

template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the operand is the phi itself or matches Same, we're good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same, so return the phi, since it's not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // We never found a non-self reference; the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node, because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand with BB as an incoming block with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands, since one is signed and
  // the other is not. So use it to index into the block iterator.
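  // Multiple edges from the same predecessor (e.g., several switch cases
  // branching to one block) appear as consecutive entries in the phi, so we
  // walk forward from the first index and update each matching entry.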
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
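// For example, inserting a store into a block that already contains a later
// MemoryDef requires both wiring the new def to the previous definition and
// re-pointing that later def (and any phis reachable below) at the new def.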
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // And that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc. we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track
    // whether getDefRecursive only ever used the single predecessor case.
    // These types of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an
    // arbitrary place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a new Def added, and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in the IDF may need fixing as well, and could potentially be
      // trivial before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more that are already minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

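// For each new definition in Vars, rewrite the defining access of the defs
// (and phis) that follow it, walking forward through the CFG until the first
// def on each path.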
void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed; unmark it so it may be optimized.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loops above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access; make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending on where
        // the store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we
          // already processed. FIXME: Double check this
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

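// Remove the incoming edge From -> To from To's MemoryPhi, if one exists,
// and simplify the phi if that leaves it trivial.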
void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

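// Keep the first incoming edge From -> To in To's MemoryPhi and delete any
// remaining duplicates of that edge, then simplify the phi if it became
// trivial.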
void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

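// Map the defining access MA of a cloned instruction to the corresponding
// access for the clone: MemoryDefs are translated through VMap, MemoryPhis
// through MPhiMap. Accesses defined outside the cloned region map to
// themselves.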
static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, so it's no longer a MemoryDef; look up
          // the previous definition instead.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

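// Clone the MemoryUses and MemoryDefs of BB into NewBB, using VMap to map
// the cloned instructions and MPhiMap to map the region's MemoryPhis.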
void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // The entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD
      // as a template. Calls coming from updateForClonedBlockIntoPred ensure
      // this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

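// After a unique backedge block BEBlock is inserted for the loop with the
// given Header and Preheader, fold the header MemoryPhi's backedge operands
// into a new phi in BEBlock, leaving the header phi with exactly two
// incoming edges: one from Preheader and one from BEBlock.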
void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create a phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will
  // be replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by
  // the incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access
  // from scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

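// Common implementation for the updateExitBlocksForClonedLoop overloads: for
// every (exit block, VMap) pair, record an inserted CFG edge from the cloned
// exit block to its successor, then apply all the updates at once.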
template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    SmallVector<CFGUpdate, 0> Empty;
    // Deletes are applied in reverse (as insertions), because this CFG view
    // pretends the deletes have not happened yet, hence the edges still
    // exist.
    DT.applyUpdates(Empty, RevDeleteUpdates);

    // Note: the MSSA update below doesn't distinguish between a GD with
    // (RevDelete, false) and (Delete, true), but this matters for the DT
    // updates above; for "children" purposes they are equivalent; but the
    // updates themselves convey the desired update, used inside DT only.
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, DT, &GD);
    // Update DT to redelete edges; this matches the real CFG so we can
    // perform the standard update without a postview of the CFG.
    DT.applyUpdates(DeleteUpdates);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

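// Apply CFG edge insertions to MemorySSA, querying predecessors through the
// snapshot CFG described by GD rather than the current IR.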
void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get the recursive last Def, assuming well-formed MSSA and an updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return the last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check the number of predecessors; we only care if there's more than
      // one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get the last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // a MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, so BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though; return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get the nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stopping when reaching CurrIDom.
  // Do not include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered
  // sets, we lose the information of multiple edges possibly existing
  // between two blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB; at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not
      // invalidate the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create them
  // in the order found in Updates, not in PredMap, to get deterministic
  // numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated
    // entries if an edge is added from the same predecessor to two different
    // blocks, and this does happen in rotate. Note that the map needs to be
    // updated when deleting unnecessary phis below, if the phi is in the
    // map, by replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefAddedPred. If they are all
      // the same, there is nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all
        // uses of NewPhi with the definition coming from all predecessors
        // (DefP1), before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and the old value
      // for all other predecessors. Since AddedBlockSet and PrevBlockSet are
      // ordered sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

// Move the accesses of From, starting at Start's access, to the end of To,
// and update the access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

/// If all arguments of a MemoryPhi are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

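// New is a newly created immediate predecessor of Old, taking over the edges
// from the blocks in Preds. Move or split Old's MemoryPhi accordingly: if
// all predecessors moved, the phi moves to New; otherwise a new phi in New
// takes over the incoming values for Preds and becomes an incoming value of
// Old's phi.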
void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // When identical edges were not merged, we only support removing a single
    // incoming edge per predecessor block, so Preds must not contain
    // duplicates.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

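// Remove MA from MemorySSA. Uses of MA are redirected to its defining access
// (or, for a MemoryPhi, to its single incoming value), and MA is erased from
// all lookup structures and lists. If OptimizePhis is set, MemoryPhis that
// used MA are afterwards removed if they have become trivial.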
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument.  If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset the optimized flag on users of this access and re-point the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we expect callers to run removeMemoryAccess
    // on those phi nodes themselves, because doing it here would be O(N^3).
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The removeFromLists call below destroys MA, so we cannot change the order
  // in which things are done here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

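// Erase all MemoryAccesses in a set of dead blocks. MemoryPhis in live
// successors first drop their incoming values for the dead blocks. The
// accesses are then deleted in two passes: all references they hold are
// dropped first, since accesses in dead blocks may still reference one
// another, and only then are the accesses themselves removed.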
void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

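// Try to remove each MemoryPhi in UpdatedPHIs that has become trivial,
// skipping WeakVH entries that were already deleted by earlier removals.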
void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

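// Update MemorySSA when I and all following instructions in its block become
// unreachable: remove their memory accesses, and drop the block as an
// incoming block from MemoryPhis in its successors, after removing duplicate
// phi edges.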
void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

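// Update MemorySSA when the conditional branch BI is rewritten into an
// unconditional branch to To: every successor of BI's block other than To
// drops the block from its MemoryPhi, after duplicate phi edges have been
// removed.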
void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

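// Create a MemoryAccess for I, defined by Definition, and insert it into BB
// at the position described by Point (e.g. the beginning or end of the
// block's access list).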
MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

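// Create a MemoryAccess for I, defined by Definition, and insert it
// immediately before the existing access InsertPt, which must be in the same
// block as I.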
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

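// Create a MemoryAccess for I, defined by Definition, and insert it
// immediately after the existing access InsertPt, which must be in the same
// block as I.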
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}