//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
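// For example (illustrative): asking for the previous def in a loop header
// revisits the header through the latch; the repeated visit is caught via
// VisitedBlocks below, and a MemoryPhi is created in the header so the
// in-progress lookup has an operand. If that phi later proves trivial (all
// incoming defs are the same), tryRemoveTrivialPhi deletes it again.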
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, so we must insert a phi
    // node to break it, so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet; that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a single
// definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
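// For example (illustrative): in a block whose access list is
// [MemoryPhi, use1, def1, use2], calling this on use2 returns def1, while
// calling it on use1 returns the MemoryPhi, since phis live in the block's
// defs list as well.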
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, we have to walk the all-access iterator.
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all by the
// same argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
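// For example (illustrative): removing c = phi(a, c) rewrites its uses to a,
// which may turn another phi d = phi(c, a) into d = phi(a, a); recursePhi
// then revisits such users so the whole cascade gets cleaned up.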
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}

template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the operand is the phi itself or the same as Same, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same, so return the phi; it is not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // We never found a non-self reference, so the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node, because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming work
  // to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}
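
// Illustrative usage of insertUse (a sketch, not part of this file): after
// creating a new load NewLI, a client pass could keep MemorySSA in sync with:
//   MemorySSAUpdater MSSAU(&MSSA);
//   auto *NewAccess = MSSAU.createMemoryAccessInBB(
//       NewLI, /*Definition=*/nullptr, NewLI->getParent(),
//       MemorySSA::BeforeTerminator);
//   MSSAU.insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);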

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
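// Note that BB may appear as an incoming block more than once (e.g., a switch
// with several cases branching to MP->getBlock()); the loop below keeps
// advancing while the incoming block is still BB, so every such edge is set.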
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
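// For example (illustrative): inserting a store into one arm of a diamond
// that previously contained no defs requires a new MemoryPhi at the join
// block, and any defs or uses below the join that were fed from above the
// diamond must be rewritten to take the new phi instead.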
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // and that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  // Remember the index where we may insert new phis.
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths until
    // we hit the first def along each path). This may also insert phi nodes.
    // TODO: There are other cases we can skip this work, such as when we have
    // a single successor, and only used a straight line of single pred blocks
    // backwards to find the def. To make that work, we'd have to track whether
    // getDefRecursive only ever used the single predecessor case. These types
    // of paths also only exist in between CFG simplifications.

    // If this is the first def in the block and this insert is in an arbitrary
    // place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a new Def added, and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef), Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more that are already minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked.
  if (RenameUses) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MD->getBlock();
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, so unmark it; it no longer needs to
    // be skipped when optimizing trivial phis.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loops above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we already
          // processed. FIXME: Double check this.
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
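/// For example, phi(a, a, a) yields a, while phi(a, b) yields nullptr.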
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD
      // as a template. Calls coming from updateForClonedBlockIntoPred ensure
      // this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/!CloneWasSimplified);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

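// Illustrative shape (a sketch): if the header phi had incoming values from
// {Preheader, Latch1, Latch2}, then after this function runs it has
// {Preheader, BEBlock}, and the new phi in BEBlock has {Latch1, Latch2}.
// When both latches carried the same access, the new phi is trivial and is
// removed at the end.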
void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will be
  // replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
    if (auto *SingleAccess = onlySingleValue(NewPhi)) {
      MPhiMap[Phi] = SingleAccess;
      removeMemoryAccess(NewPhi);
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto *BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by
  // the incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
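  // For example (illustrative): in LoopRotate, BB is the old loop header and
  // P1 the old preheader. A use of BB's Phi inside BB corresponds, in the
  // code cloned into P1, to whatever value flowed into that Phi from P1,
  // which is exactly what the MPhiMap entry below records.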
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    SmallVector<CFGUpdate, 0> Empty;
    // Deletes are applied reversed (as insertions), because this CFGView is
    // pretending the deletes did not happen yet, hence the edges still exist.
    DT.applyUpdates(Empty, RevDeleteUpdates);

    // Note: the MSSA update below doesn't distinguish between a GD with
    // (RevDelete, false) and (Delete, true), but this matters for the DT
    // updates above; for "children" purposes they are equivalent; but the
    // updates themselves convey the desired update, used inside DT only.
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, DT, &GD);
    // Update DT to redelete edges; this matches the real CFG so we can perform
    // the standard update without a postview of the CFG.
    DT.applyUpdates(DeleteUpdates);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}
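
// Illustrative usage of applyUpdates (a sketch, not part of this file): a
// transform that redirects an edge From->To to From->NewTo could keep
// MemorySSA in sync with something like:
//   SmallVector<MemorySSAUpdater::CFGUpdate, 2> Updates;
//   Updates.push_back({DT.Delete, From, To});
//   Updates.push_back({DT.Insert, From, NewTo});
//   MSSAU.applyUpdates(Updates, DT);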

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted, its
        // DT is invalidated. Return LoE as its last def. This will be added to
        // MemoryPhi node, and later deleted when the block is deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };
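
  // For example (illustrative): with a chain Entry -> A -> B where only Entry
  // contains a def, GetLastDef(B) walks the single-predecessor chain up to
  // Entry and returns its last def; at a merge point it jumps to the IDom
  // instead, since any def visible there dominates all incoming paths.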

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do not
  // include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered sets,
  // we lose the information of multiple edges possibly existing between two
  // blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB; at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not invalidate
      // the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated entries
    // if an edge is added from the same predecessor to two different blocks,
    // and this does happen in rotate. Note that the map needs to be updated
    // when deleting non-necessary phis below, if the phi is in the map, by
    // replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefAddedPred. If all the same,
      // there is nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all uses
        // of NewPhi with the definition coming from all predecessors (DefP1),
        // before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for all
      // other predecessors. Since AddedBlockSet and PrevBlockSet are ordered
      // sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto *DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}
1230 
1231 void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
1232                                                 BasicBlock *To,
1233                                                 Instruction *Start) {
1234   assert(MSSA->getBlockAccesses(To) == nullptr &&
1235          "To block is expected to be free of MemoryAccesses.");
1236   moveAllAccesses(From, To, Start);
1237   for (BasicBlock *Succ : successors(To))
1238     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1239       MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1240 }
1241 
1242 void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
1243                                                Instruction *Start) {
1244   assert(From->getUniquePredecessor() == To &&
1245          "From block is expected to have a single predecessor (To).");
1246   moveAllAccesses(From, To, Start);
1247   for (BasicBlock *Succ : successors(From))
1248     if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
1249       MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
1250 }

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // We currently only support removing a single incoming edge per
    // predecessor when identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}
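
// Example (illustrative; assumed names): mirroring a SplitBlockPredecessors
// style transform in which block New was inserted between Preds and Old:
//   MSSAU.wireOldPredecessorsToNewImmediatePredecessor(
//       Old, New, Preds, /*IdenticalEdgesWereMerged=*/true);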

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the
    // assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer that users call
    // removeMemoryAccess on those phi nodes, because doing it here would be
    // N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The removeFromLists call below will destroy MA, so the order of
  // operations here matters.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}
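
// Example (illustrative; `MSSAU` and the dead store `SI` are assumed names):
// remove the MemorySSA access before erasing the instruction itself:
//   if (MemoryAccess *MA = MSSA->getMemoryAccess(SI))
//     MSSAU.removeMemoryAccess(MA);
//   SI->eraseFromParent();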

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references held by the accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}
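
// Example (illustrative; assumed names): when deleting an unreachable region,
// update MemorySSA before erasing the IR blocks:
//   SmallSetVector<BasicBlock *, 8> DeadBlocks; // populated by the caller
//   MSSAU.removeBlocks(DeadBlocks);
//   for (BasicBlock *BB : DeadBlocks)
//     BB->eraseFromParent();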

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}
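
// Example (illustrative; assumed names): phis recorded as WeakVH during a CFG
// update can be batch-optimized afterwards; the WeakVH wrappers tolerate phis
// deleted in the meantime:
//   SmallVector<WeakVH, 16> UpdatedPHIs; // filled while rewriting edges
//   MSSAU.tryRemoveTrivialPhis(UpdatedPHIs);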

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
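
// Example (illustrative; `CI` is an assumed name): when an instruction is
// about to be turned into unreachable, update MemorySSA first:
//   MSSAU.changeToUnreachable(CI);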

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}
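
// Example (illustrative; assumed names): before folding the conditional
// branch BI so that it always jumps to To, drop the phi edges into the other
// successor:
//   MSSAU.changeCondBranchToUnconditionalTo(BI, To);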

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}
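
// Example (illustrative; `NewSI` and `DefAcc` are assumed names): create a
// MemoryDef for a store inserted at the start of block BB, defined by DefAcc:
//   MemoryAccess *NewMA = MSSAU.createMemoryAccessInBB(
//       NewSI, DefAcc, BB, MemorySSA::Beginning);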

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}
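
// Example (illustrative; `NewSI` and `OldAcc` are assumed names): a store
// cloned immediately after an existing store gets the old store's MemoryDef
// as its defining access; callers remain responsible for retargeting any
// existing uses of OldAcc that should now see the new def:
//   MemoryUseOrDef *NewAcc =
//       MSSAU.createMemoryAccessAfter(NewSI, /*Definition=*/OldAcc,
//                                     /*InsertPt=*/OldAcc);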