//===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------===//
//
// This file implements the MemorySSAUpdater class.
//
//===----------------------------------------------------------------===//
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/FormattedStream.h"
#include <algorithm>

#define DEBUG_TYPE "memoryssa"
using namespace llvm;

// This is the marker algorithm from "Simple and Efficient Construction of
// Static Single Assignment Form".
// The simple, non-marker algorithm places phi nodes at any join.
// Here, we place markers, and only place phi nodes if they end up necessary.
// They are only necessary if they break a cycle (IE we recursively visit
// ourselves again), or we discover, while getting the value of the operands,
// that there are two or more definitions needing to be merged.
// This still will leave non-minimal form in the case of irreducible control
// flow, where phi nodes may be in cycles with themselves, but unnecessary.
MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  // First, do a cache lookup. Without this cache, certain CFG structures
  // (like a series of if statements) take exponential time to visit.
  auto Cached = CachedPreviousDef.find(BB);
  if (Cached != CachedPreviousDef.end())
    return Cached->second;

  // If this method is called from an unreachable block, return LoE.
  if (!MSSA->DT->isReachableFromEntry(BB))
    return MSSA->getLiveOnEntryDef();

  if (BasicBlock *Pred = BB->getUniquePredecessor()) {
    VisitedBlocks.insert(BB);
    // Single predecessor case, just recurse, we can only have one definition.
    MemoryAccess *Result = getPreviousDefFromEnd(Pred, CachedPreviousDef);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.count(BB)) {
    // We hit our node again, meaning we had a cycle, we must insert a phi
    // node to break it so we have an operand. The only case this will
    // insert useless phis is if we have irreducible control flow.
    MemoryAccess *Result = MSSA->createMemoryPhi(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }

  if (VisitedBlocks.insert(BB).second) {
    // Mark us visited so we can detect a cycle.
    SmallVector<TrackingVH<MemoryAccess>, 8> PhiOps;

    // Recurse to get the values in our predecessors for placement of a
    // potential phi node. This will insert phi nodes if we cycle in order to
    // break the cycle and have an operand.
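    // While visiting the predecessors, also track whether every reachable
    // incoming access turns out to be one and the same access; if so, no new
    // phi is needed and that single access can be used directly.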
    bool UniqueIncomingAccess = true;
    MemoryAccess *SingleAccess = nullptr;
    for (auto *Pred : predecessors(BB)) {
      if (MSSA->DT->isReachableFromEntry(Pred)) {
        auto *IncomingAccess = getPreviousDefFromEnd(Pred, CachedPreviousDef);
        if (!SingleAccess)
          SingleAccess = IncomingAccess;
        else if (IncomingAccess != SingleAccess)
          UniqueIncomingAccess = false;
        PhiOps.push_back(IncomingAccess);
      } else
        PhiOps.push_back(MSSA->getLiveOnEntryDef());
    }

    // Now try to simplify the ops to avoid placing a phi.
    // This may return null if we never created a phi yet, that's okay.
    MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB));

    // See if we can avoid the phi by simplifying it.
    auto *Result = tryRemoveTrivialPhi(Phi, PhiOps);
    // If we couldn't simplify, we may have to create a phi.
    if (Result == Phi && UniqueIncomingAccess && SingleAccess) {
      // A concrete Phi only exists if we created an empty one to break a
      // cycle.
      if (Phi) {
        assert(Phi->operands().empty() && "Expected empty Phi");
        Phi->replaceAllUsesWith(SingleAccess);
        removeMemoryAccess(Phi);
      }
      Result = SingleAccess;
    } else if (Result == Phi && !(UniqueIncomingAccess && SingleAccess)) {
      if (!Phi)
        Phi = MSSA->createMemoryPhi(BB);

      // See if the existing phi operands match what we need.
      // Unlike normal SSA, we only allow one phi node per block, so we can't
      // just create a new one.
      if (Phi->getNumOperands() != 0) {
        // FIXME: Figure out whether this is dead code and if so remove it.
        if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) {
          // These will have been filled in by the recursive read we did above.
          llvm::copy(PhiOps, Phi->op_begin());
          std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin());
        }
      } else {
        unsigned i = 0;
        for (auto *Pred : predecessors(BB))
          Phi->addIncoming(&*PhiOps[i++], Pred);
        InsertedPHIs.push_back(Phi);
      }
      Result = Phi;
    }

    // Set ourselves up for the next variable by resetting visited state.
    VisitedBlocks.erase(BB);
    CachedPreviousDef.insert({BB, Result});
    return Result;
  }
  llvm_unreachable("Should have hit one of the three cases above");
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If a definition is not found in the block of the
// access, it continues globally, creating phi nodes to ensure we have a
// single definition.
MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) {
  if (auto *LocalResult = getPreviousDefInBlock(MA))
    return LocalResult;
  DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
  return getPreviousDefRecursive(MA->getBlock(), CachedPreviousDef);
}

// This starts at the memory access, and goes backwards in the block to find
// the previous definition. If the definition is not found in the block of the
// access, it returns nullptr.
MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) {
  auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock());

  // It's possible there are no defs, or we got handed the first def to start.
  if (Defs) {
    // If this is a def, we can just use the def iterators.
    if (!isa<MemoryUse>(MA)) {
      auto Iter = MA->getReverseDefsIterator();
      ++Iter;
      if (Iter != Defs->rend())
        return &*Iter;
    } else {
      // Otherwise, have to walk the all access iterator.
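      // Walking backwards from MA, the first access that is not a MemoryUse
      // is the nearest preceding MemoryDef or MemoryPhi in this block.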
      auto End = MSSA->getWritableBlockAccesses(MA->getBlock())->rend();
      for (auto &U : make_range(++MA->getReverseIterator(), End))
        if (!isa<MemoryUse>(U))
          return cast<MemoryAccess>(&U);
      // Note that if MA comes before Defs->begin(), we won't hit a def.
      return nullptr;
    }
  }
  return nullptr;
}

// This starts at the end of the block.
MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(
    BasicBlock *BB,
    DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> &CachedPreviousDef) {
  auto *Defs = MSSA->getWritableBlockDefs(BB);

  if (Defs) {
    CachedPreviousDef.insert({BB, &*Defs->rbegin()});
    return &*Defs->rbegin();
  }

  return getPreviousDefRecursive(BB, CachedPreviousDef);
}

// Recurse over a set of phi uses to eliminate the trivial ones.
MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) {
  if (!Phi)
    return nullptr;
  TrackingVH<MemoryAccess> Res(Phi);
  SmallVector<TrackingVH<Value>, 8> Uses;
  std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses));
  for (auto &U : Uses)
    if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U))
      tryRemoveTrivialPhi(UsePhi);
  return Res;
}

// Eliminate trivial phis.
// Phis are trivial if they are defined either by themselves, or all the same
// argument.
// IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c)
// We recursively try to remove them.
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi) {
  assert(Phi && "Can only remove concrete Phi.");
  auto OperRange = Phi->operands();
  return tryRemoveTrivialPhi(Phi, OperRange);
}

template <class RangeType>
MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi,
                                                    RangeType &Operands) {
  // Bail out on non-opt Phis.
  if (NonOptPhis.count(Phi))
    return Phi;

  // Detect equal or self arguments.
  MemoryAccess *Same = nullptr;
  for (auto &Op : Operands) {
    // If the same or self, good so far.
    if (Op == Phi || Op == Same)
      continue;
    // Not the same, return the phi since it's not eliminatable by us.
    if (Same)
      return Phi;
    Same = cast<MemoryAccess>(&*Op);
  }
  // Never found a non-self reference, the phi is undef.
  if (Same == nullptr)
    return MSSA->getLiveOnEntryDef();
  if (Phi) {
    Phi->replaceAllUsesWith(Same);
    removeMemoryAccess(Phi);
  }

  // We should only end up recursing in case we replaced something, in which
  // case, we may have made other Phis trivial.
  return recursePhi(Same);
}

void MemorySSAUpdater::insertUse(MemoryUse *MU, bool RenameUses) {
  InsertedPHIs.clear();
  MU->setDefiningAccess(getPreviousDef(MU));

  // In cases without unreachable blocks, because uses do not create new
  // may-defs, there are only two cases:
  // 1. There was a def already below us, and therefore, we should not have
  // created a phi node because it was already needed for the def.
  //
  // 2. There is no def below us, and therefore, there is no extra renaming
  // work to do.

  // In cases with unreachable blocks, where the unnecessary Phis were
  // optimized out, adding the Use may re-insert those Phis. Hence, when
  // inserting Uses outside of the MSSA creation process, and new Phis were
  // added, rename all uses if we are asked.

  if (!RenameUses && !InsertedPHIs.empty()) {
    auto *Defs = MSSA->getBlockDefs(MU->getBlock());
    (void)Defs;
    assert((!Defs || (++Defs->begin() == Defs->end())) &&
           "Block may have only a Phi or no defs");
  }

  if (RenameUses && InsertedPHIs.size()) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    BasicBlock *StartBlock = MU->getBlock();

    if (auto *Defs = MSSA->getWritableBlockDefs(StartBlock)) {
      MemoryAccess *FirstDef = &*Defs->begin();
      // Convert to incoming value if it's a memorydef. A phi *is* already an
      // incoming value.
      if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
        FirstDef = MD->getDefiningAccess();

      MSSA->renamePass(MU->getBlock(), FirstDef, Visited);
    }
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs)
      if (MemoryPhi *Phi = cast_or_null<MemoryPhi>(MP))
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
  }
}

// Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef.
static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB,
                                      MemoryAccess *NewDef) {
  // Replace any phi operand whose incoming block is BB with the new defining
  // access.
  int i = MP->getBasicBlockIndex(BB);
  assert(i != -1 && "Should have found the basic block in the phi");
  // We can't just compare i against getNumOperands since one is signed and the
  // other not. So use it to index into the block iterator.
  for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end();
       ++BBIter) {
    if (*BBIter != BB)
      break;
    MP->setIncomingValue(i, NewDef);
    ++i;
  }
}

// A brief description of the algorithm:
// First, we compute what should define the new def, using the SSA
// construction algorithm.
// Then, we update the defs below us (and any new phi nodes) in the graph to
// point to the correct new defs, to ensure we only have one variable, and no
// disconnected stores.
void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) {
  InsertedPHIs.clear();

  // See if we had a local def, and if not, go hunting.
  MemoryAccess *DefBefore = getPreviousDef(MD);
  bool DefBeforeSameBlock = false;
  if (DefBefore->getBlock() == MD->getBlock() &&
      !(isa<MemoryPhi>(DefBefore) &&
        llvm::is_contained(InsertedPHIs, DefBefore)))
    DefBeforeSameBlock = true;

  // There is a def before us, which means we can replace any store/phi uses
  // of that thing with us, since we are in the way of whatever was there
  // before.
  // We now define that def's memorydefs and memoryphis.
  if (DefBeforeSameBlock) {
    DefBefore->replaceUsesWithIf(MD, [MD](Use &U) {
      // Leave the MemoryUses alone.
      // Also make sure we skip ourselves to avoid self references.
      User *Usr = U.getUser();
      return !isa<MemoryUse>(Usr) && Usr != MD;
      // Defs are automatically unoptimized when the user is set to MD below,
      // because the isOptimized() call will fail to find the same ID.
    });
  }

  // And that def is now our defining access.
  MD->setDefiningAccess(DefBefore);

  SmallVector<WeakVH, 8> FixupList(InsertedPHIs.begin(), InsertedPHIs.end());

  SmallSet<WeakVH, 8> ExistingPhis;

  // Remember the index where we may insert new phis.
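  // Phis appended to InsertedPHIs between NewPhiIndex and NewPhiIndexEnd
  // (computed further below) are the ones this insertion may have made
  // non-minimal; they are the candidates later passed to tryRemoveTrivialPhis.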
  unsigned NewPhiIndex = InsertedPHIs.size();
  if (!DefBeforeSameBlock) {
    // If there was a local def before us, we must have the same effect it
    // did. Because every may-def is the same, any phis/etc we would create,
    // it would also have created. If there was no local def before us, we
    // performed a global update, and have to search all successors and make
    // sure we update the first def in each of them (following all paths
    // until we hit the first def along each path). This may also insert phi
    // nodes.
    // TODO: There are other cases where we can skip this work, such as when
    // we have a single successor, and only used a straight line of single
    // pred blocks backwards to find the def. To make that work, we'd have to
    // track whether getDefRecursive only ever used the single predecessor
    // case. These types of paths also only exist in between CFG
    // simplifications.

    // If this is the first def in the block and this insert is in an
    // arbitrary place, compute IDF and place phis.
    SmallPtrSet<BasicBlock *, 2> DefiningBlocks;

    // If this is the last Def in the block, also compute IDF based on MD,
    // since this may be a newly added Def and we may need additional Phis.
    auto Iter = MD->getDefsIterator();
    ++Iter;
    auto IterEnd = MSSA->getBlockDefs(MD->getBlock())->end();
    if (Iter == IterEnd)
      DefiningBlocks.insert(MD->getBlock());

    for (const auto &VH : InsertedPHIs)
      if (const auto *RealPHI = cast_or_null<MemoryPhi>(VH))
        DefiningBlocks.insert(RealPHI->getBlock());
    ForwardIDFCalculator IDFs(*MSSA->DT);
    SmallVector<BasicBlock *, 32> IDFBlocks;
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);
    SmallVector<AssertingVH<MemoryPhi>, 4> NewInsertedPHIs;
    for (auto *BBIDF : IDFBlocks) {
      auto *MPhi = MSSA->getMemoryAccess(BBIDF);
      if (!MPhi) {
        MPhi = MSSA->createMemoryPhi(BBIDF);
        NewInsertedPHIs.push_back(MPhi);
      } else {
        ExistingPhis.insert(MPhi);
      }
      // Add the phis created into the IDF blocks to NonOptPhis, so they are
      // not optimized out as trivial by the call to getPreviousDefFromEnd
      // below. Once they are complete, all these Phis are added to the
      // FixupList, and removed from NonOptPhis inside fixupDefs(). Existing
      // Phis in IDF may need fixing as well, and potentially be trivial
      // before this insertion, hence add all IDF Phis. See PR43044.
      NonOptPhis.insert(MPhi);
    }
    for (auto &MPhi : NewInsertedPHIs) {
      auto *BBIDF = MPhi->getBlock();
      for (auto *Pred : predecessors(BBIDF)) {
        DenseMap<BasicBlock *, TrackingVH<MemoryAccess>> CachedPreviousDef;
        MPhi->addIncoming(getPreviousDefFromEnd(Pred, CachedPreviousDef),
                          Pred);
      }
    }

    // Re-take the index where we're adding the new phis, because the above
    // call to getPreviousDefFromEnd may have inserted into InsertedPHIs.
    NewPhiIndex = InsertedPHIs.size();
    for (auto &MPhi : NewInsertedPHIs) {
      InsertedPHIs.push_back(&*MPhi);
      FixupList.push_back(&*MPhi);
    }

    FixupList.push_back(MD);
  }

  // Remember the index where we stopped inserting new phis above, since the
  // fixupDefs call in the loop below may insert more, that are already
  // minimal.
  unsigned NewPhiIndexEnd = InsertedPHIs.size();

  while (!FixupList.empty()) {
    unsigned StartingPHISize = InsertedPHIs.size();
    fixupDefs(FixupList);
    FixupList.clear();
    // Put any new phis on the fixup list, and process them.
    FixupList.append(InsertedPHIs.begin() + StartingPHISize,
                     InsertedPHIs.end());
  }

  // Optimize potentially non-minimal phis added in this method.
  unsigned NewPhiSize = NewPhiIndexEnd - NewPhiIndex;
  if (NewPhiSize)
    tryRemoveTrivialPhis(
        ArrayRef<WeakVH>(&InsertedPHIs[NewPhiIndex], NewPhiSize));

  // Now that all fixups are done, rename all uses if we are asked. Skip
  // renaming for defs in unreachable blocks.
  BasicBlock *StartBlock = MD->getBlock();
  if (RenameUses && MSSA->getDomTree().getNode(StartBlock)) {
    SmallPtrSet<BasicBlock *, 16> Visited;
    // We are guaranteed there is a def in the block, because we just got it
    // handed to us in this function.
    MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin();
    // Convert to incoming value if it's a memorydef. A phi *is* already an
    // incoming value.
    if (auto *MD = dyn_cast<MemoryDef>(FirstDef))
      FirstDef = MD->getDefiningAccess();

    MSSA->renamePass(MD->getBlock(), FirstDef, Visited);
    // We just inserted a phi into this block, so the incoming value will
    // become the phi anyway, so it does not matter what we pass.
    for (auto &MP : InsertedPHIs) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
    // Existing Phi blocks may need renaming too, if an access was previously
    // optimized and the inserted Defs "cover" the Optimized value.
    for (auto &MP : ExistingPhis) {
      MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MP);
      if (Phi)
        MSSA->renamePass(Phi->getBlock(), nullptr, Visited);
    }
  }
}

void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<WeakVH> &Vars) {
  SmallPtrSet<const BasicBlock *, 8> Seen;
  SmallVector<const BasicBlock *, 16> Worklist;
  for (auto &Var : Vars) {
    MemoryAccess *NewDef = dyn_cast_or_null<MemoryAccess>(Var);
    if (!NewDef)
      continue;
    // First, see if there is a local def after the operand.
    auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock());
    auto DefIter = NewDef->getDefsIterator();

    // The temporary Phi is being fixed, unmark it so it can be optimized
    // again.
    if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(NewDef))
      NonOptPhis.erase(Phi);

    // If there is a local def after us, we only have to rename that.
    if (++DefIter != Defs->end()) {
      cast<MemoryDef>(&*DefIter)->setDefiningAccess(NewDef);
      continue;
    }

    // Otherwise, we need to search down through the CFG.
    // For each of our successors, handle it directly if there is a phi, or
    // place it on the fixup worklist.
    for (const auto *S : successors(NewDef->getBlock())) {
      if (auto *MP = MSSA->getMemoryAccess(S))
        setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef);
      else
        Worklist.push_back(S);
    }

    while (!Worklist.empty()) {
      const BasicBlock *FixupBlock = Worklist.back();
      Worklist.pop_back();

      // Get the first def in the block that isn't a phi node.
      if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) {
        auto *FirstDef = &*Defs->begin();
        // The loop above and below should have taken care of phi nodes.
        assert(!isa<MemoryPhi>(FirstDef) &&
               "Should have already handled phi nodes!");
        // We are now this def's defining access, make sure we actually
        // dominate it.
        assert(MSSA->dominates(NewDef, FirstDef) &&
               "Should have dominated the new access");

        // This may insert new phi nodes, because we are not guaranteed the
        // block we are processing has a single pred, and depending where the
        // store was inserted, it may require phi nodes below it.
        cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef));
        return;
      }
      // We didn't find a def, so we must continue.
      for (const auto *S : successors(FixupBlock)) {
        // If there is a phi node, handle it.
        // Otherwise, put the block on the worklist.
        if (auto *MP = MSSA->getMemoryAccess(S))
          setMemoryPhiValueForBlock(MP, FixupBlock, NewDef);
        else {
          // If we cycle, we should have ended up at a phi node that we
          // already processed. FIXME: Double check this.
          if (!Seen.insert(S).second)
            continue;
          Worklist.push_back(S);
        }
      }
    }
  }
}

void MemorySSAUpdater::removeEdge(BasicBlock *From, BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    MPhi->unorderedDeleteIncomingBlock(From);
    tryRemoveTrivialPhi(MPhi);
  }
}

void MemorySSAUpdater::removeDuplicatePhiEdgesBetween(const BasicBlock *From,
                                                      const BasicBlock *To) {
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(To)) {
    bool Found = false;
    MPhi->unorderedDeleteIncomingIf([&](const MemoryAccess *, BasicBlock *B) {
      if (From != B)
        return false;
      if (Found)
        return true;
      Found = true;
      return false;
    });
    tryRemoveTrivialPhi(MPhi);
  }
}

/// If all arguments of a MemoryPHI are defined by the same incoming
/// argument, return that argument.
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  return MA;
}

static MemoryAccess *getNewDefiningAccessForClone(MemoryAccess *MA,
                                                  const ValueToValueMapTy &VMap,
                                                  PhiToDefMap &MPhiMap,
                                                  bool CloneWasSimplified,
                                                  MemorySSA *MSSA) {
  MemoryAccess *InsnDefining = MA;
  if (MemoryDef *DefMUD = dyn_cast<MemoryDef>(InsnDefining)) {
    if (!MSSA->isLiveOnEntryDef(DefMUD)) {
      Instruction *DefMUDI = DefMUD->getMemoryInst();
      assert(DefMUDI && "Found MemoryUseOrDef with no Instruction.");
      if (Instruction *NewDefMUDI =
              cast_or_null<Instruction>(VMap.lookup(DefMUDI))) {
        InsnDefining = MSSA->getMemoryAccess(NewDefMUDI);
        if (!CloneWasSimplified)
          assert(InsnDefining && "Defining instruction cannot be nullptr.");
        else if (!InsnDefining || isa<MemoryUse>(InsnDefining)) {
          // The clone was simplified, it's no longer a MemoryDef, look up.
          auto DefIt = DefMUD->getDefsIterator();
          // Since simplified clones only occur in single block cloning, a
          // previous definition must exist, otherwise NewDefMUDI would not
          // have been found in VMap.
          assert(DefIt != MSSA->getBlockDefs(DefMUD->getBlock())->begin() &&
                 "Previous def must exist");
          InsnDefining = getNewDefiningAccessForClone(
              &*(--DefIt), VMap, MPhiMap, CloneWasSimplified, MSSA);
        }
      }
    }
  } else {
    MemoryPhi *DefPhi = cast<MemoryPhi>(InsnDefining);
    if (MemoryAccess *NewDefPhi = MPhiMap.lookup(DefPhi))
      InsnDefining = NewDefPhi;
  }
  assert(InsnDefining && "Defining instruction cannot be nullptr.");
  return InsnDefining;
}

void MemorySSAUpdater::cloneUsesAndDefs(BasicBlock *BB, BasicBlock *NewBB,
                                        const ValueToValueMapTy &VMap,
                                        PhiToDefMap &MPhiMap,
                                        bool CloneWasSimplified) {
  const MemorySSA::AccessList *Acc = MSSA->getBlockAccesses(BB);
  if (!Acc)
    return;
  for (const MemoryAccess &MA : *Acc) {
    if (const MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&MA)) {
      Instruction *Insn = MUD->getMemoryInst();
      // Entry does not exist if the clone of the block did not clone all
      // instructions. This occurs in LoopRotate when cloning instructions
      // from the old header to the old preheader. The cloned instruction may
      // also be a simplified Value, not an Instruction (see LoopRotate).
      // Also in LoopRotate, even when it's an instruction, due to it being
      // simplified, it may be a Use rather than a Def, so we cannot use MUD
      // as a template. Calls coming from updateForClonedBlockIntoPred ensure
      // this.
      if (Instruction *NewInsn =
              dyn_cast_or_null<Instruction>(VMap.lookup(Insn))) {
        MemoryAccess *NewUseOrDef = MSSA->createDefinedAccess(
            NewInsn,
            getNewDefiningAccessForClone(MUD->getDefiningAccess(), VMap,
                                         MPhiMap, CloneWasSimplified, MSSA),
            /*Template=*/CloneWasSimplified ? nullptr : MUD,
            /*CreationMustSucceed=*/CloneWasSimplified ? false : true);
        if (NewUseOrDef)
          MSSA->insertIntoListsForBlock(NewUseOrDef, NewBB, MemorySSA::End);
      }
    }
  }
}

void MemorySSAUpdater::updatePhisWhenInsertingUniqueBackedgeBlock(
    BasicBlock *Header, BasicBlock *Preheader, BasicBlock *BEBlock) {
  auto *MPhi = MSSA->getMemoryAccess(Header);
  if (!MPhi)
    return;

  // Create phi node in the backedge block and populate it with the same
  // incoming values as MPhi. Skip incoming values coming from Preheader.
  auto *NewMPhi = MSSA->createMemoryPhi(BEBlock);
  bool HasUniqueIncomingValue = true;
  MemoryAccess *UniqueValue = nullptr;
  for (unsigned I = 0, E = MPhi->getNumIncomingValues(); I != E; ++I) {
    BasicBlock *IBB = MPhi->getIncomingBlock(I);
    MemoryAccess *IV = MPhi->getIncomingValue(I);
    if (IBB != Preheader) {
      NewMPhi->addIncoming(IV, IBB);
      if (HasUniqueIncomingValue) {
        if (!UniqueValue)
          UniqueValue = IV;
        else if (UniqueValue != IV)
          HasUniqueIncomingValue = false;
      }
    }
  }

  // Update incoming edges into MPhi. Remove all but the incoming edge from
  // Preheader. Add an edge from NewMPhi.
  auto *AccFromPreheader = MPhi->getIncomingValueForBlock(Preheader);
  MPhi->setIncomingValue(0, AccFromPreheader);
  MPhi->setIncomingBlock(0, Preheader);
  for (unsigned I = MPhi->getNumIncomingValues() - 1; I >= 1; --I)
    MPhi->unorderedDeleteIncoming(I);
  MPhi->addIncoming(NewMPhi, BEBlock);

  // If NewMPhi is a trivial phi, remove it. Its use in the header MPhi will
  // be replaced with the unique value.
  tryRemoveTrivialPhi(NewMPhi);
}

void MemorySSAUpdater::updateForClonedLoop(const LoopBlocksRPO &LoopBlocks,
                                           ArrayRef<BasicBlock *> ExitBlocks,
                                           const ValueToValueMapTy &VMap,
                                           bool IgnoreIncomingWithNoClones) {
  PhiToDefMap MPhiMap;

  auto FixPhiIncomingValues = [&](MemoryPhi *Phi, MemoryPhi *NewPhi) {
    assert(Phi && NewPhi && "Invalid Phi nodes.");
    BasicBlock *NewPhiBB = NewPhi->getBlock();
    SmallPtrSet<BasicBlock *, 4> NewPhiBBPreds(pred_begin(NewPhiBB),
                                               pred_end(NewPhiBB));
    for (unsigned It = 0, E = Phi->getNumIncomingValues(); It < E; ++It) {
      MemoryAccess *IncomingAccess = Phi->getIncomingValue(It);
      BasicBlock *IncBB = Phi->getIncomingBlock(It);

      if (BasicBlock *NewIncBB = cast_or_null<BasicBlock>(VMap.lookup(IncBB)))
        IncBB = NewIncBB;
      else if (IgnoreIncomingWithNoClones)
        continue;

      // Now we have IncBB, and will need to add incoming from it to NewPhi.

      // If IncBB is not a predecessor of NewPhiBB, then do not add it.
      // NewPhiBB was cloned without that edge.
      if (!NewPhiBBPreds.count(IncBB))
        continue;

      // Determine incoming value and add it as incoming from IncBB.
      if (MemoryUseOrDef *IncMUD = dyn_cast<MemoryUseOrDef>(IncomingAccess)) {
        if (!MSSA->isLiveOnEntryDef(IncMUD)) {
          Instruction *IncI = IncMUD->getMemoryInst();
          assert(IncI && "Found MemoryUseOrDef with no Instruction.");
          if (Instruction *NewIncI =
                  cast_or_null<Instruction>(VMap.lookup(IncI))) {
            IncMUD = MSSA->getMemoryAccess(NewIncI);
            assert(IncMUD &&
                   "MemoryUseOrDef cannot be null, all preds processed.");
          }
        }
        NewPhi->addIncoming(IncMUD, IncBB);
      } else {
        MemoryPhi *IncPhi = cast<MemoryPhi>(IncomingAccess);
        if (MemoryAccess *NewDefPhi = MPhiMap.lookup(IncPhi))
          NewPhi->addIncoming(NewDefPhi, IncBB);
        else
          NewPhi->addIncoming(IncPhi, IncBB);
      }
    }
    if (auto *SingleAccess = onlySingleValue(NewPhi)) {
      MPhiMap[Phi] = SingleAccess;
      removeMemoryAccess(NewPhi);
    }
  };

  auto ProcessBlock = [&](BasicBlock *BB) {
    BasicBlock *NewBlock = cast_or_null<BasicBlock>(VMap.lookup(BB));
    if (!NewBlock)
      return;

    assert(!MSSA->getWritableBlockAccesses(NewBlock) &&
           "Cloned block should have no accesses");

    // Add MemoryPhi.
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB)) {
      MemoryPhi *NewPhi = MSSA->createMemoryPhi(NewBlock);
      MPhiMap[MPhi] = NewPhi;
    }
    // Update Uses and Defs.
    cloneUsesAndDefs(BB, NewBlock, VMap, MPhiMap);
  };

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    ProcessBlock(BB);

  for (auto BB : llvm::concat<BasicBlock *const>(LoopBlocks, ExitBlocks))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
      if (MemoryAccess *NewPhi = MPhiMap.lookup(MPhi))
        FixPhiIncomingValues(MPhi, cast<MemoryPhi>(NewPhi));
}

void MemorySSAUpdater::updateForClonedBlockIntoPred(
    BasicBlock *BB, BasicBlock *P1, const ValueToValueMapTy &VM) {
  // All defs/phis from outside BB that are used in BB are valid uses in P1,
  // since those defs/phis must have dominated BB and therefore also dominate
  // P1. Defs from BB being used in BB will be replaced with the cloned defs
  // from VM. The uses of BB's Phi (if it exists) in BB will be replaced by
  // the incoming def into the Phi from P1.
  // Instructions cloned into the predecessor are in practice sometimes
  // simplified, so disable the use of the template, and create an access from
  // scratch.
  PhiToDefMap MPhiMap;
  if (MemoryPhi *MPhi = MSSA->getMemoryAccess(BB))
    MPhiMap[MPhi] = MPhi->getIncomingValueForBlock(P1);
  cloneUsesAndDefs(BB, P1, VM, MPhiMap, /*CloneWasSimplified=*/true);
}

template <typename Iter>
void MemorySSAUpdater::privateUpdateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, Iter ValuesBegin, Iter ValuesEnd,
    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> Updates;
  // Update/insert phis in all successors of exit blocks.
  for (auto *Exit : ExitBlocks)
    for (const ValueToValueMapTy *VMap : make_range(ValuesBegin, ValuesEnd))
      if (BasicBlock *NewExit = cast_or_null<BasicBlock>(VMap->lookup(Exit))) {
        BasicBlock *ExitSucc = NewExit->getTerminator()->getSuccessor(0);
        Updates.push_back({DT.Insert, NewExit, ExitSucc});
      }
  applyInsertUpdates(Updates, DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks, const ValueToValueMapTy &VMap,
    DominatorTree &DT) {
  const ValueToValueMapTy *const Arr[] = {&VMap};
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, std::begin(Arr),
                                       std::end(Arr), DT);
}

void MemorySSAUpdater::updateExitBlocksForClonedLoop(
    ArrayRef<BasicBlock *> ExitBlocks,
    ArrayRef<std::unique_ptr<ValueToValueMapTy>> VMaps, DominatorTree &DT) {
  auto GetPtr = [&](const std::unique_ptr<ValueToValueMapTy> &I) {
    return I.get();
  };
  using MappedIteratorType =
      mapped_iterator<const std::unique_ptr<ValueToValueMapTy> *,
                      decltype(GetPtr)>;
  auto MapBegin = MappedIteratorType(VMaps.begin(), GetPtr);
  auto MapEnd = MappedIteratorType(VMaps.end(), GetPtr);
  privateUpdateExitBlocksForClonedLoop(ExitBlocks, MapBegin, MapEnd, DT);
}

void MemorySSAUpdater::applyUpdates(ArrayRef<CFGUpdate> Updates,
                                    DominatorTree &DT) {
  SmallVector<CFGUpdate, 4> DeleteUpdates;
  SmallVector<CFGUpdate, 4> RevDeleteUpdates;
  SmallVector<CFGUpdate, 4> InsertUpdates;
  for (auto &Update : Updates) {
    if (Update.getKind() == DT.Insert)
      InsertUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    else {
      DeleteUpdates.push_back({DT.Delete, Update.getFrom(), Update.getTo()});
      RevDeleteUpdates.push_back({DT.Insert, Update.getFrom(), Update.getTo()});
    }
  }

  if (!DeleteUpdates.empty()) {
    SmallVector<CFGUpdate, 0> Empty;
    // Deletes are applied in reverse, because this CFGView is pretending the
    // deletes did not happen yet, hence the edges still exist.
    DT.applyUpdates(Empty, RevDeleteUpdates);

    // Note: the MSSA update below doesn't distinguish between a GD with
    // (RevDelete, false) and (Delete, true), but this matters for the DT
    // updates above; for "children" purposes they are equivalent; but the
    // updates themselves convey the desired update, used inside DT only.
    GraphDiff<BasicBlock *> GD(RevDeleteUpdates);
    applyInsertUpdates(InsertUpdates, DT, &GD);
    // Update DT to redelete edges; this matches the real CFG so we can
    // perform the standard update without a postview of the CFG.
    DT.applyUpdates(DeleteUpdates);
  } else {
    GraphDiff<BasicBlock *> GD;
    applyInsertUpdates(InsertUpdates, DT, &GD);
  }

  // Update for deleted edges.
  for (auto &Update : DeleteUpdates)
    removeEdge(Update.getFrom(), Update.getTo());
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT) {
  GraphDiff<BasicBlock *> GD;
  applyInsertUpdates(Updates, DT, &GD);
}

void MemorySSAUpdater::applyInsertUpdates(ArrayRef<CFGUpdate> Updates,
                                          DominatorTree &DT,
                                          const GraphDiff<BasicBlock *> *GD) {
  // Get recursive last Def, assuming well formed MSSA and updated DT.
  auto GetLastDef = [&](BasicBlock *BB) -> MemoryAccess * {
    while (true) {
      MemorySSA::DefsList *Defs = MSSA->getWritableBlockDefs(BB);
      // Return last Def or Phi in BB, if it exists.
      if (Defs)
        return &*(--Defs->end());

      // Check number of predecessors, we only care if there's more than one.
      unsigned Count = 0;
      BasicBlock *Pred = nullptr;
      for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
        Pred = Pi;
        Count++;
        if (Count == 2)
          break;
      }

      // If BB has multiple predecessors, get last definition from IDom.
      if (Count != 1) {
        // [SimpleLoopUnswitch] If BB is a dead block, about to be deleted,
        // its DT is invalidated. Return LoE as its last def. This will be
        // added to the MemoryPhi node, and later deleted when the block is
        // deleted.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        if (auto *IDom = DT.getNode(BB)->getIDom())
          if (IDom->getBlock() != BB) {
            BB = IDom->getBlock();
            continue;
          }
        return MSSA->getLiveOnEntryDef();
      } else {
        // Single predecessor, BB cannot be dead. GetLastDef of Pred.
        assert(Count == 1 && Pred && "Single predecessor expected.");
        // BB can be unreachable though, return LoE if that is the case.
        if (!DT.getNode(BB))
          return MSSA->getLiveOnEntryDef();
        BB = Pred;
      }
    }
    llvm_unreachable("Unable to get last definition.");
  };

  // Get nearest IDom given a set of blocks.
  // TODO: this can be optimized by starting the search at the node with the
  // lowest level (highest in the tree).
  auto FindNearestCommonDominator =
      [&](const SmallSetVector<BasicBlock *, 2> &BBSet) -> BasicBlock * {
    BasicBlock *PrevIDom = *BBSet.begin();
    for (auto *BB : BBSet)
      PrevIDom = DT.findNearestCommonDominator(PrevIDom, BB);
    return PrevIDom;
  };

  // Get all blocks that dominate PrevIDom, stop when reaching CurrIDom. Do
  // not include CurrIDom.
  auto GetNoLongerDomBlocks =
      [&](BasicBlock *PrevIDom, BasicBlock *CurrIDom,
          SmallVectorImpl<BasicBlock *> &BlocksPrevDom) {
        if (PrevIDom == CurrIDom)
          return;
        BlocksPrevDom.push_back(PrevIDom);
        BasicBlock *NextIDom = PrevIDom;
        while (BasicBlock *UpIDom =
                   DT.getNode(NextIDom)->getIDom()->getBlock()) {
          if (UpIDom == CurrIDom)
            break;
          BlocksPrevDom.push_back(UpIDom);
          NextIDom = UpIDom;
        }
      };

  // Map a BB to its predecessors: added + previously existing. To get a
  // deterministic order, store predecessors as SetVectors. The order in each
  // will be defined by the order in Updates (fixed) and the order given by
  // children<> (also fixed). Since we further iterate over these ordered
  // sets, we lose the information of multiple edges possibly existing
  // between two blocks, so we'll keep an EdgeCount map for that.
  // An alternate implementation could keep an unordered set for the
  // predecessors, traverse either Updates or children<> each time to get the
  // deterministic order, and drop the usage of EdgeCount. This alternate
  // approach would still require querying the maps for each predecessor, and
  // the children<> call has additional computation inside for creating the
  // snapshot-graph predecessors. As such, we favor using a little additional
  // storage and less compute time. This decision can be revisited if we find
  // the alternative more favorable.

  struct PredInfo {
    SmallSetVector<BasicBlock *, 2> Added;
    SmallSetVector<BasicBlock *, 2> Prev;
  };
  SmallDenseMap<BasicBlock *, PredInfo> PredMap;

  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    auto &AddedBlockSet = PredMap[BB].Added;
    AddedBlockSet.insert(Edge.getFrom());
  }

  // Store all existing predecessors for each BB, at least one must exist.
  SmallDenseMap<std::pair<BasicBlock *, BasicBlock *>, int> EdgeCountMap;
  SmallPtrSet<BasicBlock *, 2> NewBlocks;
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    auto &PrevBlockSet = BBPredPair.second.Prev;
    for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BB)) {
      if (!AddedBlockSet.count(Pi))
        PrevBlockSet.insert(Pi);
      EdgeCountMap[{Pi, BB}]++;
    }

    if (PrevBlockSet.empty()) {
      assert(pred_size(BB) == AddedBlockSet.size() && "Duplicate edges added.");
      LLVM_DEBUG(
          dbgs()
          << "Adding a predecessor to a block with no predecessors. "
             "This must be an edge added to a new, likely cloned, block. "
             "Its memory accesses must be already correct, assuming completed "
             "via the updateExitBlocksForClonedLoop API. "
             "Assert a single such edge is added so no phi addition or "
             "additional processing is required.\n");
      assert(AddedBlockSet.size() == 1 &&
             "Can only handle adding one predecessor to a new block.");
      // Need to remove new blocks from PredMap. Remove below to not
      // invalidate the iterator here.
      NewBlocks.insert(BB);
    }
  }
  // Nothing to process for new/cloned blocks.
  for (auto *BB : NewBlocks)
    PredMap.erase(BB);

  SmallVector<BasicBlock *, 16> BlocksWithDefsToReplace;
  SmallVector<WeakVH, 8> InsertedPhis;

  // First create MemoryPhis in all blocks that don't have one. Create in the
  // order found in Updates, not in PredMap, to get deterministic numbering.
  for (auto &Edge : Updates) {
    BasicBlock *BB = Edge.getTo();
    if (PredMap.count(BB) && !MSSA->getMemoryAccess(BB))
      InsertedPhis.push_back(MSSA->createMemoryPhi(BB));
  }

  // Now we'll fill in the MemoryPhis with the right incoming values.
  for (auto &BBPredPair : PredMap) {
    auto *BB = BBPredPair.first;
    const auto &PrevBlockSet = BBPredPair.second.Prev;
    const auto &AddedBlockSet = BBPredPair.second.Added;
    assert(!PrevBlockSet.empty() &&
           "At least one previous predecessor must exist.");

    // TODO: if this becomes a bottleneck, we can save on GetLastDef calls by
    // keeping this map before the loop. We can reuse already populated
    // entries if an edge is added from the same predecessor to two different
    // blocks, and this does happen in rotate. Note that the map needs to be
    // updated when deleting non-necessary phis below, if the phi is in the
    // map, by replacing the value with DefP1.
    SmallDenseMap<BasicBlock *, MemoryAccess *> LastDefAddedPred;
    for (auto *AddedPred : AddedBlockSet) {
      auto *DefPn = GetLastDef(AddedPred);
      assert(DefPn != nullptr && "Unable to find last definition.");
      LastDefAddedPred[AddedPred] = DefPn;
    }

    MemoryPhi *NewPhi = MSSA->getMemoryAccess(BB);
    // If Phi is not empty, add an incoming edge from each added pred. Must
    // still compute blocks with defs to replace for this block below.
    if (NewPhi->getNumOperands()) {
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
    } else {
      // Pick any existing predecessor and get its definition. All other
      // existing predecessors should have the same one, since no phi existed.
      auto *P1 = *PrevBlockSet.begin();
      MemoryAccess *DefP1 = GetLastDef(P1);

      // Check DefP1 against all Defs in LastDefPredPair. If all the same,
      // nothing to add.
      bool InsertPhi = false;
      for (auto LastDefPredPair : LastDefAddedPred)
        if (DefP1 != LastDefPredPair.second) {
          InsertPhi = true;
          break;
        }
      if (!InsertPhi) {
        // Since NewPhi may be used in other newly added Phis, replace all
        // uses of NewPhi with the definition coming from all predecessors
        // (DefP1), before deleting it.
        NewPhi->replaceAllUsesWith(DefP1);
        removeMemoryAccess(NewPhi);
        continue;
      }

      // Update Phi with new values for new predecessors and old value for
      // all other predecessors. Since AddedBlockSet and PrevBlockSet are
      // ordered sets, the order of entries in NewPhi is deterministic.
      for (auto *Pred : AddedBlockSet) {
        auto *LastDefForPred = LastDefAddedPred[Pred];
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(LastDefForPred, Pred);
      }
      for (auto *Pred : PrevBlockSet)
        for (int I = 0, E = EdgeCountMap[{Pred, BB}]; I < E; ++I)
          NewPhi->addIncoming(DefP1, Pred);
    }

    // Get all blocks that used to dominate BB and no longer do after adding
    // AddedBlockSet, where PrevBlockSet are the previously known
    // predecessors.
    assert(DT.getNode(BB)->getIDom() && "BB does not have valid idom");
    BasicBlock *PrevIDom = FindNearestCommonDominator(PrevBlockSet);
    assert(PrevIDom && "Previous IDom should exist");
    BasicBlock *NewIDom = DT.getNode(BB)->getIDom()->getBlock();
    assert(NewIDom && "BB should have a new valid idom");
    assert(DT.dominates(NewIDom, PrevIDom) &&
           "New idom should dominate old idom");
    GetNoLongerDomBlocks(PrevIDom, NewIDom, BlocksWithDefsToReplace);
  }

  tryRemoveTrivialPhis(InsertedPhis);
  // Create the set of blocks that now have a definition. We'll use this to
  // compute IDF and add Phis there next.
  SmallVector<BasicBlock *, 8> BlocksToProcess;
  for (auto &VH : InsertedPhis)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      BlocksToProcess.push_back(MPhi->getBlock());

  // Compute IDF and add Phis in all IDF blocks that do not have one.
  SmallVector<BasicBlock *, 32> IDFBlocks;
  if (!BlocksToProcess.empty()) {
    ForwardIDFCalculator IDFs(DT, GD);
    SmallPtrSet<BasicBlock *, 16> DefiningBlocks(BlocksToProcess.begin(),
                                                 BlocksToProcess.end());
    IDFs.setDefiningBlocks(DefiningBlocks);
    IDFs.calculate(IDFBlocks);

    SmallSetVector<MemoryPhi *, 4> PhisToFill;
    // First create all needed Phis.
    for (auto *BBIDF : IDFBlocks)
      if (!MSSA->getMemoryAccess(BBIDF)) {
        auto *IDFPhi = MSSA->createMemoryPhi(BBIDF);
        InsertedPhis.push_back(IDFPhi);
        PhisToFill.insert(IDFPhi);
      }
    // Then update or insert their correct incoming values.
    for (auto *BBIDF : IDFBlocks) {
      auto *IDFPhi = MSSA->getMemoryAccess(BBIDF);
      assert(IDFPhi && "Phi must exist");
      if (!PhisToFill.count(IDFPhi)) {
        // Update existing Phi.
        // FIXME: some updates may be redundant, try to optimize and skip
        // some.
        for (unsigned I = 0, E = IDFPhi->getNumIncomingValues(); I < E; ++I)
          IDFPhi->setIncomingValue(I, GetLastDef(IDFPhi->getIncomingBlock(I)));
      } else {
        for (auto *Pi : GD->template getChildren</*InverseEdge=*/true>(BBIDF))
          IDFPhi->addIncoming(GetLastDef(Pi), Pi);
      }
    }
  }

  // Now for all defs in BlocksWithDefsToReplace, if there are uses they no
  // longer dominate, replace those with the closest dominating def.
  // This will also update optimized accesses, as they're also uses.
  for (auto *BlockWithDefsToReplace : BlocksWithDefsToReplace) {
    if (auto DefsList = MSSA->getWritableBlockDefs(BlockWithDefsToReplace)) {
      for (auto &DefToReplaceUses : *DefsList) {
        BasicBlock *DominatingBlock = DefToReplaceUses.getBlock();
        Value::use_iterator UI = DefToReplaceUses.use_begin(),
                            E = DefToReplaceUses.use_end();
        for (; UI != E;) {
          Use &U = *UI;
          ++UI;
          MemoryAccess *Usr = cast<MemoryAccess>(U.getUser());
          if (MemoryPhi *UsrPhi = dyn_cast<MemoryPhi>(Usr)) {
            BasicBlock *DominatedBlock = UsrPhi->getIncomingBlock(U);
            if (!DT.dominates(DominatingBlock, DominatedBlock))
              U.set(GetLastDef(DominatedBlock));
          } else {
            BasicBlock *DominatedBlock = Usr->getBlock();
            if (!DT.dominates(DominatingBlock, DominatedBlock)) {
              if (auto *DomBlPhi = MSSA->getMemoryAccess(DominatedBlock))
                U.set(DomBlPhi);
              else {
                auto *IDom = DT.getNode(DominatedBlock)->getIDom();
                assert(IDom && "Block must have a valid IDom.");
                U.set(GetLastDef(IDom->getBlock()));
              }
              cast<MemoryUseOrDef>(Usr)->resetOptimized();
            }
          }
        }
      }
    }
  }
  tryRemoveTrivialPhis(InsertedPhis);
}

// Move What before Where in the MemorySSA IR.
template <class WhereType>
void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                              WhereType Where) {
  // Mark MemoryPhi users of What not to be optimized.
  for (auto *U : What->users())
    if (MemoryPhi *PhiUser = dyn_cast<MemoryPhi>(U))
      NonOptPhis.insert(PhiUser);

  // Replace all our users with our defining access.
  What->replaceAllUsesWith(What->getDefiningAccess());

  // Let MemorySSA take care of moving it around in the lists.
  MSSA->moveTo(What, BB, Where);

  // Now reinsert it into the IR and do whatever fixups are needed.
  if (auto *MD = dyn_cast<MemoryDef>(What))
    insertDef(MD, /*RenameUses=*/true);
  else
    insertUse(cast<MemoryUse>(What), /*RenameUses=*/true);

  // Clear dangling pointers. We added all MemoryPhi users, but not all
  // of them are removed by fixupDefs().
  NonOptPhis.clear();
}

// Move What before Where in the MemorySSA IR.
void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), Where->getIterator());
}

// Move What after Where in the MemorySSA IR.
void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) {
  moveTo(What, Where->getBlock(), ++Where->getIterator());
}

void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB,
                                   MemorySSA::InsertionPlace Where) {
  if (Where != MemorySSA::InsertionPlace::BeforeTerminator)
    return moveTo(What, BB, Where);

  if (auto *Where = MSSA->getMemoryAccess(BB->getTerminator()))
    return moveBefore(What, Where);
  else
    return moveTo(What, BB, MemorySSA::InsertionPlace::End);
}

// All accesses in To used to be in From. Move to end and update access lists.
void MemorySSAUpdater::moveAllAccesses(BasicBlock *From, BasicBlock *To,
                                       Instruction *Start) {

  MemorySSA::AccessList *Accs = MSSA->getWritableBlockAccesses(From);
  if (!Accs)
    return;

  assert(Start->getParent() == To && "Incorrect Start instruction");
  MemoryAccess *FirstInNew = nullptr;
  for (Instruction &I : make_range(Start->getIterator(), To->end()))
    if ((FirstInNew = MSSA->getMemoryAccess(&I)))
      break;
  if (FirstInNew) {
    auto *MUD = cast<MemoryUseOrDef>(FirstInNew);
    do {
      auto NextIt = ++MUD->getIterator();
      MemoryUseOrDef *NextMUD = (!Accs || NextIt == Accs->end())
                                    ? nullptr
                                    : cast<MemoryUseOrDef>(&*NextIt);
      MSSA->moveTo(MUD, To, MemorySSA::End);
      // Moving MUD from Accs in the moveTo above may delete Accs, so we need
      // to retrieve it again.
      Accs = MSSA->getWritableBlockAccesses(From);
      MUD = NextMUD;
    } while (MUD);
  }

  // If all accesses were moved and only a trivial Phi remains, we try to
  // remove that Phi. This is needed when From is going to be deleted.
  auto *Defs = MSSA->getWritableBlockDefs(From);
  if (Defs && !Defs->empty())
    if (auto *Phi = dyn_cast<MemoryPhi>(&*Defs->begin()))
      tryRemoveTrivialPhi(Phi);
}

void MemorySSAUpdater::moveAllAfterSpliceBlocks(BasicBlock *From,
                                                BasicBlock *To,
                                                Instruction *Start) {
  assert(MSSA->getBlockAccesses(To) == nullptr &&
         "To block is expected to be free of MemoryAccesses.");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(To))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::moveAllAfterMergeBlocks(BasicBlock *From, BasicBlock *To,
                                               Instruction *Start) {
  assert(From->getUniquePredecessor() == To &&
         "From block is expected to have a single predecessor (To).");
  moveAllAccesses(From, To, Start);
  for (BasicBlock *Succ : successors(From))
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Succ))
      MPhi->setIncomingBlock(MPhi->getBasicBlockIndex(From), To);
}

void MemorySSAUpdater::wireOldPredecessorsToNewImmediatePredecessor(
    BasicBlock *Old, BasicBlock *New, ArrayRef<BasicBlock *> Preds,
    bool IdenticalEdgesWereMerged) {
  assert(!MSSA->getWritableBlockAccesses(New) &&
         "Access list should be null for a new block.");
  MemoryPhi *Phi = MSSA->getMemoryAccess(Old);
  if (!Phi)
    return;
  if (Old->hasNPredecessors(1)) {
    assert(pred_size(New) == Preds.size() &&
           "Should have moved all predecessors.");
    MSSA->moveTo(Phi, New, MemorySSA::Beginning);
  } else {
    assert(!Preds.empty() && "Must be moving at least one predecessor to the "
                             "new immediate predecessor.");
    MemoryPhi *NewPhi = MSSA->createMemoryPhi(New);
    SmallPtrSet<BasicBlock *, 16> PredsSet(Preds.begin(), Preds.end());
    // Currently only support the case of removing a single incoming edge when
    // identical edges were not merged.
    if (!IdenticalEdgesWereMerged)
      assert(PredsSet.size() == Preds.size() &&
             "If identical edges were not merged, we cannot have duplicate "
             "blocks in the predecessors");
    Phi->unorderedDeleteIncomingIf([&](MemoryAccess *MA, BasicBlock *B) {
      if (PredsSet.count(B)) {
        NewPhi->addIncoming(MA, B);
        if (!IdenticalEdgesWereMerged)
          PredsSet.erase(B);
        return true;
      }
      return false;
    });
    Phi->addIncoming(NewPhi, New);
    tryRemoveTrivialPhi(NewPhi);
  }
}

void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA, bool OptimizePhis) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this
    // phi, and thus, must dominate the phi's uses, and so we will not hit
    // the assert below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  SmallSetVector<MemoryPhi *, 4> PhisToCheck;

  // Re-point the uses at our defining access.
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have
    // all the same arguments. Instead, we prefer users to call
    // removeMemoryAccess on those phi nodes, because doing it here would be
    // N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    assert(NewDefTarget != MA && "Going into an infinite loop");
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      if (OptimizePhis)
        if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U.getUser()))
          PhisToCheck.insert(MP);
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here.
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);

  // Optionally optimize Phi uses. This will recursively remove trivial phis.
  if (!PhisToCheck.empty()) {
    SmallVector<WeakVH, 16> PhisToOptimize{PhisToCheck.begin(),
                                           PhisToCheck.end()};
    PhisToCheck.clear();

    unsigned PhisSize = PhisToOptimize.size();
    while (PhisSize-- > 0)
      if (MemoryPhi *MP =
              cast_or_null<MemoryPhi>(PhisToOptimize.pop_back_val()))
        tryRemoveTrivialPhi(MP);
  }
}

void MemorySSAUpdater::removeBlocks(
    const SmallSetVector<BasicBlock *, 8> &DeadBlocks) {
  // First delete all uses of BB in MemoryPhis.
  for (BasicBlock *BB : DeadBlocks) {
    Instruction *TI = BB->getTerminator();
    assert(TI && "Basic block expected to have a terminator instruction");
    for (BasicBlock *Succ : successors(TI))
      if (!DeadBlocks.count(Succ))
        if (MemoryPhi *MP = MSSA->getMemoryAccess(Succ)) {
          MP->unorderedDeleteIncomingBlock(BB);
          tryRemoveTrivialPhi(MP);
        }
    // Drop all references of all accesses in BB.
    if (MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB))
      for (MemoryAccess &MA : *Acc)
        MA.dropAllReferences();
  }

  // Next, delete all memory accesses in each block.
  for (BasicBlock *BB : DeadBlocks) {
    MemorySSA::AccessList *Acc = MSSA->getWritableBlockAccesses(BB);
    if (!Acc)
      continue;
    for (auto AB = Acc->begin(), AE = Acc->end(); AB != AE;) {
      MemoryAccess *MA = &*AB;
      ++AB;
      MSSA->removeFromLookups(MA);
      MSSA->removeFromLists(MA);
    }
  }
}

void MemorySSAUpdater::tryRemoveTrivialPhis(ArrayRef<WeakVH> UpdatedPHIs) {
  for (auto &VH : UpdatedPHIs)
    if (auto *MPhi = cast_or_null<MemoryPhi>(VH))
      tryRemoveTrivialPhi(MPhi);
}

void MemorySSAUpdater::changeToUnreachable(const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  // Remove memory accesses in BB for I and all following instructions.
  auto BBI = I->getIterator(), BBE = BB->end();
  // FIXME: If this becomes too expensive, iterate until the first instruction
  // with a memory access, then iterate over MemoryAccesses.
  while (BBI != BBE)
    removeMemoryAccess(&*(BBI++));
  // Update phis in BB's successors to remove BB.
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Successor : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Successor);
    if (MemoryPhi *MPhi = MSSA->getMemoryAccess(Successor)) {
      MPhi->unorderedDeleteIncomingBlock(BB);
      UpdatedPHIs.push_back(MPhi);
    }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

void MemorySSAUpdater::changeCondBranchToUnconditionalTo(const BranchInst *BI,
                                                         const BasicBlock *To) {
  const BasicBlock *BB = BI->getParent();
  SmallVector<WeakVH, 16> UpdatedPHIs;
  for (const BasicBlock *Succ : successors(BB)) {
    removeDuplicatePhiEdgesBetween(BB, Succ);
    if (Succ != To)
      if (auto *MPhi = MSSA->getMemoryAccess(Succ)) {
        MPhi->unorderedDeleteIncomingBlock(BB);
        UpdatedPHIs.push_back(MPhi);
      }
  }
  // Optimize trivial phis.
  tryRemoveTrivialPhis(UpdatedPHIs);
}

MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}