1 //===-- MemorySSAUpdater.cpp - Memory SSA Updater--------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------===// 9 // 10 // This file implements the MemorySSAUpdater class. 11 // 12 //===----------------------------------------------------------------===// 13 #include "llvm/Analysis/MemorySSAUpdater.h" 14 #include "llvm/ADT/STLExtras.h" 15 #include "llvm/ADT/SmallPtrSet.h" 16 #include "llvm/ADT/SmallSet.h" 17 #include "llvm/IR/DataLayout.h" 18 #include "llvm/IR/Dominators.h" 19 #include "llvm/IR/GlobalVariable.h" 20 #include "llvm/IR/IRBuilder.h" 21 #include "llvm/IR/IntrinsicInst.h" 22 #include "llvm/IR/LLVMContext.h" 23 #include "llvm/IR/Metadata.h" 24 #include "llvm/IR/Module.h" 25 #include "llvm/Support/Debug.h" 26 #include "llvm/Support/FormattedStream.h" 27 #include "llvm/Analysis/MemorySSA.h" 28 #include <algorithm> 29 30 #define DEBUG_TYPE "memoryssa" 31 using namespace llvm; 32 33 // This is the marker algorithm from "Simple and Efficient Construction of 34 // Static Single Assignment Form" 35 // The simple, non-marker algorithm places phi nodes at any join 36 // Here, we place markers, and only place phi nodes if they end up necessary. 37 // They are only necessary if they break a cycle (IE we recursively visit 38 // ourselves again), or we discover, while getting the value of the operands, 39 // that there are two or more definitions needing to be merged. 40 // This still will leave non-minimal form in the case of irreducible control 41 // flow, where phi nodes may be in cycles with themselves, but unnecessary. 42 MemoryAccess *MemorySSAUpdater::getPreviousDefRecursive(BasicBlock *BB) { 43 // Single predecessor case, just recurse, we can only have one definition. 
44 if (BasicBlock *Pred = BB->getSinglePredecessor()) { 45 return getPreviousDefFromEnd(Pred); 46 } else if (VisitedBlocks.count(BB)) { 47 // We hit our node again, meaning we had a cycle, we must insert a phi 48 // node to break it so we have an operand. The only case this will 49 // insert useless phis is if we have irreducible control flow. 50 return MSSA->createMemoryPhi(BB); 51 } else if (VisitedBlocks.insert(BB).second) { 52 // Mark us visited so we can detect a cycle 53 SmallVector<MemoryAccess *, 8> PhiOps; 54 55 // Recurse to get the values in our predecessors for placement of a 56 // potential phi node. This will insert phi nodes if we cycle in order to 57 // break the cycle and have an operand. 58 for (auto *Pred : predecessors(BB)) 59 PhiOps.push_back(getPreviousDefFromEnd(Pred)); 60 61 // Now try to simplify the ops to avoid placing a phi. 62 // This may return null if we never created a phi yet, that's okay 63 MemoryPhi *Phi = dyn_cast_or_null<MemoryPhi>(MSSA->getMemoryAccess(BB)); 64 bool PHIExistsButNeedsUpdate = false; 65 // See if the existing phi operands match what we need. 66 // Unlike normal SSA, we only allow one phi node per block, so we can't just 67 // create a new one. 68 if (Phi && Phi->getNumOperands() != 0) 69 if (!std::equal(Phi->op_begin(), Phi->op_end(), PhiOps.begin())) { 70 PHIExistsButNeedsUpdate = true; 71 } 72 73 // See if we can avoid the phi by simplifying it. 74 auto *Result = tryRemoveTrivialPhi(Phi, PhiOps); 75 // If we couldn't simplify, we may have to create a phi 76 if (Result == Phi) { 77 if (!Phi) 78 Phi = MSSA->createMemoryPhi(BB); 79 80 // These will have been filled in by the recursive read we did above. 
81 if (PHIExistsButNeedsUpdate) { 82 std::copy(PhiOps.begin(), PhiOps.end(), Phi->op_begin()); 83 std::copy(pred_begin(BB), pred_end(BB), Phi->block_begin()); 84 } else { 85 unsigned i = 0; 86 for (auto *Pred : predecessors(BB)) 87 Phi->addIncoming(PhiOps[i++], Pred); 88 } 89 90 Result = Phi; 91 } 92 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Result)) 93 InsertedPHIs.push_back(MP); 94 // Set ourselves up for the next variable by resetting visited state. 95 VisitedBlocks.erase(BB); 96 return Result; 97 } 98 llvm_unreachable("Should have hit one of the three cases above"); 99 } 100 101 // This starts at the memory access, and goes backwards in the block to find the 102 // previous definition. If a definition is not found the block of the access, 103 // it continues globally, creating phi nodes to ensure we have a single 104 // definition. 105 MemoryAccess *MemorySSAUpdater::getPreviousDef(MemoryAccess *MA) { 106 auto *LocalResult = getPreviousDefInBlock(MA); 107 108 return LocalResult ? LocalResult : getPreviousDefRecursive(MA->getBlock()); 109 } 110 111 // This starts at the memory access, and goes backwards in the block to the find 112 // the previous definition. If the definition is not found in the block of the 113 // access, it returns nullptr. 114 MemoryAccess *MemorySSAUpdater::getPreviousDefInBlock(MemoryAccess *MA) { 115 auto *Defs = MSSA->getWritableBlockDefs(MA->getBlock()); 116 117 // It's possible there are no defs, or we got handed the first def to start. 118 if (Defs) { 119 // If this is a def, we can just use the def iterators. 120 if (!isa<MemoryUse>(MA)) { 121 auto Iter = MA->getReverseDefsIterator(); 122 ++Iter; 123 if (Iter != Defs->rend()) 124 return &*Iter; 125 } else { 126 // Otherwise, have to walk the all access iterator. 
127 auto Iter = MA->getReverseIterator(); 128 ++Iter; 129 while (&*Iter != &*Defs->begin()) { 130 if (!isa<MemoryUse>(*Iter)) 131 return &*Iter; 132 --Iter; 133 } 134 // At this point it must be pointing at firstdef 135 assert(&*Iter == &*Defs->begin() && 136 "Should have hit first def walking backwards"); 137 return &*Iter; 138 } 139 } 140 return nullptr; 141 } 142 143 // This starts at the end of block 144 MemoryAccess *MemorySSAUpdater::getPreviousDefFromEnd(BasicBlock *BB) { 145 auto *Defs = MSSA->getWritableBlockDefs(BB); 146 147 if (Defs) 148 return &*Defs->rbegin(); 149 150 return getPreviousDefRecursive(BB); 151 } 152 // Recurse over a set of phi uses to eliminate the trivial ones 153 MemoryAccess *MemorySSAUpdater::recursePhi(MemoryAccess *Phi) { 154 if (!Phi) 155 return nullptr; 156 TrackingVH<MemoryAccess> Res(Phi); 157 SmallVector<TrackingVH<Value>, 8> Uses; 158 std::copy(Phi->user_begin(), Phi->user_end(), std::back_inserter(Uses)); 159 for (auto &U : Uses) { 160 if (MemoryPhi *UsePhi = dyn_cast<MemoryPhi>(&*U)) { 161 auto OperRange = UsePhi->operands(); 162 tryRemoveTrivialPhi(UsePhi, OperRange); 163 } 164 } 165 return Res; 166 } 167 168 // Eliminate trivial phis 169 // Phis are trivial if they are defined either by themselves, or all the same 170 // argument. 171 // IE phi(a, a) or b = phi(a, b) or c = phi(a, a, c) 172 // We recursively try to remove them. 
173 template <class RangeType> 174 MemoryAccess *MemorySSAUpdater::tryRemoveTrivialPhi(MemoryPhi *Phi, 175 RangeType &Operands) { 176 // Detect equal or self arguments 177 MemoryAccess *Same = nullptr; 178 for (auto &Op : Operands) { 179 // If the same or self, good so far 180 if (Op == Phi || Op == Same) 181 continue; 182 // not the same, return the phi since it's not eliminatable by us 183 if (Same) 184 return Phi; 185 Same = cast<MemoryAccess>(Op); 186 } 187 // Never found a non-self reference, the phi is undef 188 if (Same == nullptr) 189 return MSSA->getLiveOnEntryDef(); 190 if (Phi) { 191 Phi->replaceAllUsesWith(Same); 192 removeMemoryAccess(Phi); 193 } 194 195 // We should only end up recursing in case we replaced something, in which 196 // case, we may have made other Phis trivial. 197 return recursePhi(Same); 198 } 199 200 void MemorySSAUpdater::insertUse(MemoryUse *MU) { 201 InsertedPHIs.clear(); 202 MU->setDefiningAccess(getPreviousDef(MU)); 203 // Unlike for defs, there is no extra work to do. Because uses do not create 204 // new may-defs, there are only two cases: 205 // 206 // 1. There was a def already below us, and therefore, we should not have 207 // created a phi node because it was already needed for the def. 208 // 209 // 2. There is no def below us, and therefore, there is no extra renaming work 210 // to do. 211 } 212 213 // Set every incoming edge {BB, MP->getBlock()} of MemoryPhi MP to NewDef. 214 static void setMemoryPhiValueForBlock(MemoryPhi *MP, const BasicBlock *BB, 215 MemoryAccess *NewDef) { 216 // Replace any operand with us an incoming block with the new defining 217 // access. 218 int i = MP->getBasicBlockIndex(BB); 219 assert(i != -1 && "Should have found the basic block in the phi"); 220 // We can't just compare i against getNumOperands since one is signed and the 221 // other not. So use it to index into the block iterator. 
222 for (auto BBIter = MP->block_begin() + i; BBIter != MP->block_end(); 223 ++BBIter) { 224 if (*BBIter != BB) 225 break; 226 MP->setIncomingValue(i, NewDef); 227 ++i; 228 } 229 } 230 231 // A brief description of the algorithm: 232 // First, we compute what should define the new def, using the SSA 233 // construction algorithm. 234 // Then, we update the defs below us (and any new phi nodes) in the graph to 235 // point to the correct new defs, to ensure we only have one variable, and no 236 // disconnected stores. 237 void MemorySSAUpdater::insertDef(MemoryDef *MD, bool RenameUses) { 238 InsertedPHIs.clear(); 239 240 // See if we had a local def, and if not, go hunting. 241 MemoryAccess *DefBefore = getPreviousDefInBlock(MD); 242 bool DefBeforeSameBlock = DefBefore != nullptr; 243 if (!DefBefore) 244 DefBefore = getPreviousDefRecursive(MD->getBlock()); 245 246 // There is a def before us, which means we can replace any store/phi uses 247 // of that thing with us, since we are in the way of whatever was there 248 // before. 249 // We now define that def's memorydefs and memoryphis 250 if (DefBeforeSameBlock) { 251 for (auto UI = DefBefore->use_begin(), UE = DefBefore->use_end(); 252 UI != UE;) { 253 Use &U = *UI++; 254 // Leave the uses alone 255 if (isa<MemoryUse>(U.getUser())) 256 continue; 257 U.set(MD); 258 } 259 } 260 261 // and that def is now our defining access. 262 // We change them in this order otherwise we will appear in the use list 263 // above and reset ourselves. 264 MD->setDefiningAccess(DefBefore); 265 266 SmallVector<MemoryAccess *, 8> FixupList(InsertedPHIs.begin(), 267 InsertedPHIs.end()); 268 if (!DefBeforeSameBlock) { 269 // If there was a local def before us, we must have the same effect it 270 // did. Because every may-def is the same, any phis/etc we would create, it 271 // would also have created. 
If there was no local def before us, we 272 // performed a global update, and have to search all successors and make 273 // sure we update the first def in each of them (following all paths until 274 // we hit the first def along each path). This may also insert phi nodes. 275 // TODO: There are other cases we can skip this work, such as when we have a 276 // single successor, and only used a straight line of single pred blocks 277 // backwards to find the def. To make that work, we'd have to track whether 278 // getDefRecursive only ever used the single predecessor case. These types 279 // of paths also only exist in between CFG simplifications. 280 FixupList.push_back(MD); 281 } 282 283 while (!FixupList.empty()) { 284 unsigned StartingPHISize = InsertedPHIs.size(); 285 fixupDefs(FixupList); 286 FixupList.clear(); 287 // Put any new phis on the fixup list, and process them 288 FixupList.append(InsertedPHIs.end() - StartingPHISize, InsertedPHIs.end()); 289 } 290 // Now that all fixups are done, rename all uses if we are asked. 291 if (RenameUses) { 292 SmallPtrSet<BasicBlock *, 16> Visited; 293 BasicBlock *StartBlock = MD->getBlock(); 294 // We are guaranteed there is a def in the block, because we just got it 295 // handed to us in this function. 296 MemoryAccess *FirstDef = &*MSSA->getWritableBlockDefs(StartBlock)->begin(); 297 // Convert to incoming value if it's a memorydef. A phi *is* already an 298 // incoming value. 299 if (auto *MD = dyn_cast<MemoryDef>(FirstDef)) 300 FirstDef = MD->getDefiningAccess(); 301 302 MSSA->renamePass(MD->getBlock(), FirstDef, Visited); 303 // We just inserted a phi into this block, so the incoming value will become 304 // the phi anyway, so it does not matter what we pass. 
305 for (auto *MP : InsertedPHIs) 306 MSSA->renamePass(MP->getBlock(), nullptr, Visited); 307 } 308 } 309 310 void MemorySSAUpdater::fixupDefs(const SmallVectorImpl<MemoryAccess *> &Vars) { 311 SmallPtrSet<const BasicBlock *, 8> Seen; 312 SmallVector<const BasicBlock *, 16> Worklist; 313 for (auto *NewDef : Vars) { 314 // First, see if there is a local def after the operand. 315 auto *Defs = MSSA->getWritableBlockDefs(NewDef->getBlock()); 316 auto DefIter = NewDef->getDefsIterator(); 317 318 // If there is a local def after us, we only have to rename that. 319 if (++DefIter != Defs->end()) { 320 cast<MemoryDef>(DefIter)->setDefiningAccess(NewDef); 321 continue; 322 } 323 324 // Otherwise, we need to search down through the CFG. 325 // For each of our successors, handle it directly if their is a phi, or 326 // place on the fixup worklist. 327 for (const auto *S : successors(NewDef->getBlock())) { 328 if (auto *MP = MSSA->getMemoryAccess(S)) 329 setMemoryPhiValueForBlock(MP, NewDef->getBlock(), NewDef); 330 else 331 Worklist.push_back(S); 332 } 333 334 while (!Worklist.empty()) { 335 const BasicBlock *FixupBlock = Worklist.back(); 336 Worklist.pop_back(); 337 338 // Get the first def in the block that isn't a phi node. 339 if (auto *Defs = MSSA->getWritableBlockDefs(FixupBlock)) { 340 auto *FirstDef = &*Defs->begin(); 341 // The loop above and below should have taken care of phi nodes 342 assert(!isa<MemoryPhi>(FirstDef) && 343 "Should have already handled phi nodes!"); 344 // We are now this def's defining access, make sure we actually dominate 345 // it 346 assert(MSSA->dominates(NewDef, FirstDef) && 347 "Should have dominated the new access"); 348 349 // This may insert new phi nodes, because we are not guaranteed the 350 // block we are processing has a single pred, and depending where the 351 // store was inserted, it may require phi nodes below it. 
352 cast<MemoryDef>(FirstDef)->setDefiningAccess(getPreviousDef(FirstDef)); 353 return; 354 } 355 // We didn't find a def, so we must continue. 356 for (const auto *S : successors(FixupBlock)) { 357 // If there is a phi node, handle it. 358 // Otherwise, put the block on the worklist 359 if (auto *MP = MSSA->getMemoryAccess(S)) 360 setMemoryPhiValueForBlock(MP, FixupBlock, NewDef); 361 else { 362 // If we cycle, we should have ended up at a phi node that we already 363 // processed. FIXME: Double check this 364 if (!Seen.insert(S).second) 365 continue; 366 Worklist.push_back(S); 367 } 368 } 369 } 370 } 371 } 372 373 // Move What before Where in the MemorySSA IR. 374 template <class WhereType> 375 void MemorySSAUpdater::moveTo(MemoryUseOrDef *What, BasicBlock *BB, 376 WhereType Where) { 377 // Replace all our users with our defining access. 378 What->replaceAllUsesWith(What->getDefiningAccess()); 379 380 // Let MemorySSA take care of moving it around in the lists. 381 MSSA->moveTo(What, BB, Where); 382 383 // Now reinsert it into the IR and do whatever fixups needed. 384 if (auto *MD = dyn_cast<MemoryDef>(What)) 385 insertDef(MD); 386 else 387 insertUse(cast<MemoryUse>(What)); 388 } 389 390 // Move What before Where in the MemorySSA IR. 391 void MemorySSAUpdater::moveBefore(MemoryUseOrDef *What, MemoryUseOrDef *Where) { 392 moveTo(What, Where->getBlock(), Where->getIterator()); 393 } 394 395 // Move What after Where in the MemorySSA IR. 396 void MemorySSAUpdater::moveAfter(MemoryUseOrDef *What, MemoryUseOrDef *Where) { 397 moveTo(What, Where->getBlock(), ++Where->getIterator()); 398 } 399 400 void MemorySSAUpdater::moveToPlace(MemoryUseOrDef *What, BasicBlock *BB, 401 MemorySSA::InsertionPlace Where) { 402 return moveTo(What, BB, Where); 403 } 404 405 /// \brief If all arguments of a MemoryPHI are defined by the same incoming 406 /// argument, return that argument. 
static MemoryAccess *onlySingleValue(MemoryPhi *MP) {
  MemoryAccess *MA = nullptr;

  // Track the first operand seen; bail out on the first mismatch.
  for (auto &Arg : MP->operands()) {
    if (!MA)
      MA = cast<MemoryAccess>(Arg);
    else if (MA != Arg)
      return nullptr;
  }
  // Returns nullptr for a no-operand phi as well, since MA is never set.
  return MA;
}

// Remove MA from MemorySSA, re-pointing any non-use users at MA's single
// replacement (its defining access, or the phi's unique incoming value).
void MemorySSAUpdater::removeMemoryAccess(MemoryAccess *MA) {
  assert(!MSSA->isLiveOnEntryDef(MA) &&
         "Trying to remove the live on entry def");
  // We can only delete phi nodes if they have no uses, or we can replace all
  // uses with a single definition.
  MemoryAccess *NewDefTarget = nullptr;
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(MA)) {
    // Note that it is sufficient to know that all edges of the phi node have
    // the same argument. If they do, by the definition of dominance frontiers
    // (which we used to place this phi), that argument must dominate this phi,
    // and thus, must dominate the phi's uses, and so we will not hit the assert
    // below.
    NewDefTarget = onlySingleValue(MP);
    assert((NewDefTarget || MP->use_empty()) &&
           "We can't delete this memory phi");
  } else {
    NewDefTarget = cast<MemoryUseOrDef>(MA)->getDefiningAccess();
  }

  // Re-point the uses at our defining access
  if (!isa<MemoryUse>(MA) && !MA->use_empty()) {
    // Reset optimized on users of this store, and reset the uses.
    // A few notes:
    // 1. This is a slightly modified version of RAUW to avoid walking the
    // uses twice here.
    // 2. If we wanted to be complete, we would have to reset the optimized
    // flags on users of phi nodes if doing the below makes a phi node have all
    // the same arguments. Instead, we prefer users to removeMemoryAccess those
    // phi nodes, because doing it here would be N^3.
    if (MA->hasValueHandle())
      ValueHandleBase::ValueIsRAUWd(MA, NewDefTarget);
    // Note: We assume MemorySSA is not used in metadata since it's not really
    // part of the IR.

    // NOTE(review): if NewDefTarget could ever equal MA (e.g. a phi whose
    // only distinct incoming value is itself), this loop would not terminate
    // -- presumably callers never pass such a phi; confirm.
    while (!MA->use_empty()) {
      Use &U = *MA->use_begin();
      // Setting the use removes it from MA's use list, so the loop advances.
      if (auto *MUD = dyn_cast<MemoryUseOrDef>(U.getUser()))
        MUD->resetOptimized();
      U.set(NewDefTarget);
    }
  }

  // The call below to erase will destroy MA, so we can't change the order we
  // are doing things here
  MSSA->removeFromLookups(MA);
  MSSA->removeFromLists(MA);
}

// Create a new access for I defined by Definition, and insert it into BB at
// the given insertion point (beginning or end of the block's access list).
MemoryAccess *MemorySSAUpdater::createMemoryAccessInBB(
    Instruction *I, MemoryAccess *Definition, const BasicBlock *BB,
    MemorySSA::InsertionPlace Point) {
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsForBlock(NewAccess, BB, Point);
  return NewAccess;
}

// Create a new access for I defined by Definition, placed immediately before
// InsertPt in InsertPt's block.
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessBefore(
    Instruction *I, MemoryAccess *Definition, MemoryUseOrDef *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              InsertPt->getIterator());
  return NewAccess;
}

// Create a new access for I defined by Definition, placed immediately after
// InsertPt in InsertPt's block (hence the incremented iterator).
MemoryUseOrDef *MemorySSAUpdater::createMemoryAccessAfter(
    Instruction *I, MemoryAccess *Definition, MemoryAccess *InsertPt) {
  assert(I->getParent() == InsertPt->getBlock() &&
         "New and old access must be in the same block");
  MemoryUseOrDef *NewAccess = MSSA->createDefinedAccess(I, Definition);
  MSSA->insertIntoListsBefore(NewAccess, InsertPt->getBlock(),
                              ++InsertPt->getIterator());
  return NewAccess;
}