//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references.  It promotes
// alloca instructions which only have loads and stores as uses.  An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
// The algorithm used here is based on:
//
//   Sreedhar and Gao. A linear time algorithm for placing phi-nodes.
//   In Proceedings of the 22nd ACM SIGPLAN-SIGACT Symposium on Principles of
//   Programming Languages
//   POPL '95. ACM, New York, NY, 62-73.
//
// It has been modified to not explicitly use the DJ graph data structure and to
// directly compute pruned SSA using per-variable liveness information.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "mem2reg"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CFG.h"
#include <algorithm>
#include <map>
#include <queue>
using namespace llvm;

STATISTIC(NumLocalPromoted, "Number of allocas promoted within one block");
STATISTIC(NumSingleStore,   "Number of allocas promoted with a single store");
STATISTIC(NumDeadAlloca,    "Number of dead allocas removed");
STATISTIC(NumPHIInsert,     "Number of PHI nodes inserted");

namespace llvm {
template<>
struct DenseMapInfo<std::pair<BasicBlock*, unsigned> > {
  typedef std::pair<BasicBlock*, unsigned> EltTy;
  static inline EltTy getEmptyKey() {
    return EltTy(reinterpret_cast<BasicBlock*>(-1), ~0U);
  }
  static inline EltTy getTombstoneKey() {
    return EltTy(reinterpret_cast<BasicBlock*>(-2), 0U);
  }
  static unsigned getHashValue(const std::pair<BasicBlock*, unsigned> &Val) {
    return DenseMapInfo<void*>::getHashValue(Val.first) + Val.second*2;
  }
  static bool isEqual(const EltTy &LHS, const EltTy &RHS) {
    return LHS == RHS;
  }
};
}

/// isAllocaPromotable - Return true if this alloca is legal for promotion.
/// This is true if there are only loads and stores to the alloca.
///
bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.

  // Only allow direct and non-volatile loads and stores...
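  // For illustration (simplified, hypothetical IR): an alloca such as
  //
  //   %p = alloca i32
  //   call void @f(i32* %p)
  //
  // is not promotable, because the call is neither a load nor a store and the
  // address may escape.  The loop below also rejects volatile accesses and
  // stores of the alloca's own address.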
  for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
       UI != UE; ++UI) {   // Loop over all of the uses of the alloca
    const User *U = *UI;
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (LI->isVolatile())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI)
        return false;   // Don't allow a store OF the AI, only INTO the AI.
      if (SI->isVolatile())
        return false;
    } else {
      return false;
    }
  }

  return true;
}

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
static DbgDeclareInst *FindAllocaDbgDeclare(Value *V) {
  if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), &V, 1))
    for (Value::use_iterator UI = DebugNode->use_begin(),
         E = DebugNode->use_end(); UI != E; ++UI)
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
        return DDI;

  return 0;
}

namespace {
struct AllocaInfo;

// Data package used by RenamePass()
class RenamePassData {
public:
  typedef std::vector<Value *> ValVector;

  RenamePassData() : BB(NULL), Pred(NULL), Values() {}
  RenamePassData(BasicBlock *B, BasicBlock *P,
                 const ValVector &V) : BB(B), Pred(P), Values(V) {}
  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;

  void swap(RenamePassData &RHS) {
    std::swap(BB, RHS.BB);
    std::swap(Pred, RHS.Pred);
    Values.swap(RHS.Values);
  }
};

/// LargeBlockInfo - This assigns and keeps a per-bb relative ordering of
/// load/store instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// InstNumbers - For each instruction that we track, keep the index of the
  /// instruction.  The index starts out as the number of the instruction from
  /// the start of the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;
public:

  /// isInterestingInstruction - This code only looks at accesses to allocas.
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// getInstructionIndex - Get or calculate the index of the specified
  /// instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end()) return It->second;

    // Scan the whole block to get the instruction.  This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
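    // (The single linear scan below numbers every alloca load/store in the
    // block, so later queries for other instructions in this block are plain
    // map lookups; this is what keeps promotion of many allocas in one huge
    // block from becoming quadratic.)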
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (BasicBlock::const_iterator BBI = BB->begin(), E = BB->end();
         BBI != E; ++BBI)
      if (isInterestingInstruction(BBI))
        InstNumbers[BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) {
    InstNumbers.erase(I);
  }

  void clear() {
    InstNumbers.clear();
  }
};

struct PromoteMem2Reg {
  /// Allocas - The alloca instructions being promoted.
  ///
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT;
  DIBuilder *DIB;

  /// AST - An AliasSetTracker object to update.  If null, don't update it.
  ///
  AliasSetTracker *AST;

  /// AllocaLookup - Reverse mapping of Allocas.
  ///
  DenseMap<AllocaInst*, unsigned> AllocaLookup;

  /// NewPhiNodes - The PhiNodes we're adding.
  ///
  DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*> NewPhiNodes;

  /// PhiToAllocaMap - For each PHI node, keep track of which entry in Allocas
  /// it corresponds to.
  DenseMap<PHINode*, unsigned> PhiToAllocaMap;

  /// PointerAllocaValues - If we are updating an AliasSetTracker, then for
  /// each alloca that is of pointer type, we keep track of what to copyValue
  /// to the inserted PHI nodes here.
  ///
  std::vector<Value*> PointerAllocaValues;

  /// AllocaDbgDeclares - For each alloca, we keep track of the dbg.declare
  /// intrinsic that describes it, if any, so that we can convert it to a
  /// dbg.value intrinsic if the alloca gets promoted.
  SmallVector<DbgDeclareInst*, 8> AllocaDbgDeclares;

  /// Visited - The set of basic blocks the renamer has already visited.
  ///
  SmallPtrSet<BasicBlock*, 16> Visited;

  /// BBNumbers - Contains a stable numbering of basic blocks to avoid
  /// non-deterministic behavior.
  DenseMap<BasicBlock*, unsigned> BBNumbers;

  /// DomLevels - Maps DomTreeNodes to their level in the dominator tree.
  DenseMap<DomTreeNode*, unsigned> DomLevels;

  /// BBNumPreds - Lazily compute the number of predecessors a block has.
  DenseMap<const BasicBlock*, unsigned> BBNumPreds;
public:
  PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
                 AliasSetTracker *ast)
    : Allocas(A), DT(dt), DIB(0), AST(ast) {}
  ~PromoteMem2Reg() {
    delete DIB;
  }

  void run();

  /// dominates - Return true if BB1 dominates BB2 using the DominatorTree.
  ///
  bool dominates(BasicBlock *BB1, BasicBlock *BB2) const {
    return DT.dominates(BB1, BB2);
  }

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      // Cache the count biased by one so that a cached value of zero means
      // "not computed yet" rather than "no predecessors".
      NP = std::distance(pred_begin(BB), pred_end(BB))+1;
    return NP-1;
  }

  void DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                               AllocaInfo &Info);
  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSet<BasicBlock*, 32> &DefBlocks,
                           SmallPtrSet<BasicBlock*, 32> &LiveInBlocks);

  void RewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                LargeBlockInfo &LBI);
  void PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
                                LargeBlockInfo &LBI);
  void ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI, StoreInst *SI);


  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};

struct AllocaInfo {
  SmallVector<BasicBlock*, 32> DefiningBlocks;
  SmallVector<BasicBlock*, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  Value *AllocaPointerVal;
  DbgDeclareInst *DbgDeclare;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = 0;
    OnlyBlock = 0;
    OnlyUsedInOneBlock = true;
    AllocaPointerVal = 0;
    DbgDeclare = 0;
  }

  /// AnalyzeAlloca - Scan the uses of the specified alloca, filling in our
  /// ivars.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
         UI != E;) {
      Instruction *User = cast<Instruction>(*UI++);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        AllocaPointerVal = SI->getOperand(0);
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction, keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
        AllocaPointerVal = LI;
      }

      if (OnlyUsedInOneBlock) {
        if (OnlyBlock == 0)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    DbgDeclare = FindAllocaDbgDeclare(AI);
  }
};

typedef std::pair<DomTreeNode*, unsigned> DomTreeNodePair;

struct DomTreeNodeCompare {
  bool operator()(const DomTreeNodePair &LHS, const DomTreeNodePair &RHS) {
    return LHS.second < RHS.second;
  }
};
} // end of anonymous namespace


void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  if (AST) PointerAllocaValues.resize(Allocas.size());
  AllocaDbgDeclares.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) &&
           "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is the same as "
           "the dominator tree's!");

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      if (AST) AST->deleteValue(AI);
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed.
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca.  This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      RewriteSingleStoreAlloca(AI, Info, LBI);

      // Finally, after the scan, check to see if the store is all that is
      // left.
      if (Info.UsingBlocks.empty()) {
        // Record debuginfo for the store and remove the declaration's
        // debuginfo.
        if (DbgDeclareInst *DDI = Info.DbgDeclare) {
          ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore);
          DDI->eraseFromParent();
        }
        // Remove the (now dead) store and alloca.
        Info.OnlyStore->eraseFromParent();
        LBI.deleteValue(Info.OnlyStore);

        if (AST) AST->deleteValue(AI);
        AI->eraseFromParent();
        LBI.deleteValue(AI);

        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);

        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform
    // a linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock) {
      PromoteSingleBlockAlloca(AI, Info, LBI);

      // Finally, after the scan, check to see if the stores are all that is
      // left.
      if (Info.UsingBlocks.empty()) {

        // Remove the (now dead) stores and alloca.
        while (!AI->use_empty()) {
          StoreInst *SI = cast<StoreInst>(AI->use_back());
          // Record debuginfo for the store before removing it.
          if (DbgDeclareInst *DDI = Info.DbgDeclare)
            ConvertDebugDeclareToDebugValue(DDI, SI);
          SI->eraseFromParent();
          LBI.deleteValue(SI);
        }

        if (AST) AST->deleteValue(AI);
        AI->eraseFromParent();
        LBI.deleteValue(AI);

        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);

        // The alloca's debuginfo can be removed as well.
        if (DbgDeclareInst *DDI = Info.DbgDeclare)
          DDI->eraseFromParent();

        ++NumLocalPromoted;
        continue;
      }
    }

    // If we haven't computed dominator tree levels, do so now.
    if (DomLevels.empty()) {
      SmallVector<DomTreeNode*, 32> Worklist;

      DomTreeNode *Root = DT.getRootNode();
      DomLevels[Root] = 0;
      Worklist.push_back(Root);

      while (!Worklist.empty()) {
        DomTreeNode *Node = Worklist.pop_back_val();
        unsigned ChildLevel = DomLevels[Node] + 1;
        for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end();
             CI != CE; ++CI) {
          DomLevels[*CI] = ChildLevel;
          Worklist.push_back(*CI);
        }
      }
    }

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        BBNumbers[I] = ID++;
    }

    // If we have an AST to keep updated, remember some pointer value that is
    // stored into the alloca.
    if (AST)
      PointerAllocaValues[AllocaNum] = Info.AllocaPointerVal;

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (Info.DbgDeclare) AllocaDbgDeclares[AllocaNum] = Info.DbgDeclare;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // At this point, we're committed to promoting the alloca using IDFs and
    // the standard SSA construction algorithm.  Determine which blocks need
    // PHI nodes and see if we can optimize out some work by avoiding insertion
    // of dead phi nodes.
    DetermineInsertionPoint(AI, AllocaNum, Info);
  }

  if (Allocas.empty())
    return; // All of the allocas must have been trivial!

  LBI.clear();


  // Set the incoming values for the basic block to be undef values for all of
  // the allocas.  We do this in case there is a load of a value that has not
  // been stored yet.  In this case, it will get this undef value.
  //
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // Walk all basic blocks in the function, performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  //
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.push_back(RenamePassData(F.begin(), 0, Values));
  do {
    RenamePassData RPD;
    RPD.swap(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops.  Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
    Instruction *A = Allocas[i];

    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the
    // dominator tree.  Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(UndefValue::get(A->getType()));
    if (AST) AST->deleteValue(A);
    A->eraseFromParent();
  }

  // Remove the allocas' dbg.declare intrinsics from the function.
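  // (These declarations refer to stack slots that no longer exist; the
  // variable locations are now carried by the dbg.value intrinsics created in
  // ConvertDebugDeclareToDebugValue.)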
  for (unsigned i = 0, e = AllocaDbgDeclares.size(); i != e; ++i)
    if (DbgDeclareInst *DDI = AllocaDbgDeclares[i])
      DDI->eraseFromParent();

  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values.  This can
  // happen due to undef values coming into the PHI nodes.  This process is
  // iterative, because eliminating one PHI node can cause others to be
  // removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    for (DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*>::iterator I =
           NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
      if (Value *V = SimplifyInstruction(PN, 0, &DT)) {
        if (AST && PN->getType()->isPointerTy())
          AST->deleteValue(PN);
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all
  // reachable code.  Unfortunately, there may be unreachable blocks which the
  // renamer hasn't traversed.  If this is the case, the PHI nodes may not
  // have incoming values for all predecessors.  Loop over all PHI nodes we
  // have created, inserting undef values if they are missing any incoming
  // values.
  //
  for (DenseMap<std::pair<BasicBlock*, unsigned>, PHINode*>::iterator I =
         NewPhiNodes.begin(), E = NewPhiNodes.end(); I != E; ++I) {
    // We want to do this once per basic block.  As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values.  We know
    // that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock*, 16> Preds(pred_begin(BB), pred_end(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks.  Start by sorting the incoming predecessors for efficient
    // access.
    std::sort(Preds.begin(), Preds.end());

    // Now we loop through all BB's which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVector<BasicBlock*, 16>::iterator EntIt =
        std::lower_bound(Preds.begin(), Preds.end(),
                         SomePHI->getIncomingBlock(i));
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry.
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy entries
    // inserted into every PHI node in the block.  Update all the phi nodes in
    // this block that we are inserting (there could be phis before mem2reg
    // runs).
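    // Every PHI this pass inserted in BB currently has the same (deficient)
    // operand count, so the loop below stops as soon as it reaches a phi with
    // a different count, i.e. one that existed before mem2reg ran.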
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *UndefVal = UndefValue::get(SomePHI->getType());
      for (unsigned pred = 0, e = Preds.size(); pred != e; ++pred)
        SomePHI->addIncoming(UndefVal, Preds[pred]);
    }
  }

  NewPhiNodes.clear();
}


/// ComputeLiveInBlocks - Determine which blocks the value is live in.  These
/// are blocks which lead to uses.  Knowing this allows us to avoid inserting
/// PHI nodes into blocks which don't lead to uses (thus, the inserted phi
/// nodes would be dead).
void PromoteMem2Reg::
ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                    const SmallPtrSet<BasicBlock*, 32> &DefBlocks,
                    SmallPtrSet<BasicBlock*, 32> &LiveInBlocks) {

  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live.  Blocks are added to the worklist if we need to
  // check their predecessors.  Start with all the using blocks.
  SmallVector<BasicBlock*, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                   Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use.  If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB)) continue;

    // Okay, this is a block that both uses and defines the value.  If the
    // first reference to the alloca is a def (store), then we know it isn't
    // live-in.
    for (BasicBlock::iterator I = BB->begin(); ; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI) continue;

        // We found a store to the alloca before a load.  The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i, --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        if (LI->getOperand(0) != AI) continue;

        // Okay, we found a load before a store to the alloca.  It is actually
        // live into this block.
        break;
      }
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set.  If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB))
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too.  Add the preds to the worklist unless they are a
    // defining block.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      BasicBlock *P = *PI;

      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}

/// DetermineInsertionPoint - At this point, we're committed to promoting the
/// alloca using IDFs and the standard SSA construction algorithm.  Determine
/// which blocks need phi nodes and see if we can optimize out some work by
/// avoiding insertion of dead phi nodes.
void PromoteMem2Reg::DetermineInsertionPoint(AllocaInst *AI, unsigned AllocaNum,
                                             AllocaInfo &Info) {
  // Unique the set of defining blocks for efficient lookup.
  SmallPtrSet<BasicBlock*, 32> DefBlocks;
  DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

  // Determine which blocks the value is live in.  These are blocks which lead
  // to uses.
  SmallPtrSet<BasicBlock*, 32> LiveInBlocks;
  ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

  // Use a priority queue keyed on dominator tree level so that inserted nodes
  // are handled from the bottom of the dominator tree upwards.
  typedef std::priority_queue<DomTreeNodePair, SmallVector<DomTreeNodePair, 32>,
                              DomTreeNodeCompare> IDFPriorityQueue;
  IDFPriorityQueue PQ;

  for (SmallPtrSet<BasicBlock*, 32>::const_iterator I = DefBlocks.begin(),
       E = DefBlocks.end(); I != E; ++I) {
    if (DomTreeNode *Node = DT.getNode(*I))
      PQ.push(std::make_pair(Node, DomLevels[Node]));
  }

  SmallVector<std::pair<unsigned, BasicBlock*>, 32> DFBlocks;
  SmallPtrSet<DomTreeNode*, 32> Visited;
  SmallVector<DomTreeNode*, 32> Worklist;
  while (!PQ.empty()) {
    DomTreeNodePair RootPair = PQ.top();
    PQ.pop();
    DomTreeNode *Root = RootPair.first;
    unsigned RootLevel = RootPair.second;

    // Walk all dominator tree children of Root, inspecting their CFG edges
    // with targets elsewhere on the dominator tree.  Only targets whose level
    // is at most Root's level are added to the iterated dominance frontier of
    // the definition set.

    Worklist.clear();
    Worklist.push_back(Root);

    while (!Worklist.empty()) {
      DomTreeNode *Node = Worklist.pop_back_val();
      BasicBlock *BB = Node->getBlock();

      for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE;
           ++SI) {
        DomTreeNode *SuccNode = DT.getNode(*SI);

        // Quickly skip all CFG edges that are also dominator tree edges
        // instead of catching them below.
        if (SuccNode->getIDom() == Node)
          continue;

        unsigned SuccLevel = DomLevels[SuccNode];
        if (SuccLevel > RootLevel)
          continue;

        if (!Visited.insert(SuccNode))
          continue;

        BasicBlock *SuccBB = SuccNode->getBlock();
        if (!LiveInBlocks.count(SuccBB))
          continue;

        DFBlocks.push_back(std::make_pair(BBNumbers[SuccBB], SuccBB));
        if (!DefBlocks.count(SuccBB))
          PQ.push(std::make_pair(SuccNode, SuccLevel));
      }

      for (DomTreeNode::iterator CI = Node->begin(), CE = Node->end();
           CI != CE; ++CI) {
        if (!Visited.count(*CI))
          Worklist.push_back(*CI);
      }
    }
  }

  if (DFBlocks.size() > 1)
    std::sort(DFBlocks.begin(), DFBlocks.end());

  unsigned CurrentVersion = 0;
  for (unsigned i = 0, e = DFBlocks.size(); i != e; ++i)
    QueuePhiNode(DFBlocks[i].second, AllocaNum, CurrentVersion);
}

/// RewriteSingleStoreAlloca - If there is only a single store to this value,
/// replace any loads of it that are directly dominated by the definition with
/// the value stored.
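///
/// For illustration (simplified, hypothetical IR):
///
///   store i32 %v, i32* %a
///   ...
///   %x = load i32* %a    ; rewritten to use %v directly if the store
///                        ; dominates this load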
void PromoteMem2Reg::RewriteSingleStoreAlloca(AllocaInst *AI,
                                              AllocaInfo &Info,
                                              LargeBlockInfo &LBI) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks.  We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E; ) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca.  We can do this if the value is
    // dominated by the store.  If not, we use the rest of the mem2reg
    // machinery to insert the phi nodes as needed.
    if (!StoringGlobalVal) {  // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first.  If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }

      } else if (LI->getParent() != StoreBB &&
                 !dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships.  If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());
    LI->replaceAllUsesWith(ReplVal);
    if (AST && LI->getType()->isPointerTy())
      AST->deleteValue(LI);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }
}

namespace {

/// StoreIndexSearchPredicate - This is a helper predicate used to search by
/// the first element of a pair.
struct StoreIndexSearchPredicate {
  bool operator()(const std::pair<unsigned, StoreInst*> &LHS,
                  const std::pair<unsigned, StoreInst*> &RHS) {
    return LHS.first < RHS.first;
  }
};

}

/// PromoteSingleBlockAlloca - Many allocas are only used within a single basic
/// block.  If this is the case, avoid traversing the CFG and inserting a lot
/// of potentially useless PHI nodes by just performing a single linear pass
/// over the basic block using the Alloca.
///
/// If we cannot promote a load of this alloca (because it is read before it is
/// written), the load's block is recorded in Info.UsingBlocks so that the
/// general promotion path handles it.  This situation can arise, due to
/// control flow, when the alloca is only conditionally written; code like the
/// following is still correct:
///
///   for (...) { if (c) { A = x; ... = A; } }
///
/// ... so long as A is never read before it has been written on the path
/// actually taken.
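///
/// For illustration (simplified, hypothetical IR), within one block:
///
///   store i32 1, i32* %a
///   %x = load i32* %a     ; forwarded from the store above (1)
///   store i32 2, i32* %a
///   %y = load i32* %a     ; forwarded from the nearest earlier store (2)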
///
void PromoteMem2Reg::PromoteSingleBlockAlloca(AllocaInst *AI, AllocaInfo &Info,
                                              LargeBlockInfo &LBI) {
  // The trickiest case to handle is when we have large blocks.  Because of
  // this, this code is optimized assuming that large blocks happen.  This does
  // not significantly pessimize the small block case.  This uses
  // LargeBlockInfo to make it efficient to get the index of various operations
  // in the block.

  // Clear out UsingBlocks.  We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  // Walk the use-def list of the alloca, getting the locations of all stores.
  typedef SmallVector<std::pair<unsigned, StoreInst*>, 64> StoresByIndexTy;
  StoresByIndexTy StoresByIndex;

  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ++UI)
    if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // If there are no stores to the alloca, just replace any loads with undef.
  if (StoresByIndex.empty()) {
    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;)
      if (LoadInst *LI = dyn_cast<LoadInst>(*UI++)) {
        LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
        if (AST && LI->getType()->isPointerTy())
          AST->deleteValue(LI);
        LBI.deleteValue(LI);
        LI->eraseFromParent();
      }
    return;
  }

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  std::sort(StoresByIndex.begin(), StoresByIndex.end());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end(); UI != E;) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI++);
    if (!LI) continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store whose index is lower than this load's.
    StoresByIndexTy::iterator I =
      std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                       std::pair<unsigned, StoreInst*>(LoadIdx,
                                                   static_cast<StoreInst*>(0)),
                       StoreIndexSearchPredicate());

    // If there is no store before this load, then we can't promote this load.
    if (I == StoresByIndex.begin()) {
      // Can't handle this load, bail out.
      Info.UsingBlocks.push_back(LI->getParent());
      continue;
    }

    // Otherwise, there was a store before this load, the load takes its value.
    --I;
    LI->replaceAllUsesWith(I->second->getOperand(0));
    if (AST && LI->getType()->isPointerTy())
      AST->deleteValue(LI);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }
}

// Inserts an llvm.dbg.value intrinsic before the stores to an alloca'd value
// that has an associated llvm.dbg.declare intrinsic.
void PromoteMem2Reg::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                                     StoreInst *SI) {
  DIVariable DIVar(DDI->getVariable());
  if (!DIVar.Verify())
    return;

  if (!DIB)
    DIB = new DIBuilder(*SI->getParent()->getParent()->getParent());
  Instruction *DbgVal = DIB->insertDbgValueIntrinsic(SI->getOperand(0), 0,
                                                     DIVar, SI);

  // Propagate any debug metadata from the store onto the dbg.value.
  DebugLoc SIDL = SI->getDebugLoc();
  if (!SIDL.isUnknown())
    DbgVal->setDebugLoc(SIDL);
  // Otherwise propagate debug metadata from the dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
}

// QueuePhiNode - Queues a phi-node to be added to a basic-block for a specific
// alloca.  Returns true if there wasn't already a phi-node for that variable.
//
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BB, AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're
  // done!
  if (PN) return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to
  // the BasicBlock.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       BB->begin());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  PN->reserveOperandSpace(getNumPreds(BB));

  if (AST && PN->getType()->isPointerTy())
    AST->copyValue(PointerAllocaValues[AllocaNo], PN);

  return true;
}

// RenamePass - Recursively traverse the CFG of the function, renaming loads
// and stores to the allocas which we are promoting.  IncomingVals indicates
// what value each Alloca contains on exit from the predecessor block Pred.
//
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in
  // the block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish between PHI nodes being inserted by
      // this invocation of mem2reg from those phi nodes that already existed
      // in the IR before mem2reg was run.  We determine that APN is being
      // inserted because it is missing incoming edges.  All other PHI nodes
      // being inserted by this pass of mem2reg will have the same number of
      // incoming operands so far.  Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = 0;
      for (succ_iterator I = succ_begin(Pred), E = succ_end(Pred); I != E; ++I)
        if (*I == BB)
          ++NumEdges;
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (APN == 0) break;

        // Verify that it is missing entries.  If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
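  // (A block with multiple predecessors is reached once per predecessor; the
  // PHI-operand update above must run for each of those visits, but the
  // rewriting below happens only on the first one.)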
  if (!Visited.insert(BB)) return;

  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II); ) {
    Instruction *I = II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src) continue;

      DenseMap<AllocaInst*, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end()) continue;

      Value *V = IncomingVals[AI->second];

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      if (AST && LI->getType()->isPointerTy())
        AST->deleteValue(LI);
      BB->getInstList().erase(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value.
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest) continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // What value were we writing?
      IncomingVals[ai->second] = SI->getOperand(0);
      // Record debuginfo for the store before removing it.
      if (DbgDeclareInst *DDI = AllocaDbgDeclares[ai->second])
        ConvertDebugDeclareToDebugValue(DDI, SI);
      BB->getInstList().erase(SI);
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E) return;

  // Keep track of the successors so we don't visit the same successor twice.
  SmallPtrSet<BasicBlock*, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I))
      Worklist.push_back(RenamePassData(*I, Pred, IncomingVals));

  goto NextIteration;
}

/// PromoteMemToReg - Promote the specified list of alloca instructions into
/// scalar registers, inserting PHI nodes as appropriate.  This function does
/// not modify the CFG of the function at all.  All allocas must be from the
/// same function.
///
/// If AST is specified, the specified tracker is updated to reflect changes
/// made to the IR.
///
void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                           DominatorTree &DT, AliasSetTracker *AST) {
  // If there is nothing to do, bail out...
  if (Allocas.empty()) return;

  PromoteMem2Reg(Allocas, DT, AST).run();
}
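// Example usage (illustrative sketch, not part of this file): a caller such as
// the mem2reg pass typically collects promotable allocas from the entry block
// and hands them to PromoteMemToReg.  Here F and DT stand for the caller's
// Function and DominatorTree:
//
//   std::vector<AllocaInst*> Allocas;
//   BasicBlock &Entry = F.getEntryBlock();
//   for (BasicBlock::iterator I = Entry.begin(), E = Entry.end(); I != E; ++I)
//     if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
//       if (isAllocaPromotable(AI))
//         Allocas.push_back(AI);
//   if (!Allocas.empty())
//     PromoteMemToReg(Allocas, DT, 0);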