//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "mem2reg"

STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");

bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.
  unsigned AS = AI->getType()->getAddressSpace();

  // Only allow direct and non-volatile loads and stores...
  for (const User *U : AI->users()) {
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Note that atomic loads can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (LI->isVolatile())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI)
        return false; // Don't allow a store OF the AI, only INTO the AI.
      // Note that atomic stores can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (SI->isVolatile())
        return false;
    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
          II->getIntrinsicID() != Intrinsic::lifetime_end)
        return false;
    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!onlyUsedByLifetimeMarkers(BCI))
        return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!GEPI->hasAllZeroIndices())
        return false;
      if (!onlyUsedByLifetimeMarkers(GEPI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}

namespace {

struct AllocaInfo {
  SmallVector<BasicBlock *, 32> DefiningBlocks;
  SmallVector<BasicBlock *, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  Value *AllocaPointerVal;
  DbgDeclareInst *DbgDeclare;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = nullptr;
    OnlyBlock = nullptr;
    OnlyUsedInOneBlock = true;
    AllocaPointerVal = nullptr;
    DbgDeclare = nullptr;
  }

  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
  /// by the rest of the pass to reason about the uses of this alloca.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
      Instruction *User = cast<Instruction>(*UI++);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        AllocaPointerVal = SI->getOperand(0);
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction, keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
        AllocaPointerVal = LI;
      }

      if (OnlyUsedInOneBlock) {
        if (!OnlyBlock)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    DbgDeclare = FindAllocaDbgDeclare(AI);
  }
};

// Data package used by RenamePass().
class RenamePassData {
public:
  typedef std::vector<Value *> ValVector;

  RenamePassData() : BB(nullptr), Pred(nullptr), Values() {}
  RenamePassData(BasicBlock *B, BasicBlock *P, const ValVector &V)
      : BB(B), Pred(P), Values(V) {}

  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;

  void swap(RenamePassData &RHS) {
    std::swap(BB, RHS.BB);
    std::swap(Pred, RHS.Pred);
    Values.swap(RHS.Values);
  }
};

/// \brief This assigns and keeps a per-bb relative ordering of load/store
/// instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// \brief For each instruction that we track, keep the index of the
  /// instruction.
  ///
  /// The index starts out as the number of the instruction from the start of
  /// the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;

public:
  /// This code only looks at accesses to allocas.
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// Get or calculate the index of the specified instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end())
      return It->second;

    // Scan the whole block to get the instruction. This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (const Instruction &BBI : *BB)
      if (isInterestingInstruction(&BBI))
        InstNumbers[&BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }

  void clear() { InstNumbers.clear(); }
};

struct PromoteMem2Reg {
  /// The alloca instructions being promoted.
  std::vector<AllocaInst *> Allocas;

  DominatorTree &DT;
  DIBuilder DIB;

  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
  AssumptionCache *AC;

  const SimplifyQuery SQ;

  /// Reverse mapping of Allocas.
  DenseMap<AllocaInst *, unsigned> AllocaLookup;

  /// \brief The PhiNodes we're adding.
  ///
  /// This map is used to simplify some Phi nodes as we iterate over it, so
  /// it should have deterministic iterators. We could use a MapVector, but
  /// since we already maintain a map from BasicBlock* to a stable numbering
  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
  DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;

  /// For each PHI node, keep track of which entry in Allocas it corresponds
  /// to.
  DenseMap<PHINode *, unsigned> PhiToAllocaMap;

  /// If we are updating an AliasSetTracker, then for each alloca that is of
  /// pointer type, we keep track of what to copyValue to the inserted PHI
  /// nodes here.
  std::vector<Value *> PointerAllocaValues;

  /// For each alloca, we keep track of the dbg.declare intrinsic that
  /// describes it, if any, so that we can convert it to a dbg.value
  /// intrinsic if the alloca gets promoted.
  SmallVector<DbgDeclareInst *, 8> AllocaDbgDeclares;

  /// The set of basic blocks the renamer has already visited.
  SmallPtrSet<BasicBlock *, 16> Visited;

  /// Contains a stable numbering of basic blocks to avoid non-deterministic
  /// behavior.
  DenseMap<BasicBlock *, unsigned> BBNumbers;

  /// Lazily compute the number of predecessors a block has.
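  ///
  /// A cached count of zero means "not yet computed", so getNumPreds() below
  /// stores the real predecessor count plus one and subtracts one on lookup.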
  DenseMap<const BasicBlock *, unsigned> BBNumPreds;

public:
  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                 AssumptionCache *AC)
      : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
        DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
        AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
                   nullptr, &DT, AC) {}

  void run();

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      NP = std::distance(pred_begin(BB), pred_end(BB)) + 1;
    return NP - 1;
  }

  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                           SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};

} // end of anonymous namespace

/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  AC->registerAssumption(CI);
}

static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
  // Knowing that this alloca is promotable, we know that it's safe to kill all
  // instructions except for load and store.

  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
    Instruction *I = cast<Instruction>(*UI);
    ++UI;
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;

    if (!I->getType()->isVoidTy()) {
      // The only users of this bitcast/GEP instruction are lifetime intrinsics.
      // Follow the use/def chain to erase them now instead of leaving it for
      // dead code elimination later.
      for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
        Instruction *Inst = cast<Instruction>(*UUI);
        ++UUI;
        Inst->eraseFromParent();
      }
    }
    I->eraseFromParent();
  }
}

/// \brief Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, DominatorTree &DT,
                                     AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks. We will reconstruct it here if needed.
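  // Any load the single store does not dominate is recorded back into
  // UsingBlocks below, and the caller then falls back to full PHI placement.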
  Info.UsingBlocks.clear();

  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca. We can do this if the value is
    // dominated by the store. If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first. If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (LI->getParent() != StoreBB &&
                 !DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationship. If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this load. So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  if (DbgDeclareInst *DDI = Info.DbgDeclare) {
    DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
    ConvertDebugDeclareToDebugValue(DDI, Info.OnlyStore, DIB);
    DDI->eraseFromParent();
    LBI.deleteValue(DDI);
  }

  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  LBI.deleteValue(AI);
  return true;
}

/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. E.g., code like
/// this is correct in LLVM IR:
///   // A is an alloca with no stores so far
///   for (...) {
///     int t = *A;
///     if (!first_iteration)
///       use(t);
///     *A = 42;
///   }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
                                     LargeBlockInfo &LBI,
                                     DominatorTree &DT,
                                     AssumptionCache *AC) {
  // The trickiest case to handle is when we have large blocks. Because of this,
  // this code is optimized assuming that large blocks happen. This does not
  // significantly pessimize the small block case. This uses LargeBlockInfo to
  // make it efficient to get the index of various operations in the block.

  // Walk the use-def list of the alloca, getting the locations of all stores.
  typedef SmallVector<std::pair<unsigned, StoreInst *>, 64> StoresByIndexTy;
  StoresByIndexTy StoresByIndex;

  for (User *U : AI->users())
    if (StoreInst *SI = dyn_cast<StoreInst>(U))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  std::sort(StoresByIndex.begin(), StoresByIndex.end(), less_first());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI++);
    if (!LI)
      continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store that has a lower index than this load.
    StoresByIndexTy::iterator I =
        std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                         std::make_pair(LoadIdx,
                                        static_cast<StoreInst *>(nullptr)),
                         less_first());
    if (I == StoresByIndex.begin()) {
      if (StoresByIndex.empty())
        // If there are no stores, the load takes the undef value.
        LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
      else
        // There is no store before this load, bail out (load may be affected
        // by the following stores - see main comment).
        return false;
    } else {
      // Otherwise, there was a store before this load, so the load takes its
      // value. Note, if the load was marked as nonnull we don't want to lose
      // that information when we erase it. So we preserve it with an assume.
      Value *ReplVal = std::prev(I)->second->getOperand(0);
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
        addAssumeNonNull(AC, LI);

      LI->replaceAllUsesWith(ReplVal);
    }

    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Remove the (now dead) stores and alloca.
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    // Record debuginfo for the store before removing it.
    if (DbgDeclareInst *DDI = Info.DbgDeclare) {
      DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
      ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
    }
    SI->eraseFromParent();
    LBI.deleteValue(SI);
  }

  AI->eraseFromParent();
  LBI.deleteValue(AI);

  // The alloca's debuginfo can be removed as well.
  if (DbgDeclareInst *DDI = Info.DbgDeclare) {
    DDI->eraseFromParent();
    LBI.deleteValue(DDI);
  }

  ++NumLocalPromoted;
  return true;
}

void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  AllocaDbgDeclares.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;
  ForwardIDFCalculator IDF(DT);

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is same as DF!");

    removeLifetimeIntrinsicUsers(AI);

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed.
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca. This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      if (rewriteSingleStoreAlloca(AI, Info, LBI, DT, AC)) {
        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);
        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform a
    // linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock &&
        promoteSingleBlockAlloca(AI, Info, LBI, DT, AC)) {
      // The alloca has been processed, move on.
      RemoveFromAllocasList(AllocaNum);
      continue;
    }

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (auto &BB : F)
        BBNumbers[&BB] = ID++;
    }

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (Info.DbgDeclare)
      AllocaDbgDeclares[AllocaNum] = Info.DbgDeclare;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // Unique the set of defining blocks for efficient lookup.
    SmallPtrSet<BasicBlock *, 32> DefBlocks;
    DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

    // Determine which blocks the value is live in. These are blocks which lead
    // to uses.
    SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
    ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

    // At this point, we're committed to promoting the alloca using IDF's, and
    // the standard SSA construction algorithm. Determine which blocks need phi
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.
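    // PHI nodes are placed at the iterated dominance frontier of the blocks
    // containing stores; restricting the calculation to LiveInBlocks yields
    // pruned SSA, i.e. no PHI node in a block where the alloca is not live.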
    IDF.setLiveInBlocks(LiveInBlocks);
    IDF.setDefiningBlocks(DefBlocks);
    SmallVector<BasicBlock *, 32> PHIBlocks;
    IDF.calculate(PHIBlocks);
    if (PHIBlocks.size() > 1)
      std::sort(PHIBlocks.begin(), PHIBlocks.end(),
                [this](BasicBlock *A, BasicBlock *B) {
                  return BBNumbers.lookup(A) < BBNumbers.lookup(B);
                });

    unsigned CurrentVersion = 0;
    for (unsigned i = 0, e = PHIBlocks.size(); i != e; ++i)
      QueuePhiNode(PHIBlocks[i], AllocaNum, CurrentVersion);
  }

  if (Allocas.empty())
    return; // All of the allocas must have been trivial!

  LBI.clear();

  // Set the incoming values for the basic block to be undef values for all of
  // the allocas. We do this in case there is a load of a value that has not
  // been stored yet. In this case, it will get this undef value.
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // Walk all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values));
  do {
    RenamePassData RPD;
    RPD.swap(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
    Instruction *A = Allocas[i];

    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the dominator
    // tree. Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(UndefValue::get(A->getType()));
    A->eraseFromParent();
  }

  // Remove the allocas' dbg.declare intrinsics from the function.
  for (unsigned i = 0, e = AllocaDbgDeclares.size(); i != e; ++i)
    if (DbgDeclareInst *DDI = AllocaDbgDeclares[i])
      DDI->eraseFromParent();

  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values. This can
  // happen due to undef values coming into the PHI nodes. This process is
  // iterative, because eliminating one PHI node can cause others to be removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    // Iterating over NewPhiNodes is deterministic, so it is safe to try to
    // simplify and RAUW them as we go. If it was not, we could add uses to
    // the values we replace with in a non-deterministic order, thus creating
    // non-deterministic def->use chains.
    for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
             I = NewPhiNodes.begin(),
             E = NewPhiNodes.end();
         I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
      if (Value *V = SimplifyInstruction(PN, SQ)) {
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all reachable
  // code. Unfortunately, there may be unreachable blocks which the renamer
  // hasn't traversed. If this is the case, the PHI nodes may not
  // have incoming values for all predecessors. Loop over all PHI nodes we have
  // created, inserting undef values if they are missing any incoming values.
  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
           I = NewPhiNodes.begin(),
           E = NewPhiNodes.end();
       I != E; ++I) {
    // We want to do this once per basic block. As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values. We
    // know that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock *, 16> Preds(pred_begin(BB), pred_end(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks. Start by sorting the incoming predecessors for efficient
    // access.
    std::sort(Preds.begin(), Preds.end());

    // Now we loop through all BB's which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVectorImpl<BasicBlock *>::iterator EntIt = std::lower_bound(
          Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i));
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry.
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy
    // entries inserted into every PHI node for the block. Update all the phi
    // nodes in this block that we are inserting (there could be phis before
    // mem2reg runs).
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *UndefVal = UndefValue::get(SomePHI->getType());
      for (unsigned pred = 0, e = Preds.size(); pred != e; ++pred)
        SomePHI->addIncoming(UndefVal, Preds[pred]);
    }
  }

  NewPhiNodes.clear();
}

/// \brief Determine which blocks the value is live in.
///
/// These are blocks which lead to uses. Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live.
  // Blocks are added to the worklist if we need to check their predecessors.
  // Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use. If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value. If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load. The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        if (LI->getOperand(0) != AI)
          continue;

        // Okay, we found a load before a store to the alloca. It is actually
        // live into this block.
        break;
      }
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor or
    // live into it too. Add the preds to the worklist unless they are a
    // defining block.
    for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
      BasicBlock *P = *PI;

      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}

/// \brief Queue a phi-node to be added to a basic-block for a specific Alloca.
///
/// Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're done!
  if (PN)
    return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to the
  // basic block.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       &BB->front());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  return true;
}

/// \brief Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
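///
/// Although described as recursive, the traversal is implemented iteratively:
/// the first successor is handled by looping back to the NextIteration label,
/// and the remaining successors are pushed onto the Worklist, which keeps the
/// pass from overflowing the stack on functions with very deep CFGs.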
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in the
  // block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish between PHI nodes being inserted by
      // this invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run. We determine that APN is being inserted
      // because it is missing incoming edges. All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of incoming
      // operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        if (DbgDeclareInst *DDI = AllocaDbgDeclares[AllocaNo])
          ConvertDebugDeclareToDebugValue(DDI, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation, so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;

  for (BasicBlock::iterator II = BB->begin(); !isa<TerminatorInst>(II);) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];

      // If the load was marked as nonnull we don't want to lose
      // that information when we erase this load. So we preserve
      // it with an assume.
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !llvm::isKnownNonNullAt(V, LI, &DT))
        addAssumeNonNull(AC, LI);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      BB->getInstList().erase(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value.
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // What value were we writing?
      IncomingVals[ai->second] = SI->getOperand(0);
      // Record debuginfo for the store before removing it.
      if (DbgDeclareInst *DDI = AllocaDbgDeclares[ai->second])
        ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
      BB->getInstList().erase(SI);
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice.
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals);

  goto NextIteration;
}

void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                           AssumptionCache *AC) {
  // If there is nothing to do, bail out...
  if (Allocas.empty())
    return;

  PromoteMem2Reg(Allocas, DT, AC).run();
}