//===- PromoteMemoryToRegister.cpp - Convert allocas to registers --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references.  It promotes
// alloca instructions which only have loads and stores as uses.  An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mem2reg"

STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
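// For illustration (sketch, not taken from an LLVM test): an alloca such as
//
//   %x = alloca i32
//   store i32 1, i32* %x
//   %v = load i32, i32* %x
//
// is promotable because every user is a direct, non-volatile load or store,
// or a lifetime marker reached through an i8* bitcast or all-zero-index GEP
// of the alloca.  Passing %x to an arbitrary call, or storing %x itself
// somewhere, would make it non-promotable.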
bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // FIXME: If the memory unit is of pointer or integer type, we can permit
  // assignments to subsections of the memory unit.
  unsigned AS = AI->getType()->getAddressSpace();

  // Only allow direct and non-volatile loads and stores...
  for (const User *U : AI->users()) {
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Note that atomic loads can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (LI->isVolatile())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI)
        return false; // Don't allow a store OF the AI, only INTO the AI.
      // Note that atomic stores can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (SI->isVolatile())
        return false;
    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (!II->isLifetimeStartOrEnd())
        return false;
    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (BCI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!onlyUsedByLifetimeMarkers(BCI))
        return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (GEPI->getType() != Type::getInt8PtrTy(U->getContext(), AS))
        return false;
      if (!GEPI->hasAllZeroIndices())
        return false;
      if (!onlyUsedByLifetimeMarkers(GEPI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}

namespace {

struct AllocaInfo {
  SmallVector<BasicBlock *, 32> DefiningBlocks;
  SmallVector<BasicBlock *, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  Value *AllocaPointerVal;
  TinyPtrVector<DbgVariableIntrinsic *> DbgDeclares;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = nullptr;
    OnlyBlock = nullptr;
    OnlyUsedInOneBlock = true;
    AllocaPointerVal = nullptr;
    DbgDeclares.clear();
  }

  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
  /// by the rest of the pass to reason about the uses of this alloca.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
      Instruction *User = cast<Instruction>(*UI++);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca.
        DefiningBlocks.push_back(SI->getParent());
        AllocaPointerVal = SI->getOperand(0);
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction, keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
        AllocaPointerVal = LI;
      }

      if (OnlyUsedInOneBlock) {
        if (!OnlyBlock)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    DbgDeclares = FindDbgAddrUses(AI);
  }
};

/// Data package used by RenamePass().
struct RenamePassData {
  using ValVector = std::vector<Value *>;
  using LocationVector = std::vector<DebugLoc>;

  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
      : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}

  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;
  LocationVector Locations;
};

/// This assigns and keeps a per-bb relative ordering of load/store
/// instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// For each instruction that we track, keep the index of the
  /// instruction.
  ///
  /// The index starts out as the number of the instruction from the start of
  /// the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;

public:

  /// This code only looks at accesses to allocas.
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// Get or calculate the index of the specified instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end())
      return It->second;

    // Scan the whole block to get the instruction.  This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (const Instruction &BBI : *BB)
      if (isInterestingInstruction(&BBI))
        InstNumbers[&BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }

  void clear() { InstNumbers.clear(); }
};
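// For illustration (hypothetical block, not from this file): given
//
//   store i32 0, i32* %a     ; index 0
//   %v = load i32, i32* %a   ; index 1
//
// getInstructionIndex() returns 0 for the store and 1 for the load, so later
// code can compare the two indices instead of rescanning the block to decide
// which instruction comes first.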
struct PromoteMem2Reg {
  /// The alloca instructions being promoted.
  std::vector<AllocaInst *> Allocas;

  DominatorTree &DT;
  DIBuilder DIB;

  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
  AssumptionCache *AC;

  const SimplifyQuery SQ;

  /// Reverse mapping of Allocas.
  DenseMap<AllocaInst *, unsigned> AllocaLookup;

  /// The PhiNodes we're adding.
  ///
  /// That map is used to simplify some Phi nodes as we iterate over it, so
  /// it should have deterministic iterators.  We could use a MapVector, but
  /// since we already maintain a map from BasicBlock* to a stable numbering
  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
  DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;

  /// For each PHI node, keep track of which entry in Allocas it corresponds
  /// to.
  DenseMap<PHINode *, unsigned> PhiToAllocaMap;

  /// If we are updating an AliasSetTracker, then for each alloca that is of
  /// pointer type, we keep track of what to copyValue to the inserted PHI
  /// nodes here.
  std::vector<Value *> PointerAllocaValues;

  /// For each alloca, we keep track of the dbg.declare intrinsic that
  /// describes it, if any, so that we can convert it to a dbg.value
  /// intrinsic if the alloca gets promoted.
  SmallVector<TinyPtrVector<DbgVariableIntrinsic *>, 8> AllocaDbgDeclares;

  /// The set of basic blocks the renamer has already visited.
  SmallPtrSet<BasicBlock *, 16> Visited;

  /// Contains a stable numbering of basic blocks to avoid non-deterministic
  /// behavior.
  DenseMap<BasicBlock *, unsigned> BBNumbers;

  /// Lazily compute the number of predecessors a block has.
  DenseMap<const BasicBlock *, unsigned> BBNumPreds;

public:
  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                 AssumptionCache *AC)
      : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
        DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
        AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
                   nullptr, &DT, AC) {}

  void run();

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    // The cached count is biased by one so that a stored value of zero means
    // "not computed yet".
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      NP = pred_size(BB) + 1;
    return NP - 1;
  }

  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                           SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  RenamePassData::LocationVector &IncLocs,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};

} // end anonymous namespace

/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  AC->registerAssumption(CI);
}

static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
  // Knowing that this alloca is promotable, we know that it's safe to kill all
  // instructions except for load and store.

  for (auto UI = AI->user_begin(), UE = AI->user_end(); UI != UE;) {
    Instruction *I = cast<Instruction>(*UI);
    ++UI;
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;

    if (!I->getType()->isVoidTy()) {
      // The only users of this bitcast/GEP instruction are lifetime intrinsics.
      // Follow the use/def chain to erase them now instead of leaving it for
      // dead code elimination later.
      for (auto UUI = I->user_begin(), UUE = I->user_end(); UUI != UUE;) {
        Instruction *Inst = cast<Instruction>(*UUI);
        ++UUI;
        Inst->eraseFromParent();
      }
    }
    I->eraseFromParent();
  }
}
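// For illustration (sketch): when the single store dominates every load, e.g.
//
//   %a = alloca i32
//   store i32 7, i32* %a
//   ...
//   %v = load i32, i32* %a
//
// each dominated load is simply replaced by the stored value (7 here), and the
// store and the alloca are then erased.  Loads that are not dominated by the
// store fall back to the full PHI-placement path.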
/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value.  Do so, and return
/// true if this has successfully promoted the alloca entirely.  If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef.  We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, const DataLayout &DL,
                                     DominatorTree &DT, AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks.  We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    Instruction *UserInst = cast<Instruction>(*UI++);
    if (!isa<LoadInst>(UserInst)) {
      assert(UserInst == OnlyStore && "Should only have load/stores");
      continue;
    }
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca.  We can do this if the value is
    // dominated by the store.  If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first.  If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (LI->getParent() != StoreBB &&
                 !DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships.  If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = UndefValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this Load.  So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
    DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
    ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
    DII->eraseFromParent();
    LBI.deleteValue(DII);
  }
  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  LBI.deleteValue(AI);
  return true;
}
/// Many allocas are only used within a single basic block.  If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false.  This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths.  e.g. code like
/// this is correct in LLVM IR:
///  // A is an alloca with no stores so far
///  for (...) {
///    int t = *A;
///    if (!first_iteration)
///      use(t);
///    *A = 42;
///  }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
                                     LargeBlockInfo &LBI,
                                     const DataLayout &DL,
                                     DominatorTree &DT,
                                     AssumptionCache *AC) {
  // The trickiest case to handle is when we have large blocks.  Because of
  // this, this code is optimized assuming that large blocks happen.  This does
  // not significantly pessimize the small block case.  This uses LargeBlockInfo
  // to make it efficient to get the index of various operations in the block.

  // Walk the use-def list of the alloca, getting the locations of all stores.
  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
  StoresByIndexTy StoresByIndex;

  for (User *U : AI->users())
    if (StoreInst *SI = dyn_cast<StoreInst>(U))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  llvm::sort(StoresByIndex, less_first());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (auto UI = AI->user_begin(), E = AI->user_end(); UI != E;) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI++);
    if (!LI)
      continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store that has a lower index than this load.
    StoresByIndexTy::iterator I =
        std::lower_bound(StoresByIndex.begin(), StoresByIndex.end(),
                         std::make_pair(LoadIdx,
                                        static_cast<StoreInst *>(nullptr)),
                         less_first());
    if (I == StoresByIndex.begin()) {
      if (StoresByIndex.empty())
        // If there are no stores, the load takes the undef value.
        LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
      else
        // There is no store before this load, bail out (load may be affected
        // by the following stores - see main comment).
        return false;
    } else {
      // Otherwise, there was a store before this load, the load takes its
      // value.  Note, if the load was marked as nonnull we don't want to lose
      // that information when we erase it.  So we preserve it with an assume.
      Value *ReplVal = std::prev(I)->second->getOperand(0);
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // If the replacement value is the load, this must occur in unreachable
      // code.
      if (ReplVal == LI)
        ReplVal = UndefValue::get(LI->getType());

      LI->replaceAllUsesWith(ReplVal);
    }

    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Remove the (now dead) stores and alloca.
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    // Record debuginfo for the store before removing it.
    for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
      DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
      ConvertDebugDeclareToDebugValue(DII, SI, DIB);
    }
    SI->eraseFromParent();
    LBI.deleteValue(SI);
  }

  AI->eraseFromParent();
  LBI.deleteValue(AI);

  // The alloca's debuginfo can be removed as well.
  for (DbgVariableIntrinsic *DII : Info.DbgDeclares) {
    DII->eraseFromParent();
    LBI.deleteValue(DII);
  }

  ++NumLocalPromoted;
  return true;
}
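/// Promote the allocas in 'Allocas' to SSA registers.  In outline: try the
/// single-store and single-block fast paths above, then place PHI nodes on the
/// iterated dominance frontier of each alloca's defining blocks, rename loads
/// and stores with a depth-first walk over the CFG, and finally clean up
/// trivially simplifiable or incomplete PHI nodes.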
void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  AllocaDbgDeclares.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;
  ForwardIDFCalculator IDF(DT);

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is same as DF!");

    removeLifetimeIntrinsicUsers(AI);

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed.
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca.  This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);
        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform
    // a linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock &&
        promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
      // The alloca has been processed, move on.
      RemoveFromAllocasList(AllocaNum);
      continue;
    }

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (auto &BB : F)
        BBNumbers[&BB] = ID++;
    }

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (!Info.DbgDeclares.empty())
      AllocaDbgDeclares[AllocaNum] = Info.DbgDeclares;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // At this point, we're committed to promoting the alloca using IDF's, and
    // the standard SSA construction algorithm.  Determine which blocks need PHI
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.

    // Unique the set of defining blocks for efficient lookup.
    SmallPtrSet<BasicBlock *, 32> DefBlocks;
    DefBlocks.insert(Info.DefiningBlocks.begin(), Info.DefiningBlocks.end());

    // Determine which blocks the value is live in.  These are blocks which
    // lead to uses.
    SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
    ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

    // At this point, we're committed to promoting the alloca using IDF's, and
    // the standard SSA construction algorithm.  Determine which blocks need phi
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.
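    // For illustration: in a diamond CFG (entry -> {then, else} -> merge) with
    // a store to the alloca in 'then' and a load in 'merge', the iterated
    // dominance frontier of the defining blocks contains 'merge', so a PHI is
    // queued there, exactly where the incoming definitions meet.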
    IDF.setLiveInBlocks(LiveInBlocks);
    IDF.setDefiningBlocks(DefBlocks);
    SmallVector<BasicBlock *, 32> PHIBlocks;
    IDF.calculate(PHIBlocks);
    if (PHIBlocks.size() > 1)
      llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
        return BBNumbers.lookup(A) < BBNumbers.lookup(B);
      });

    unsigned CurrentVersion = 0;
    for (BasicBlock *BB : PHIBlocks)
      QueuePhiNode(BB, AllocaNum, CurrentVersion);
  }

  if (Allocas.empty())
    return; // All of the allocas must have been trivial!

  LBI.clear();

  // Set the incoming values for the basic block to be undef values for all of
  // the alloca's.  We do this in case there is a load of a value that has not
  // been stored yet.  In this case, it will get this undef value.
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // When handling debug info, treat all incoming values as if they have
  // unknown locations until proven otherwise.
  RenamePassData::LocationVector Locations(Allocas.size());

  // Walk all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary.
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
                                  std::move(Locations));
  do {
    RenamePassData RPD = std::move(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops.  Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (Instruction *A : Allocas) {
    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the
    // dominator tree.  Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(UndefValue::get(A->getType()));
    A->eraseFromParent();
  }

  // Remove the allocas' dbg.declare intrinsics from the function.
  for (auto &Declares : AllocaDbgDeclares)
    for (auto *DII : Declares)
      DII->eraseFromParent();
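  // For illustration: a node such as
  //   %x = phi i32 [ %v, %then ], [ undef, %else ]
  // simplifies to %v; removing it can in turn make other inserted PHIs
  // trivial, which is why the elimination below iterates to a fixed point.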
  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values.  This can
  // happen due to undef values coming into the PHI nodes.  This process is
  // iterative, because eliminating one PHI node can cause others to be removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    // Iterating over NewPhiNodes is deterministic, so it is safe to try to
    // simplify and RAUW them as we go.  If it was not, we could add uses to
    // the values we replace with in a non-deterministic order, thus creating
    // non-deterministic def->use chains.
    for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
             I = NewPhiNodes.begin(),
             E = NewPhiNodes.end();
         I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
      if (Value *V = SimplifyInstruction(PN, SQ)) {
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all reachable
  // code.  Unfortunately, there may be unreachable blocks which the renamer
  // hasn't traversed.  If this is the case, the PHI nodes may not
  // have incoming values for all predecessors.  Loop over all PHI nodes we have
  // created, inserting undef values if they are missing any incoming values.
  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
           I = NewPhiNodes.begin(),
           E = NewPhiNodes.end();
       I != E; ++I) {
    // We want to do this once per basic block.  As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values.  We
    // know that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock *, 16> Preds(pred_begin(BB), pred_end(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks.  Start by sorting the incoming predecessors for efficient
    // access.
    auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.lookup(A) < BBNumbers.lookup(B);
    };
    llvm::sort(Preds, CompareBBNumbers);

    // Now we loop through all BB's which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVectorImpl<BasicBlock *>::iterator EntIt = std::lower_bound(
          Preds.begin(), Preds.end(), SomePHI->getIncomingBlock(i),
          CompareBBNumbers);
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry.
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy
    // entries inserted into every PHI node for the block.  Update all the phi
    // nodes in this block that we are inserting (there could be phis before
    // mem2reg runs).
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *UndefVal = UndefValue::get(SomePHI->getType());
      for (BasicBlock *Pred : Preds)
        SomePHI->addIncoming(UndefVal, Pred);
    }
  }

  NewPhiNodes.clear();
}

/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses.  Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live.  Blocks are added to the worklist if we need to
  // check their predecessors.  Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use.  If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value.  If the
    // first reference to the alloca is a def (store), then we know it isn't
    // live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load.  The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
        if (LI->getOperand(0) != AI)
          continue;

        // Okay, we found a load before a store to the alloca.  It is actually
        // live into this block.
        break;
      }
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set.  If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor
    // or live into it too.  Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}

/// Queue a phi-node to be added to a basic-block for a specific Alloca.
///
/// Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're done!
  if (PN)
    return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to
  // the BasicBlock.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       &BB->front());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  return true;
}
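// For illustration: for an i32 alloca named %a, QueuePhiNode inserts an empty
//   %a.0 = phi i32
// at the front of the block, with space reserved for one incoming value per
// predecessor; RenamePass later fills in the actual incoming values edge by
// edge.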
/// Update the debug location of a phi.  \p ApplyMergedLoc indicates whether to
/// create a merged location incorporating \p DL, or to set \p DL directly.
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
                                           bool ApplyMergedLoc) {
  if (ApplyMergedLoc)
    PN->applyMergedLocation(PN->getDebugLoc(), DL);
  else
    PN->setDebugLoc(DL);
}

/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                RenamePassData::LocationVector &IncomingLocs,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in
  // the block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish PHI nodes being inserted by this
      // invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run.  We determine that APN is being inserted
      // because it is missing incoming edges.  All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of incoming
      // operands so far.  Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = std::count(succ_begin(Pred), succ_end(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[AllocaNo])
          ConvertDebugDeclareToDebugValue(DII, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries.  If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;
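  // For illustration: within this block, a 'store i32 %v, i32* %a' makes %v
  // the current value of %a (IncomingVals), and a later 'load i32, i32* %a'
  // is rewritten to use %v directly; both instructions are then erased.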
  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];

      // If the load was marked as nonnull we don't want to lose
      // that information when we erase this Load.  So we preserve
      // it with an assume.
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      BB->getInstList().erase(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value.
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // What value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals[AllocaNo] = SI->getOperand(0);

      // Record debuginfo for the store before removing it.
      IncomingLocs[AllocaNo] = SI->getDebugLoc();
      for (DbgVariableIntrinsic *DII : AllocaDbgDeclares[ai->second])
        ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      BB->getInstList().erase(SI);
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice.
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);

  goto NextIteration;
}

void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                           AssumptionCache *AC) {
  // If there is nothing to do, bail out...
  if (Allocas.empty())
    return;

  PromoteMem2Reg(Allocas, DT, AC).run();
}