//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Metadata.h"
#include "llvm/Operator.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination. This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead, if
/// DeleteDeadConditions is true.
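///
/// For example, a conditional branch on a constant condition such as
///   br i1 true, label %live, label %dead
/// is rewritten to
///   br label %live
/// after dropping this block's entries from %dead's PHI nodes; similarly, a
/// switch on a constant selector collapses to a branch to the matching case
/// (or to the default destination if no case matches).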
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES. Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it. Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch into a
    // single branch instruction!
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *TheOnlyDest = SI->getDefaultDest();  // The default dest
    BasicBlock *DefaultDest = TheOnlyDest;

    // Figure out which case it goes to.
    for (unsigned i = 0, e = SI->getNumCases(); i != e; ++i) {
      // Found case matching a constant operand?
      if (SI->getCaseValue(i) == CI) {
        TheOnlyDest = SI->getCaseSuccessor(i);
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest. If so, eliminate it as an explicit compare.
      if (SI->getCaseSuccessor(i) == DefaultDest) {
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        --i; --e;  // Don't skip an entry...
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (SI->getCaseSuccessor(i) != TheOnlyDest) TheOnlyDest = 0;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not to any of the cases: go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
        BasicBlock *Succ = SI->getSuccessor(i);
        if (Succ == TheOnlyDest)
          TheOnlyDest = 0;  // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         SI->getCaseValue(0), "cond");

      // Insert the new branch.
      Builder.CreateCondBr(Cond, SI->getCaseSuccessor(0), SI->getDefaultDest());

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = 0;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior. Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I) {
  if (!I->use_empty() || isa<TerminatorInst>(I)) return false;

  // We don't want the landingpad instruction removed by anything this general.
  if (isa<LandingPadInst>(I))
    return false;

  // We don't want debug info removed by anything this general, unless
  // the debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects()) return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their pointer operand is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));
  }

  if (extractMallocCall(I)) return true;

  if (CallInst *CI = isFreeCall(I))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
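///
/// For example, if the only use of '%a = add i32 %x, 1' is an instruction that
/// is deleted here, %a becomes trivially dead as well and is deleted in the
/// same call, and so on up the operand chain.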
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, 0);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::use_iterator UI = I->use_begin();
  Value::use_iterator UE = I->use_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->use_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I)) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I);
      return true;
    }
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code. Note that it can delete
/// instructions in other blocks as well as in this block.
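///
/// For example, an instruction such as 'add i32 %x, 0' is replaced by %x
/// everywhere it is used and then erased once it becomes dead.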
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
  bool MadeChange = false;
  for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
    Instruction *Inst = BI++;

    if (Value *V = SimplifyInstruction(Inst, TD)) {
      WeakVH BIHandle(BI);
      ReplaceAndSimplifyAllUses(Inst, V, TD);
      MadeChange = true;
      if (BIHandle != BI)
        BI = BB->begin();
      continue;
    }

    if (Inst->isTerminator())
      break;

    WeakVH BIHandle(BI);
    MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst);
    if (BIHandle != BI)
      BI = BB->begin();
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//


/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB. If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values. For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// ... and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
                                        TargetData *TD) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down. This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));

    Value *PNV = SimplifyInstruction(PN, TD);
    if (PNV == 0) continue;

    // If we're able to simplify the phi to a single value, substitute the new
    // value into all of its uses.
    assert(PNV != PN && "SimplifyInstruction broken!");

    Value *OldPhiIt = PhiIt;
    ReplaceAndSimplifyAllUses(PN, PNV, TD);

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid; restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}


/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!). Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace self-referencing PHI with undef; it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB. Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  if (P) {
    DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
    if (DT) {
      BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
    ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PI) {
      PI->replaceAllUses(PredBB, DestBB);
      PI->removeEdge(ProfileInfo::getEdge(PredBB, DestBB));
    }
  }
  // Nuke BB.
  PredBB->eraseFromParent();
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB.
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            BBPN->getIncomingValueForBlock(IBB) != PN->getIncomingValue(PI)) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) && Val != PN->getIncomingValue(PI)) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potentially side-effect-free intrinsics and the branch. If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true. If we can't transform, return false.
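///
/// For example, if BB contains only 'br label %Succ', every predecessor that
/// branched to BB is rewritten to branch directly to %Succ, BB's entries in
/// Succ's PHI nodes are replaced by the corresponding values coming into BB,
/// and BB is deleted.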
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ. If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged. It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors. Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
           UI != E; ++UI) {
        if (PHINode* PN = dyn_cast<PHINode>(*UI)) {
          if (PN->getIncomingBlock(UI) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const SmallVector<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      Value *OldVal = PN->removeIncomingValue(BB, false);
      assert(OldVal && "No entry in PHI for Pred BB!");

      // If this incoming value is one of the PHI nodes in BB, the new entries
      // in the PHI node are the entries from the old PHI.
      if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
        PHINode *OldValPN = cast<PHINode>(OldVal);
        for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i)
          // Note that, since we are merging phi nodes and BB and Succ might
          // have common predecessors, we could end up with a phi node with
          // identical incoming branches. This will be cleaned up later (and
          // will trigger asserts if we try to clean it up now, without also
          // simplifying the corresponding conditional branch).
          PN->addIncoming(OldValPN->getIncomingValue(i),
                          OldValPN->getIncomingBlock(i));
      } else {
        // Add an incoming value for each of the new incoming values.
        for (unsigned i = 0, e = BBPreds.size(); i != e; ++i)
          PN->addIncoming(OldVal, BBPreds[i]);
      }
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI(), BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block. This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
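/// For example, given
///   %a = phi i32 [ 0, %bb1 ], [ %x, %bb2 ]
///   %b = phi i32 [ 0, %bb1 ], [ %x, %bb2 ]
/// all uses of %b are replaced with %a and %b is erased.
///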
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  bool Changed = false;

  // This implementation doesn't currently consider undef operands
  // specially. Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // Map from PHI hash values to PHI nodes. If multiple PHIs have
  // the same hash value, the element is the first PHI in the
  // linked list in CollisionMap.
  DenseMap<uintptr_t, PHINode *> HashMap;

  // Maintain linked lists of PHI nodes with common hash values.
  DenseMap<PHINode *, PHINode *> CollisionMap;

  // Examine each PHI.
  for (BasicBlock::iterator I = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(I++); ) {
    // Compute a hash value on the operands. Instcombine will likely have sorted
    // them, which helps expose duplicates, but we have to check all the
    // operands to be safe in case instcombine hasn't run.
    uintptr_t Hash = 0;
    // This hash algorithm is quite weak as hash functions go, but it seems
    // to do a good enough job for this particular purpose, and is very quick.
    for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    for (PHINode::block_iterator I = PN->block_begin(), E = PN->block_end();
         I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<BasicBlock *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    // Avoid colliding with the DenseMap sentinels ~0 and ~0-1.
    Hash >>= 1;
    // If we've never seen this hash value before, it's a unique PHI.
    std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
      HashMap.insert(std::make_pair(Hash, PN));
    if (Pair.second) continue;
    // Otherwise it's either a duplicate or a hash collision.
    for (PHINode *OtherPN = Pair.first->second; ; ) {
      if (OtherPN->isIdenticalTo(PN)) {
        // A duplicate. Replace this PHI with its duplicate.
        PN->replaceAllUsesWith(OtherPN);
        PN->eraseFromParent();
        Changed = true;
        break;
      }
      // A non-duplicate hash collision.
      DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
      if (I == CollisionMap.end()) {
        // Set this PHI to be the head of the linked list of colliding PHIs.
        PHINode *Old = Pair.first->second;
        Pair.first->second = PN;
        CollisionMap[PN] = Old;
        break;
      }
      // Proceed to the next PHI in the list.
      OtherPN = I->second;
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign, const TargetData *TD) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up. This avoids dynamic stack realignment.
    if (TD && TD->exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;
    // If the memory we set aside for the global may not be the memory used by
    // the final program then it is impossible for us to reliably enforce the
    // preferred alignment.
    if (GV->isWeakForLinker()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section. If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
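///
/// For example, if ComputeMaskedBits proves that the low 4 bits of the pointer
/// are zero, the known alignment is 1 << 4 = 16; if PrefAlign is 32 and the
/// pointer is an alloca whose alignment we may raise, enforceKnownAlignment
/// bumps the alloca to 32 and 32 is returned.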
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const TargetData *TD) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, TD);

  // We don't need to make any adjustment.
  return Align;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           StoreInst *SI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  if (!DIVar.Verify())
    return false;

  Instruction *DbgVal = NULL;
  // If the stored value is a zero- or sign-extended argument, use the argument
  // directly. The extension may be zapped by an optimization pass in the future.
  Argument *ExtendedArg = NULL;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg)
    DbgVal = Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, SI);
  else
    DbgVal = Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, SI);

  // Propagate any debug metadata from the store onto the dbg.value.
  DebugLoc SIDL = SI->getDebugLoc();
  if (!SIDL.isUnknown())
    DbgVal->setDebugLoc(SIDL);
  // Otherwise propagate debug metadata from the dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           LoadInst *LI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  if (!DIVar.Verify())
    return false;

  Instruction *DbgVal =
    Builder.insertDbgValueIntrinsic(LI->getOperand(0), 0,
                                    DIVar, LI);

  // Propagate any debug metadata from the load onto the dbg.value.
  DebugLoc LIDL = LI->getDebugLoc();
  if (!LIDL.isUnknown())
    DbgVal->setDebugLoc(LIDL);
  // Otherwise propagate debug metadata from the dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate set
/// of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent());
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) {
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
        Dbgs.push_back(DDI);
    }
  if (Dbgs.empty())
    return false;

  for (SmallVector<DbgDeclareInst *, 4>::iterator I = Dbgs.begin(),
         E = Dbgs.end(); I != E; ++I) {
    DbgDeclareInst *DDI = *I;
    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
      bool RemoveDDI = true;
      for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
           UI != E; ++UI)
        if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        else if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        else
          RemoveDDI = false;
      if (RemoveDDI)
        DDI->eraseFromParent();
    }
  }
  return true;
}

/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
  if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), V))
    for (Value::use_iterator UI = DebugNode->use_begin(),
           E = DebugNode->use_end(); UI != E; ++UI)
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
        return DDI;

  return 0;
}