//===-- Local.cpp - Functions to perform local transformations ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Constants.h"
#include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

// ConstantFoldTerminator - If a terminator instruction is predicated on a
// constant value, convert it into an unconditional branch to the constant
// destination.
//
bool llvm::ConstantFoldTerminator(BasicBlock *BB) {
  TerminatorInst *T = BB->getTerminator();

  // Branch - See if we are conditionally jumping on a constant.
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on constant?
      // YES.  Change to unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      assert(BI->getParent() && "Terminator not inserted in block!");
      OldDest->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Destination, BI);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest1, BI);
      BI->eraseFromParent();
      return true;
    }
    return false;
  }

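  // Illustrative example (not taken from the original sources): a switch whose
  // condition is a constant, such as
  //   switch i32 1, label %default [ i32 1, label %bb1 ]
  // is folded below into an unconditional "br label %bb1".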
  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch into a
    // single branch instruction!
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *TheOnlyDest = SI->getSuccessor(0);  // The default dest
    BasicBlock *DefaultDest = TheOnlyDest;
    assert(TheOnlyDest == SI->getDefaultDest() &&
           "Default destination is not successor #0?");

    // Figure out which case it goes to.
    for (unsigned i = 1, e = SI->getNumSuccessors(); i != e; ++i) {
      // Found case matching a constant operand?
      if (SI->getSuccessorValue(i) == CI) {
        TheOnlyDest = SI->getSuccessor(i);
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (SI->getSuccessor(i) == DefaultDest) {
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        --i; --e;  // Don't skip an entry...
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two
      // non-equal destinations.
      if (SI->getSuccessor(i) != TheOnlyDest) TheOnlyDest = 0;
    }

    if (CI && !TheOnlyDest) {
      // We are branching on a constant that matches none of the cases; it goes
      // to the default successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      BranchInst::Create(TheOnlyDest, SI);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
        // Found case matching a constant operand?
        BasicBlock *Succ = SI->getSuccessor(i);
        if (Succ == TheOnlyDest)
          TheOnlyDest = 0;  // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      BB->getInstList().erase(SI);
      return true;
    }

    if (SI->getNumSuccessors() == 2) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      Value *Cond = new ICmpInst(SI, ICmpInst::ICMP_EQ, SI->getCondition(),
                                 SI->getSuccessorValue(1), "cond");
      // Insert the new branch.
      BranchInst::Create(SI->getSuccessor(1), SI->getSuccessor(0), Cond, SI);

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      BranchInst::Create(TheOnlyDest, IBI);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = 0;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      IBI->eraseFromParent();

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
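      // (The unconditional branch created above is now the block's terminator,
      // so that is what gets erased here before the 'unreachable' is appended.)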
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I) {
  if (!I->use_empty() || isa<TerminatorInst>(I)) return false;

  // We don't want debug info removed by anything this general.
  if (isa<DbgInfoIntrinsic>(I)) return false;

  if (!I->mayHaveSideEffects()) return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;
  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, 0);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if the PHI node is actually deleted.
bool
llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
  // We can remove a PHI if it is on a cycle in the def-use graph
  // where each node in the cycle has degree one, i.e. only one use,
  // and is an instruction with no side effects.
  if (!PN->hasOneUse())
    return false;

  bool Changed = false;
  SmallPtrSet<PHINode *, 4> PHIs;
  PHIs.insert(PN);
  for (Instruction *J = cast<Instruction>(*PN->use_begin());
       J->hasOneUse() && !J->mayHaveSideEffects();
       J = cast<Instruction>(*J->use_begin()))
    // If we find a PHI more than once, we're on a cycle that
    // won't prove fruitful.
    if (PHINode *JP = dyn_cast<PHINode>(J))
      if (!PHIs.insert(cast<PHINode>(JP))) {
        // Break the cycle and delete the PHI and its operands.
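        // (Replacing JP's uses with undef makes it trivially dead, so the
        // recursive deletion below can tear down the rest of the chain.)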
        JP->replaceAllUsesWith(UndefValue::get(JP->getType()));
        (void)RecursivelyDeleteTriviallyDeadInstructions(JP);
        Changed = true;
        break;
      }
  return Changed;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
  bool MadeChange = false;
  for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
    Instruction *Inst = BI++;

    if (Value *V = SimplifyInstruction(Inst, TD)) {
      WeakVH BIHandle(BI);
      ReplaceAndSimplifyAllUses(Inst, V, TD);
      MadeChange = true;
      if (BIHandle != BI)
        BI = BB->begin();
      continue;
    }

    MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//


/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.  If
/// BB contains any PHI nodes, this drops the entries in the PHI nodes for Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// ... and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the and to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
                                        TargetData *TD) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));

    Value *PNV = SimplifyInstruction(PN, TD);
    if (PNV == 0) continue;

    // If we're able to simplify the phi to a single value, substitute the new
    // value into all of its uses.
    assert(PNV != PN && "SimplifyInstruction broken!");

    Value *OldPhiIt = PhiIt;
    ReplaceAndSimplifyAllUses(PN, PNV, TD);

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid; restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}


/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
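    // (A single-entry PHI that names itself can only appear in unreachable
    // code, so any replacement value is acceptable here.)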
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  // Zap anything that took the address of DestBB.  Not doing this will give the
  // address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  if (P) {
    DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
    if (DT) {
      BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
    ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PI) {
      PI->replaceAllUses(PredBB, DestBB);
      PI->removeEdge(ProfileInfo::getEdge(PredBB, DestBB));
    }
  }
  // Nuke BB.
  PredBB->eraseFromParent();
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB.
  typedef SmallPtrSet<BasicBlock*, 16> BlockSet;
  BlockSet BBPreds(pred_begin(BB), pred_end(BB));

  // Use that list to make another list of common predecessors of BB and Succ.
  BlockSet CommonPreds;
  for (pred_iterator PI = pred_begin(Succ), PE = pred_end(Succ);
       PI != PE; ++PI) {
    BasicBlock *P = *PI;
    if (BBPreds.count(P))
      CommonPreds.insert(P);
  }

  // Shortcut: if there are no common predecessors, merging is always safe.
  if (CommonPreds.empty())
    return true;

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (BlockSet::iterator PI = CommonPreds.begin(), PE = CommonPreds.end();
           PI != PE; PI++) {
        if (BBPN->getIncomingValueForBlock(*PI)
              != PN->getIncomingValueForBlock(*PI)) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << (*PI)->getName() << "\n");
          return false;
        }
      }
    } else {
      Value* Val = PN->getIncomingValueForBlock(BB);
      for (BlockSet::iterator PI = CommonPreds.begin(), PE = CommonPreds.end();
           PI != PE; PI++) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        if (Val != PN->getIncomingValueForBlock(*PI)) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << (*PI)->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potential debug intrinsics and the branch.  If possible, eliminate BB by
/// rewriting all the predecessors to branch to the successor block and return
/// true.  If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ.  If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
           UI != E; ++UI) {
        if (PHINode* PN = dyn_cast<PHINode>(*UI)) {
          if (PN->getIncomingBlock(UI) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const SmallVector<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
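    // For each one, the entry coming from BB is dropped and replaced either by
    // the entries of a PHI that lived in BB, or by one entry per predecessor
    // of BB carrying the old value.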
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);
      Value *OldVal = PN->removeIncomingValue(BB, false);
      assert(OldVal && "No entry in PHI for Pred BB!");

      // If this incoming value is one of the PHI nodes in BB, the new entries
      // in the PHI node are the entries from the old PHI.
      if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
        PHINode *OldValPN = cast<PHINode>(OldVal);
        for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i)
          // Note that, since we are merging phi nodes and BB and Succ might
          // have common predecessors, we could end up with a phi node with
          // identical incoming branches.  This will be cleaned up later (and
          // will trigger asserts if we try to clean it up now, without also
          // simplifying the corresponding conditional branch).
          PN->addIncoming(OldValPN->getIncomingValue(i),
                          OldValPN->getIncomingBlock(i));
      } else {
        // Add an incoming value for each of the new incoming values.
        for (unsigned i = 0, e = BBPreds.size(); i != e; ++i)
          PN->addIncoming(OldVal, BBPreds[i]);
      }
    }
  }

  while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
    if (Succ->getSinglePredecessor()) {
      // BB is the only predecessor of Succ, so Succ will end up with exactly
      // the same predecessors BB had.
      Succ->getInstList().splice(Succ->begin(),
                                 BB->getInstList(), BB->begin());
    } else {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block.  This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  bool Changed = false;

  // This implementation doesn't currently consider undef operands
  // specially.  Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // Map from PHI hash values to PHI nodes.  If multiple PHIs have
  // the same hash value, the element is the first PHI in the
  // linked list in CollisionMap.
  DenseMap<uintptr_t, PHINode *> HashMap;

  // Maintain linked lists of PHI nodes with common hash values.
  DenseMap<PHINode *, PHINode *> CollisionMap;

  // Examine each PHI.
  for (BasicBlock::iterator I = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(I++); ) {
    // Compute a hash value on the operands.  Instcombine will likely have
    // sorted them, which helps expose duplicates, but we have to check all the
    // operands to be safe in case instcombine hasn't run.
    uintptr_t Hash = 0;
    for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
      // This hash algorithm is quite weak as hash functions go, but it seems
      // to do a good enough job for this particular purpose, and is very quick.
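      // (Each operand pointer is XORed in and the running hash is rotated left
      // by seven bits, so the operand sequence, not just the set, matters.)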
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    // If we've never seen this hash value before, it's a unique PHI.
    std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
      HashMap.insert(std::make_pair(Hash, PN));
    if (Pair.second) continue;
    // Otherwise it's either a duplicate or a hash collision.
    for (PHINode *OtherPN = Pair.first->second; ; ) {
      if (OtherPN->isIdenticalTo(PN)) {
        // A duplicate.  Replace this PHI with its duplicate.
        PN->replaceAllUsesWith(OtherPN);
        PN->eraseFromParent();
        Changed = true;
        break;
      }
      // A non-duplicate hash collision.
      DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
      if (I == CollisionMap.end()) {
        // Set this PHI to be the head of the linked list of colliding PHIs.
        PHINode *Old = Pair.first->second;
        Pair.first->second = PN;
        CollisionMap[PN] = Old;
        break;
      }
      // Proceed to the next PHI in the list.
      OtherPN = I->second;
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign.  This isn't
/// often possible though.  If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return enforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indices are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return enforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    return Align;
  }
  case Instruction::Alloca: {
    AllocaInst *AI = cast<AllocaInst>(V);
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

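// Illustrative example for getOrEnforceKnownAlignment (not taken from the
// original sources): given a global such as
//   @g = global i32 0, align 4
// that is a definition and has no explicit section, a call with PrefAlign = 16
// will raise the global's alignment to 16 and return 16.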
/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const TargetData *TD) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = TD ? TD->getPointerSizeInBits() : 64;
  APInt Mask = APInt::getAllOnesValue(BitWidth);
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, Mask, KnownZero, KnownOne, TD);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}