//===- LoopRotation.cpp - Loop Rotation Pass ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements Loop Rotation Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

static cl::opt<unsigned> DefaultRotationThreshold(
    "rotation-max-header-size", cl::init(16), cl::Hidden,
    cl::desc("The default maximum header size for automatic loop rotation"));

STATISTIC(NumRotated, "Number of loops rotated");

namespace {

class LoopRotate : public LoopPass {
public:
  static char ID; // Pass ID, replacement for typeid
  LoopRotate(int SpecifiedMaxHeaderSize = -1) : LoopPass(ID) {
    initializeLoopRotatePass(*PassRegistry::getPassRegistry());
    if (SpecifiedMaxHeaderSize == -1)
      MaxHeaderSize = DefaultRotationThreshold;
    else
      MaxHeaderSize = unsigned(SpecifiedMaxHeaderSize);
  }

  // LCSSA form makes instruction renaming easier.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<LoopInfo>();
    AU.addPreserved<LoopInfo>();
    AU.addRequiredID(LoopSimplifyID);
    AU.addPreservedID(LoopSimplifyID);
    AU.addRequiredID(LCSSAID);
    AU.addPreservedID(LCSSAID);
    AU.addPreserved<ScalarEvolution>();
    AU.addRequired<TargetTransformInfo>();
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
  bool simplifyLoopLatch(Loop *L);
  bool rotateLoop(Loop *L, bool SimplifiedLatch);

private:
  unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
};
}

char LoopRotate::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotate, "loop-rotate", "Rotate Loops", false, false)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(LCSSA)
INITIALIZE_PASS_END(LoopRotate, "loop-rotate", "Rotate Loops", false, false)

Pass *llvm::createLoopRotatePass(int MaxHeaderSize) {
  return new LoopRotate(MaxHeaderSize);
}

/// Rotate Loop L as many times as possible. Return true if
/// the loop is rotated at least once.
bool LoopRotate::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipOptnoneFunction(L))
    return false;

  // Save the loop metadata.
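  // Note: as I understand it, the loop ID (!llvm.loop) is attached to the
  // backedge branch(es) into the header; both latch simplification and
  // rotation rewrite those branches, so the ID is stashed here and
  // re-attached below once the transformation is done.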
  MDNode *LoopMD = L->getLoopID();

  LI = &getAnalysis<LoopInfo>();
  TTI = &getAnalysis<TargetTransformInfo>();
  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      *L->getHeader()->getParent());

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  bool SimplifiedLatch = simplifyLoopLatch(L);

  // One loop can be rotated multiple times.
  bool MadeChange = false;
  while (rotateLoop(L, SimplifiedLatch)) {
    MadeChange = true;
    SimplifiedLatch = false;
  }

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange;
}

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
                                            BasicBlock *OrigPreheader,
                                            ValueToValueMapTy &ValueMap) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
  SSAUpdater SSA;
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap[OrigHeaderVal];

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
                             UE = OrigHeaderVal->use_end();
         UI != UE;) {
      // Grab the use before incrementing the iterator.
      Use &U = *UI;

      // Increment the iterator before removing the use from the list.
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }
  }
}
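
// The two helpers below fold a trivial loop tail (typically just a
// post-increment) into the exiting block. Illustrative sketch only; the block
// and value names are made up, not taken from this pass. Given a loop whose
// latch contains nothing but a cheap increment and the backedge:
//
//   exiting:                            ; conditionally leaves the loop
//     ...
//     br i1 %cmp, label %latch, label %exit
//   latch:                              ; only an increment plus the backedge
//     %i.next = add nsw i32 %i, 1
//     br label %header
//
// shouldSpeculateInstrs decides that %i.next is cheap to speculate, and
// simplifyLoopLatch splices it into 'exiting', redirects the branch straight
// to 'header', and deletes 'latch', making 'exiting' the new latch. (In the
// common two-block case, 'exiting' is the header itself.)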
/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd = nullptr;
      if (isa<ConstantInt>(I->getOperand(0)))
        IVOpnd = I->getOperand(1);

      if (isa<ConstantInt>(I->getOperand(1))) {
        if (IVOpnd)
          return false;

        IVOpnd = I->getOperand(0);
      }

      // If increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop && IVOpnd) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp, L))
    return false;

  DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
               << LastExit->getName() << "\n");

  // Hoist the instructions from Latch into LastExit.
  LastExit->getInstList().splice(BI, Latch->getInstList(), Latch->begin(), Jmp);

  unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
  BasicBlock *Header = Jmp->getSuccessor(0);
  assert(Header == L->getHeader() && "expected a backward branch");

  // Remove Latch from the CFG so that LastExit becomes the new Latch.
  BI->setSuccessor(FallThruPath, Header);
  Latch->replaceSuccessorsPhiUsesWith(LastExit);
  Jmp->eraseFromParent();

  // Nuke the Latch block.
  assert(Latch->empty() && "unable to evacuate Latch");
  LI->removeBlock(Latch);
  if (DominatorTreeWrapperPass *DTWP =
          getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTWP->getDomTree().eraseNode(Latch);
  Latch->eraseFromParent();
  return true;
}
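
// Conceptually, rotation turns a loop that tests its condition at the top into
// one that tests it at the bottom, guarded by a copy of that test. A rough
// sketch in C-like pseudocode (illustrative only, not taken from this file):
//
//   while (cond) { body; }
//
// becomes
//
//   if (cond) {                    // guard: the hoisted/cloned old header
//     do { body; } while (cond);   // the test now lives in the latch
//   }
//
// The old header's instructions are hoisted or cloned into the preheader to
// form the guard, and the block that followed the header inside the loop
// becomes the new header.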
/// Rotate loop L. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  BasicBlock *OrigHeader = L->getHeader();
  BasicBlock *OrigLatch = L->getLoopLatch();

  BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
  if (!BI || BI->isUnconditional())
    return false;

  // If the loop header is not one of the loop exiting blocks then
  // either this loop is already rotated or it is not
  // suitable for loop rotation transformations.
  if (!L->isLoopExiting(OrigHeader))
    return false;

  // If the loop latch already contains a branch that leaves the loop then the
  // loop is already rotated.
  if (!OrigLatch)
    return false;

  // Rotate if either the loop latch does *not* exit the loop, or if the loop
  // latch was just simplified.
  if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch)
    return false;

  // Check the size of the original header and reject the loop if it is very
  // big or we can't duplicate blocks inside it.
  {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, AC, EphValues);

    CodeMetrics Metrics;
    Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues);
    if (Metrics.notDuplicatable) {
      DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                   << " instructions: ";
            L->dump());
      return false;
    }
    if (Metrics.NumInsts > MaxHeaderSize)
      return false;
  }

  // Now, this loop is suitable for rotation.
  BasicBlock *OrigPreheader = L->getLoopPreheader();

  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!OrigPreheader)
    return false;

  // Anything ScalarEvolution may know about this loop or the PHI nodes
  // in its header will soon be invalidated.
  if (ScalarEvolution *SE = getAnalysisIfAvailable<ScalarEvolution>())
    SE->forgetLoop(L);

  DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());

  // Find the new loop header. NewHeader is the Header's one and only successor
  // that is inside the loop; the Header's other successor is outside the loop.
  // Otherwise the loop is not suitable for rotation.
  BasicBlock *Exit = BI->getSuccessor(0);
  BasicBlock *NewHeader = BI->getSuccessor(1);
  if (L->contains(Exit))
    std::swap(Exit, NewHeader);
  assert(NewHeader && "Unable to determine new loop header");
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Unable to determine loop header and exit blocks");

  // This code assumes that the new header has exactly one predecessor.
  // Remove any single-entry PHI nodes in it.
  assert(NewHeader->getSinglePredecessor() &&
         "New header doesn't have one pred!");
  FoldSingleEntryPHINodes(NewHeader);

  // Begin by walking OrigHeader and populating ValueMap with an entry for
  // each Instruction.
  BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
  ValueToValueMapTy ValueMap;

  // For PHI nodes, the value available in OrigPreheader is just the
  // incoming value from OrigPreheader.
  for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
    ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);

  // For the rest of the instructions, either hoist to the OrigPreheader if
  // possible or create a clone in the OrigPreheader if not.
  TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
  while (I != E) {
    Instruction *Inst = I++;

    // If the instruction's operands are invariant and it doesn't read or write
    // memory, then it is safe to hoist. Doing this doesn't change the order of
    // execution in the preheader, but does prevent the instruction from
    // executing in each iteration of the loop. This means it is safe to hoist
    // something that might trap, but isn't safe to hoist something that reads
    // memory (without proving that the loop doesn't write).
    if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
        !Inst->mayWriteToMemory() && !isa<TerminatorInst>(Inst) &&
        !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
      Inst->moveBefore(LoopEntryBranch);
      continue;
    }

    // Otherwise, create a duplicate of the instruction.
    Instruction *C = Inst->clone();

    // Eagerly remap the operands of the instruction.
    RemapInstruction(C, ValueMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);

    // With the operands remapped, see if the instruction constant folds or is
    // otherwise simplifiable. This commonly occurs because the entry from PHI
    // nodes allows icmps and other instructions to fold.
    // FIXME: Provide DL, TLI, DT, AC to SimplifyInstruction.
    Value *V = SimplifyInstruction(C);
    if (V && LI->replacementPreservesLCSSAForm(C, V)) {
      // If so, then delete the temporary instruction and stick the folded
      // value in the map.
      delete C;
      ValueMap[Inst] = V;
    } else {
      // Otherwise, stick the new instruction into the new block!
      C->setName(Inst->getName());
      C->insertBefore(LoopEntryBranch);
      ValueMap[Inst] = C;
    }
  }

  // Along with all the other instructions, we just cloned OrigHeader's
  // terminator into OrigPreheader. Fix up the PHI nodes in each of OrigHeader's
  // successors by duplicating their incoming values for OrigHeader.
  TerminatorInst *TI = OrigHeader->getTerminator();
  for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
    for (BasicBlock::iterator BI = TI->getSuccessor(i)->begin();
         PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
      PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

  // Now that OrigPreheader has a clone of OrigHeader's terminator, remove
  // OrigPreheader's old terminator (the original branch into the loop), and
  // remove the corresponding incoming values from the PHI nodes in OrigHeader.
  LoopEntryBranch->eraseFromParent();

  // If there were any uses of instructions in the duplicated block outside the
  // loop, update them, inserting PHI nodes as required.
  RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap);

  // NewHeader is now the header of the loop.
  L->moveToHeader(NewHeader);
  assert(L->getHeader() == NewHeader && "Latch block is our new header");

  // At this point, we've finished our major CFG changes. As part of cloning
  // the loop into the preheader we've simplified instructions and the
  // duplicated conditional branch may now be branching on a constant. If it is
  // branching on a constant and if that constant means that we enter the loop,
  // then we fold away the cond branch to an uncond branch. This simplifies the
  // loop in cases important for nested loops, and it also means we don't have
  // to split as many edges.
  BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
  assert(PHBI->isConditional() && "Should be clone of BI condbr!");
  if (!isa<ConstantInt>(PHBI->getCondition()) ||
      PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
          NewHeader) {
    // The conditional branch can't be folded, handle the general case.
    // Update DominatorTree to reflect the CFG change we just made. Then split
    // edges as necessary to preserve LoopSimplify form.
    if (DominatorTreeWrapperPass *DTWP =
            getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DominatorTree &DT = DTWP->getDomTree();
      // Everything that was dominated by the old loop header is now dominated
      // by the original loop preheader. Conceptually the header was merged
      // into the preheader, even though we reuse the actual block as a new
      // loop latch.
      DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
      SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
                                                   OrigHeaderNode->end());
      DomTreeNode *OrigPreheaderNode = DT.getNode(OrigPreheader);
      for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
        DT.changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);

      assert(DT.getNode(Exit)->getIDom() == OrigPreheaderNode);
      assert(DT.getNode(NewHeader)->getIDom() == OrigPreheaderNode);

      // Update OrigHeader to be dominated by the new header block.
      DT.changeImmediateDominator(OrigHeader, OrigLatch);
    }

    // Right now OrigPreheader has two successors, NewHeader and ExitBlock, and
    // thus is not a preheader anymore.
    // Split the edge to form a real preheader.
    BasicBlock *NewPH = SplitCriticalEdge(OrigPreheader, NewHeader, this);
    NewPH->setName(NewHeader->getName() + ".lr.ph");

    // Preserve canonical loop form, which means that 'Exit' should have only
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
    SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
    bool SplitLatchEdge = false;
    for (SmallVectorImpl<BasicBlock *>::iterator PI = ExitPreds.begin(),
                                                 PE = ExitPreds.end();
         PI != PE; ++PI) {
      // We only need to split loop exit edges.
      Loop *PredLoop = LI->getLoopFor(*PI);
      if (!PredLoop || PredLoop->contains(Exit))
        continue;
      SplitLatchEdge |= L->getLoopLatch() == *PI;
      BasicBlock *ExitSplit = SplitCriticalEdge(*PI, Exit, this);
      ExitSplit->moveBefore(Exit);
    }
    assert(SplitLatchEdge &&
           "Despite splitting all preds, failed to split latch exit?");
  } else {
    // We can fold the conditional branch in the preheader, this makes things
    // simpler. The first step is to remove the extra edge to the Exit block.
    Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
    BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
    NewBI->setDebugLoc(PHBI->getDebugLoc());
    PHBI->eraseFromParent();

    // With our CFG finalized, update DomTree if it is available.
    if (DominatorTreeWrapperPass *DTWP =
            getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DominatorTree &DT = DTWP->getDomTree();
      // Update OrigHeader to be dominated by the new header block.
      DT.changeImmediateDominator(NewHeader, OrigPreheader);
      DT.changeImmediateDominator(OrigHeader, OrigLatch);

      // Brute force incremental dominator tree update. Call
      // findNearestCommonDominator on all CFG predecessors of each child of
      // the original header.
      DomTreeNode *OrigHeaderNode = DT.getNode(OrigHeader);
      SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
                                                   OrigHeaderNode->end());
      bool Changed;
      do {
        Changed = false;
        for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) {
          DomTreeNode *Node = HeaderChildren[I];
          BasicBlock *BB = Node->getBlock();

          pred_iterator PI = pred_begin(BB);
          BasicBlock *NearestDom = *PI;
          for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
            NearestDom = DT.findNearestCommonDominator(NearestDom, *PI);

          // Remember if this changes the DomTree.
          if (Node->getIDom()->getBlock() != NearestDom) {
            DT.changeImmediateDominator(BB, NearestDom);
            Changed = true;
          }
        }

        // If the dominator changed, this may have an effect on other
        // predecessors, continue until we reach a fixpoint.
      } while (Changed);
    }
  }

  assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
  assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

  // Now that the CFG and DomTree are in a consistent state again, try to merge
  // the OrigHeader block into OrigLatch. This will succeed if they are
  // connected by an unconditional branch. This is just a cleanup so the
  // emitted code isn't too gross in this common case.
  MergeBlockIntoPredecessor(OrigHeader, this);

  DEBUG(dbgs() << "LoopRotation: into "; L->dump());

  ++NumRotated;
  return true;
}