//===- LoopRotation.cpp - Loop Rotation Pass ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the Loop Rotation Pass.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LoopRotation.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
using namespace llvm;

#define DEBUG_TYPE "loop-rotate"

static cl::opt<unsigned> DefaultRotationThreshold(
    "rotation-max-header-size", cl::init(16), cl::Hidden,
    cl::desc("The default maximum header size for automatic loop rotation"));

STATISTIC(NumRotated, "Number of loops rotated");

namespace {
/// A simple loop rotation transformation.
class LoopRotate {
  const unsigned MaxHeaderSize;
  LoopInfo *LI;
  const TargetTransformInfo *TTI;
  AssumptionCache *AC;
  DominatorTree *DT;
  ScalarEvolution *SE;

public:
  LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI,
             const TargetTransformInfo *TTI, AssumptionCache *AC,
             DominatorTree *DT, ScalarEvolution *SE)
      : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE) {
  }
  bool processLoop(Loop *L);

private:
  bool rotateLoop(Loop *L, bool SimplifiedLatch);
  bool simplifyLoopLatch(Loop *L);
};
} // end anonymous namespace

/// RewriteUsesOfClonedInstructions - We just cloned the instructions from the
/// old header into the preheader. If there were uses of the values produced by
/// these instructions that were outside of the loop, we have to insert PHI
/// nodes to merge the two values. Do this now.
static void RewriteUsesOfClonedInstructions(
    BasicBlock *OrigHeader, BasicBlock *OrigPreheader,
    ValueToValueMapTy &ValueMap, SmallVectorImpl<PHINode *> *InsertedPHIs) {
  // Remove PHI node entries that are no longer live.
  BasicBlock::iterator I, E = OrigHeader->end();
  for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I)
    PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader));

  // Now fix up users of the instructions in OrigHeader, inserting PHI nodes
  // as necessary.
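  //
  // Illustrative sketch (the value names %v, %v.clone, %v.merged are
  // hypothetical): if %v is defined in OrigHeader and used in a block that is
  // now reachable both from the clone in OrigPreheader and from OrigHeader
  // itself, SSAUpdater rewrites that use to a freshly inserted PHI such as
  //   %v.merged = phi i32 [ %v.clone, %OrigPreheader ], [ %v, %OrigHeader ]
  // Any PHIs it creates are reported back through InsertedPHIs so the debug
  // info can be updated afterwards.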
  SSAUpdater SSA(InsertedPHIs);
  for (I = OrigHeader->begin(); I != E; ++I) {
    Value *OrigHeaderVal = &*I;

    // If there are no uses of the value (e.g. because it returns void), there
    // is nothing to rewrite.
    if (OrigHeaderVal->use_empty())
      continue;

    Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal);

    // The value now exists in two versions: the initial value in the preheader
    // and the loop "next" value in the original header.
    SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName());
    SSA.AddAvailableValue(OrigHeader, OrigHeaderVal);
    SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal);

    // Visit each use of the OrigHeader instruction.
    for (Value::use_iterator UI = OrigHeaderVal->use_begin(),
                             UE = OrigHeaderVal->use_end();
         UI != UE;) {
      // Grab the use before incrementing the iterator.
      Use &U = *UI;

      // Increment the iterator before removing the use from the list.
      ++UI;

      // SSAUpdater can't handle a non-PHI use in the same block as an
      // earlier def. We can easily handle those cases manually.
      Instruction *UserInst = cast<Instruction>(U.getUser());
      if (!isa<PHINode>(UserInst)) {
        BasicBlock *UserBB = UserInst->getParent();

        // The original users in the OrigHeader are already using the
        // original definitions.
        if (UserBB == OrigHeader)
          continue;

        // Users in the OrigPreHeader need to use the value to which the
        // original definitions are mapped.
        if (UserBB == OrigPreheader) {
          U = OrigPreHeaderVal;
          continue;
        }
      }

      // Anything else can be handled by SSAUpdater.
      SSA.RewriteUse(U);
    }

    // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug
    // intrinsics.
    LLVMContext &C = OrigHeader->getContext();
    if (auto *VAM = ValueAsMetadata::getIfExists(OrigHeaderVal)) {
      if (auto *MAV = MetadataAsValue::getIfExists(C, VAM)) {
        for (auto UI = MAV->use_begin(), E = MAV->use_end(); UI != E;) {
          // Grab the use before incrementing the iterator. Otherwise, altering
          // the Use will invalidate the iterator.
          Use &U = *UI++;
          DbgInfoIntrinsic *UserInst = dyn_cast<DbgInfoIntrinsic>(U.getUser());
          if (!UserInst)
            continue;

          // The original users in the OrigHeader are already using the
          // original definitions.
          BasicBlock *UserBB = UserInst->getParent();
          if (UserBB == OrigHeader)
            continue;

          // Users in the OrigPreHeader need to use the value to which the
          // original definitions are mapped and anything else can be handled
          // by the SSAUpdater. To avoid adding PHINodes, check if the value is
          // available in UserBB; if not, substitute undef.
          Value *NewVal;
          if (UserBB == OrigPreheader)
            NewVal = OrigPreHeaderVal;
          else if (SSA.HasValueForBlock(UserBB))
            NewVal = SSA.GetValueInMiddleOfBlock(UserBB);
          else
            NewVal = UndefValue::get(OrigHeaderVal->getType());
          U = MetadataAsValue::get(C, ValueAsMetadata::get(NewVal));
        }
      }
    }
  }
}

/// Propagate dbg.value intrinsics through the newly inserted Phis.
static void insertDebugValues(BasicBlock *OrigHeader,
                              SmallVectorImpl<PHINode *> &InsertedPHIs) {
  ValueToValueMapTy DbgValueMap;

  // Map existing PHI nodes to their dbg.values.
  for (auto &I : *OrigHeader) {
    if (auto DbgII = dyn_cast<DbgInfoIntrinsic>(&I)) {
      if (auto *Loc = dyn_cast_or_null<PHINode>(DbgII->getVariableLocation()))
        DbgValueMap.insert({Loc, DbgII});
    }
  }

  // Then iterate through the new PHIs and look to see if they use one of the
  // previously mapped PHIs. If so, insert a new dbg.value intrinsic that will
  // propagate the info through the new PHI.
  LLVMContext &C = OrigHeader->getContext();
  for (auto PHI : InsertedPHIs) {
    for (auto VI : PHI->operand_values()) {
      auto V = DbgValueMap.find(VI);
      if (V != DbgValueMap.end()) {
        auto *DbgII = cast<DbgInfoIntrinsic>(V->second);
        Instruction *NewDbgII = DbgII->clone();
        auto PhiMAV = MetadataAsValue::get(C, ValueAsMetadata::get(PHI));
        NewDbgII->setOperand(0, PhiMAV);
        BasicBlock *Parent = PHI->getParent();
        NewDbgII->insertBefore(Parent->getFirstNonPHIOrDbgOrLifetime());
      }
    }
  }
}

/// Rotate loop L. Return true if the loop is rotated.
///
/// \param SimplifiedLatch is true if the latch was just folded into the final
/// loop exit. In this case we may want to rotate even though the new latch is
/// now an exiting branch. This rotation would have happened had the latch not
/// been simplified. However, if SimplifiedLatch is false, then we avoid
/// rotating loops in which the latch exits to avoid excessive or endless
/// rotation. LoopRotate should be repeatable and converge to a canonical
/// form. This property is satisfied because simplifying the loop latch can
/// only happen once across multiple invocations of the LoopRotate pass.
bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
  // If the loop has only one block then there is not much to rotate.
  if (L->getBlocks().size() == 1)
    return false;

  BasicBlock *OrigHeader = L->getHeader();
  BasicBlock *OrigLatch = L->getLoopLatch();

  BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator());
  if (!BI || BI->isUnconditional())
    return false;

  // If the loop header is not one of the loop exiting blocks then
  // either this loop is already rotated or it is not
  // suitable for loop rotation transformations.
  if (!L->isLoopExiting(OrigHeader))
    return false;

  // If the loop latch already contains a branch that leaves the loop then the
  // loop is already rotated.
  if (!OrigLatch)
    return false;

  // Rotate if either the loop latch does *not* exit the loop, or if the loop
  // latch was just simplified.
  if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch)
    return false;

  // Check the size of the original header and reject the loop if it is very
  // big or we can't duplicate blocks inside it.
  {
    SmallPtrSet<const Value *, 32> EphValues;
    CodeMetrics::collectEphemeralValues(L, AC, EphValues);

    CodeMetrics Metrics;
    Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues);
    if (Metrics.notDuplicatable) {
      DEBUG(dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable"
                   << " instructions: ";
            L->dump());
      return false;
    }
    if (Metrics.convergent) {
      DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent "
                      "instructions: ";
            L->dump());
      return false;
    }
    if (Metrics.NumInsts > MaxHeaderSize)
      return false;
  }

  // Now, this loop is suitable for rotation.
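  //
  // For intuition (illustrative sketch, not lifted from actual IR): rotation
  // duplicates the header's exit test into the preheader, turning
  //   while (cond) { body; }
  // into the equivalent of
  //   if (cond) { do { body; } while (cond); }
  // so the loop becomes bottom-tested and the old header ends up inside the
  // loop body.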
  BasicBlock *OrigPreheader = L->getLoopPreheader();

  // If the loop could not be converted to canonical form, it must have an
  // indirectbr in it, just give up.
  if (!OrigPreheader)
    return false;

  // Anything ScalarEvolution may know about this loop or the PHI nodes
  // in its header will soon be invalidated.
  if (SE)
    SE->forgetLoop(L);

  DEBUG(dbgs() << "LoopRotation: rotating "; L->dump());

  // Find the new loop header. NewHeader is the header's one and only successor
  // that is inside the loop. The header's other successor is outside the
  // loop. Otherwise the loop is not suitable for rotation.
  BasicBlock *Exit = BI->getSuccessor(0);
  BasicBlock *NewHeader = BI->getSuccessor(1);
  if (L->contains(Exit))
    std::swap(Exit, NewHeader);
  assert(NewHeader && "Unable to determine new loop header");
  assert(L->contains(NewHeader) && !L->contains(Exit) &&
         "Unable to determine loop header and exit blocks");

  // This code assumes that the new header has exactly one predecessor.
  // Remove any single-entry PHI nodes in it.
  assert(NewHeader->getSinglePredecessor() &&
         "New header doesn't have one pred!");
  FoldSingleEntryPHINodes(NewHeader);

  // Begin by walking OrigHeader and populating ValueMap with an entry for
  // each Instruction.
  BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end();
  ValueToValueMapTy ValueMap;

  // For PHI nodes, the value available in OldPreHeader is just the
  // incoming value from OldPreHeader.
  for (; PHINode *PN = dyn_cast<PHINode>(I); ++I)
    ValueMap[PN] = PN->getIncomingValueForBlock(OrigPreheader);

  const DataLayout &DL = L->getHeader()->getModule()->getDataLayout();

  // For the rest of the instructions, either hoist to the OrigPreheader if
  // possible or create a clone in the OldPreHeader if not.
  TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
  while (I != E) {
    Instruction *Inst = &*I++;

    // If the instruction's operands are invariant and it doesn't read or write
    // memory, then it is safe to hoist. Doing this doesn't change the order of
    // execution in the preheader, but does prevent the instruction from
    // executing in each iteration of the loop. This means it is safe to hoist
    // something that might trap, but isn't safe to hoist something that reads
    // memory (without proving that the loop doesn't write).
    if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() &&
        !Inst->mayWriteToMemory() && !isa<TerminatorInst>(Inst) &&
        !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) {
      Inst->moveBefore(LoopEntryBranch);
      continue;
    }

    // Otherwise, create a duplicate of the instruction.
    Instruction *C = Inst->clone();

    // Eagerly remap the operands of the instruction.
    RemapInstruction(C, ValueMap,
                     RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // With the operands remapped, see if the instruction constant folds or is
    // otherwise simplifiable. This commonly occurs because the entry from PHI
    // nodes allows icmps and other instructions to fold.
    // FIXME: Provide TLI, DT, AC to SimplifyInstruction.
    Value *V = SimplifyInstruction(C, DL);
    if (V && LI->replacementPreservesLCSSAForm(C, V)) {
      // If so, then delete the temporary instruction and stick the folded
      // value in the map.
      ValueMap[Inst] = V;
      if (!C->mayHaveSideEffects()) {
        delete C;
        C = nullptr;
      }
    } else {
      ValueMap[Inst] = C;
    }
    if (C) {
      // Otherwise, stick the new instruction into the new block!
      C->setName(Inst->getName());
      C->insertBefore(LoopEntryBranch);

      if (auto *II = dyn_cast<IntrinsicInst>(C))
        if (II->getIntrinsicID() == Intrinsic::assume)
          AC->registerAssumption(II);
    }
  }

  // Along with all the other instructions, we just cloned OrigHeader's
  // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's
  // successors by duplicating their incoming values for OrigHeader.
  TerminatorInst *TI = OrigHeader->getTerminator();
  for (BasicBlock *SuccBB : TI->successors())
    for (BasicBlock::iterator BI = SuccBB->begin();
         PHINode *PN = dyn_cast<PHINode>(BI); ++BI)
      PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader);

  // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove
  // OrigPreHeader's old terminator (the original branch into the loop), and
  // remove the corresponding incoming values from the PHI nodes in OrigHeader.
  LoopEntryBranch->eraseFromParent();

  SmallVector<PHINode *, 2> InsertedPHIs;
  // If there were any uses of instructions in the duplicated block outside the
  // loop, update them, inserting PHI nodes as required.
  RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap,
                                  &InsertedPHIs);

  // Attach dbg.value intrinsics to the new phis if they use a value that
  // previously had debug metadata attached. This keeps the debug info
  // up-to-date in the loop body.
  if (!InsertedPHIs.empty())
    insertDebugValues(OrigHeader, InsertedPHIs);

  // NewHeader is now the header of the loop.
  L->moveToHeader(NewHeader);
  assert(L->getHeader() == NewHeader && "Latch block is our new header");

  // At this point, we've finished our major CFG changes. As part of cloning
  // the loop into the preheader we've simplified instructions and the
  // duplicated conditional branch may now be branching on a constant. If it is
  // branching on a constant and if that constant means that we enter the loop,
  // then we fold away the cond branch to an uncond branch. This simplifies the
  // loop in cases important for nested loops, and it also means we don't have
  // to split as many edges.
  BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator());
  assert(PHBI->isConditional() && "Should be clone of BI condbr!");
  if (!isa<ConstantInt>(PHBI->getCondition()) ||
      PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) !=
          NewHeader) {
    // The conditional branch can't be folded, handle the general case.
    // Update DominatorTree to reflect the CFG change we just made. Then split
    // edges as necessary to preserve LoopSimplify form.
    if (DT) {
      // Everything that was dominated by the old loop header is now dominated
      // by the original loop preheader. Conceptually the header was merged
      // into the preheader, even though we reuse the actual block as a new
      // loop latch.
      DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
      SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
                                                   OrigHeaderNode->end());
      DomTreeNode *OrigPreheaderNode = DT->getNode(OrigPreheader);
      for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I)
        DT->changeImmediateDominator(HeaderChildren[I], OrigPreheaderNode);

      assert(DT->getNode(Exit)->getIDom() == OrigPreheaderNode);
      assert(DT->getNode(NewHeader)->getIDom() == OrigPreheaderNode);

      // Update OrigHeader to be dominated by the new header block.
      DT->changeImmediateDominator(OrigHeader, OrigLatch);
    }

    // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and
    // thus is not a preheader anymore.
    // Split the edge to form a real preheader.
    BasicBlock *NewPH = SplitCriticalEdge(
        OrigPreheader, NewHeader,
        CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA());
    NewPH->setName(NewHeader->getName() + ".lr.ph");

    // Preserve canonical loop form, which means that 'Exit' should have only
    // one predecessor. Note that Exit could be an exit block for multiple
    // nested loops, causing both of the edges to now be critical and need to
    // be split.
    SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit));
    bool SplitLatchEdge = false;
    for (BasicBlock *ExitPred : ExitPreds) {
      // We only need to split loop exit edges.
      Loop *PredLoop = LI->getLoopFor(ExitPred);
      if (!PredLoop || PredLoop->contains(Exit))
        continue;
      if (isa<IndirectBrInst>(ExitPred->getTerminator()))
        continue;
      SplitLatchEdge |= L->getLoopLatch() == ExitPred;
      BasicBlock *ExitSplit = SplitCriticalEdge(
          ExitPred, Exit,
          CriticalEdgeSplittingOptions(DT, LI).setPreserveLCSSA());
      ExitSplit->moveBefore(Exit);
    }
    assert(SplitLatchEdge &&
           "Despite splitting all preds, failed to split latch exit?");
  } else {
    // We can fold the conditional branch in the preheader; this makes things
    // simpler. The first step is to remove the extra edge to the Exit block.
    Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/);
    BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI);
    NewBI->setDebugLoc(PHBI->getDebugLoc());
    PHBI->eraseFromParent();

    // With our CFG finalized, update DomTree if it is available.
    if (DT) {
      // Update OrigHeader to be dominated by the new header block.
      DT->changeImmediateDominator(NewHeader, OrigPreheader);
      DT->changeImmediateDominator(OrigHeader, OrigLatch);

      // Brute force incremental dominator tree update. Call
      // findNearestCommonDominator on all CFG predecessors of each child of
      // the original header.
      DomTreeNode *OrigHeaderNode = DT->getNode(OrigHeader);
      SmallVector<DomTreeNode *, 8> HeaderChildren(OrigHeaderNode->begin(),
                                                   OrigHeaderNode->end());
      bool Changed;
      do {
        Changed = false;
        for (unsigned I = 0, E = HeaderChildren.size(); I != E; ++I) {
          DomTreeNode *Node = HeaderChildren[I];
          BasicBlock *BB = Node->getBlock();

          pred_iterator PI = pred_begin(BB);
          BasicBlock *NearestDom = *PI;
          for (pred_iterator PE = pred_end(BB); PI != PE; ++PI)
            NearestDom = DT->findNearestCommonDominator(NearestDom, *PI);

          // Remember if this changes the DomTree.
          if (Node->getIDom()->getBlock() != NearestDom) {
            DT->changeImmediateDominator(BB, NearestDom);
            Changed = true;
          }
        }

        // If the dominator changed, this may have an effect on other
        // predecessors; continue until we reach a fixpoint.
      } while (Changed);
    }
  }

  assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation");
  assert(L->getLoopLatch() && "Invalid loop latch after loop rotation");

  // Now that the CFG and DomTree are in a consistent state again, try to merge
  // the OrigHeader block into OrigLatch. This will succeed if they are
  // connected by an unconditional branch. This is just a cleanup so the
  // emitted code isn't too gross in this common case.
  MergeBlockIntoPredecessor(OrigHeader, DT, LI);

  DEBUG(dbgs() << "LoopRotation: into "; L->dump());

  ++NumRotated;
  return true;
}

/// Determine whether the instructions in this range may be safely and cheaply
/// speculated. This is not an important enough situation to develop complex
/// heuristics. We handle a single arithmetic instruction along with any type
/// conversions.
static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
                                  BasicBlock::iterator End, Loop *L) {
  bool seenIncrement = false;
  bool MultiExitLoop = false;

  if (!L->getExitingBlock())
    MultiExitLoop = true;

  for (BasicBlock::iterator I = Begin; I != End; ++I) {

    if (!isSafeToSpeculativelyExecute(&*I))
      return false;

    if (isa<DbgInfoIntrinsic>(I))
      continue;

    switch (I->getOpcode()) {
    default:
      return false;
    case Instruction::GetElementPtr:
      // GEPs are cheap if all indices are constant.
      if (!cast<GEPOperator>(I)->hasAllConstantIndices())
        return false;
      // fall-thru to increment case
      LLVM_FALLTHROUGH;
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr: {
      Value *IVOpnd =
          !isa<Constant>(I->getOperand(0))
              ? I->getOperand(0)
              : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr;
      if (!IVOpnd)
        return false;

      // If the increment operand is used outside of the loop, this speculation
      // could cause extra live range interference.
      if (MultiExitLoop) {
        for (User *UseI : IVOpnd->users()) {
          auto *UserInst = cast<Instruction>(UseI);
          if (!L->contains(UserInst))
            return false;
        }
      }

      if (seenIncrement)
        return false;
      seenIncrement = true;
      break;
    }
    case Instruction::Trunc:
    case Instruction::ZExt:
    case Instruction::SExt:
      // ignore type conversions
      break;
    }
  }
  return true;
}

/// Fold the loop tail into the loop exit by speculating the loop tail
/// instructions. Typically, this is a single post-increment. In the case of a
/// simple 2-block loop, hoisting the increment can be much better than
/// duplicating the entire loop header. In the case of loops with early exits,
/// rotation will not work anyway, but simplifyLoopLatch will put the loop in
/// canonical form so downstream passes can handle it.
///
/// I don't believe this invalidates SCEV.
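///
/// Illustrative sketch (block and value names are hypothetical): a loop tail
/// of the form
///   exiting:                        ; single predecessor of the latch
///     br i1 %cmp, label %latch, label %exit
///   latch:
///     %i.next = add nsw i32 %i, 1
///     br label %header
/// becomes, after speculating the increment into the exiting block and
/// deleting the now-empty latch,
///   exiting:
///     %i.next = add nsw i32 %i, 1
///     br i1 %cmp, label %header, label %exit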
bool LoopRotate::simplifyLoopLatch(Loop *L) {
  BasicBlock *Latch = L->getLoopLatch();
  if (!Latch || Latch->hasAddressTaken())
    return false;

  BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!Jmp || !Jmp->isUnconditional())
    return false;

  BasicBlock *LastExit = Latch->getSinglePredecessor();
  if (!LastExit || !L->isLoopExiting(LastExit))
    return false;

  BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator());
  if (!BI)
    return false;

  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
    return false;

  DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
               << LastExit->getName() << "\n");

  // Hoist the instructions from Latch into LastExit.
  LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(),
                                 Latch->begin(), Jmp->getIterator());

  unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
  BasicBlock *Header = Jmp->getSuccessor(0);
  assert(Header == L->getHeader() && "expected a backward branch");

  // Remove Latch from the CFG so that LastExit becomes the new Latch.
  BI->setSuccessor(FallThruPath, Header);
  Latch->replaceSuccessorsPhiUsesWith(LastExit);
  Jmp->eraseFromParent();

  // Nuke the Latch block.
  assert(Latch->empty() && "unable to evacuate Latch");
  LI->removeBlock(Latch);
  if (DT)
    DT->eraseNode(Latch);
  Latch->eraseFromParent();
  return true;
}

/// Rotate \c L, and return true if any modification was made.
bool LoopRotate::processLoop(Loop *L) {
  // Save the loop metadata.
  MDNode *LoopMD = L->getLoopID();

  // Simplify the loop latch before attempting to rotate the header
  // upward. Rotation may not be needed if the loop tail can be folded into the
  // loop exit.
  bool SimplifiedLatch = simplifyLoopLatch(L);

  bool MadeChange = rotateLoop(L, SimplifiedLatch);
  assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) &&
         "Loop latch should be exiting after loop-rotate.");

  // Restore the loop metadata.
  // NB! We presume LoopRotation DOESN'T ADD its own metadata.
  if ((MadeChange || SimplifiedLatch) && LoopMD)
    L->setLoopID(LoopMD);

  return MadeChange;
}

LoopRotatePass::LoopRotatePass(bool EnableHeaderDuplication)
    : EnableHeaderDuplication(EnableHeaderDuplication) {}

PreservedAnalyses LoopRotatePass::run(Loop &L, LoopAnalysisManager &AM,
                                      LoopStandardAnalysisResults &AR,
                                      LPMUpdater &) {
  int Threshold = EnableHeaderDuplication ? DefaultRotationThreshold : 0;
  LoopRotate LR(Threshold, &AR.LI, &AR.TTI, &AR.AC, &AR.DT, &AR.SE);

  bool Changed = LR.processLoop(&L);
  if (!Changed)
    return PreservedAnalyses::all();

  return getLoopPassPreservedAnalyses();
}

namespace {

class LoopRotateLegacyPass : public LoopPass {
  unsigned MaxHeaderSize;

public:
  static char ID; // Pass ID, replacement for typeid
  LoopRotateLegacyPass(int SpecifiedMaxHeaderSize = -1) : LoopPass(ID) {
    initializeLoopRotateLegacyPassPass(*PassRegistry::getPassRegistry());
    if (SpecifiedMaxHeaderSize == -1)
      MaxHeaderSize = DefaultRotationThreshold;
    else
      MaxHeaderSize = unsigned(SpecifiedMaxHeaderSize);
  }

  // LCSSA form makes instruction renaming easier.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;
    Function &F = *L->getHeader()->getParent();

    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    const auto *TTI = &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto *AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
    auto *SEWP = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    auto *SE = SEWP ? &SEWP->getSE() : nullptr;
    LoopRotate LR(MaxHeaderSize, LI, TTI, AC, DT, SE);
    return LR.processLoop(L);
  }
};
} // end anonymous namespace

char LoopRotateLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(LoopRotateLegacyPass, "loop-rotate", "Rotate Loops",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(LoopRotateLegacyPass, "loop-rotate", "Rotate Loops", false,
                    false)

Pass *llvm::createLoopRotatePass(int MaxHeaderSize) {
  return new LoopRotateLegacyPass(MaxHeaderSize);
}
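
// Usage sketch (hypothetical driver code, not part of this file): the legacy
// pass can be scheduled from C++ with an explicit header-size threshold, e.g.
//
//   legacy::PassManager PM;
//   PM.add(createLoopRotatePass(/*MaxHeaderSize=*/32));
//   PM.run(*M);
//
// or selected on the opt command line as -loop-rotate, with the default
// threshold controlled by -rotation-max-header-size.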