1 //===----------------- LoopRotationUtils.cpp -----------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // This file provides utilities to convert a loop into a loop with bottom test. 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include "llvm/Transforms/Utils/LoopRotationUtils.h" 14 #include "llvm/ADT/Statistic.h" 15 #include "llvm/Analysis/AliasAnalysis.h" 16 #include "llvm/Analysis/AssumptionCache.h" 17 #include "llvm/Analysis/BasicAliasAnalysis.h" 18 #include "llvm/Analysis/CodeMetrics.h" 19 #include "llvm/Analysis/DomTreeUpdater.h" 20 #include "llvm/Analysis/GlobalsModRef.h" 21 #include "llvm/Analysis/InstructionSimplify.h" 22 #include "llvm/Analysis/LoopPass.h" 23 #include "llvm/Analysis/MemorySSA.h" 24 #include "llvm/Analysis/MemorySSAUpdater.h" 25 #include "llvm/Analysis/ScalarEvolution.h" 26 #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" 27 #include "llvm/Analysis/TargetTransformInfo.h" 28 #include "llvm/Analysis/ValueTracking.h" 29 #include "llvm/IR/CFG.h" 30 #include "llvm/IR/DebugInfoMetadata.h" 31 #include "llvm/IR/Dominators.h" 32 #include "llvm/IR/Function.h" 33 #include "llvm/IR/IntrinsicInst.h" 34 #include "llvm/IR/Module.h" 35 #include "llvm/Support/CommandLine.h" 36 #include "llvm/Support/Debug.h" 37 #include "llvm/Support/raw_ostream.h" 38 #include "llvm/Transforms/Utils/BasicBlockUtils.h" 39 #include "llvm/Transforms/Utils/Local.h" 40 #include "llvm/Transforms/Utils/LoopUtils.h" 41 #include "llvm/Transforms/Utils/SSAUpdater.h" 42 #include "llvm/Transforms/Utils/ValueMapper.h" 43 using namespace llvm; 44 45 #define DEBUG_TYPE "loop-rotate" 46 47 STATISTIC(NumNotRotatedDueToHeaderSize, 48 "Number of loops not rotated due to the header size"); 49 STATISTIC(NumRotated, "Number of loops rotated"); 50 51 static cl::opt<bool> 52 MultiRotate("loop-rotate-multi", cl::init(false), cl::Hidden, 53 cl::desc("Allow loop rotation multiple times in order to reach " 54 "a better latch exit")); 55 56 namespace { 57 /// A simple loop rotation transformation. 58 class LoopRotate { 59 const unsigned MaxHeaderSize; 60 LoopInfo *LI; 61 const TargetTransformInfo *TTI; 62 AssumptionCache *AC; 63 DominatorTree *DT; 64 ScalarEvolution *SE; 65 MemorySSAUpdater *MSSAU; 66 const SimplifyQuery &SQ; 67 bool RotationOnly; 68 bool IsUtilMode; 69 70 public: 71 LoopRotate(unsigned MaxHeaderSize, LoopInfo *LI, 72 const TargetTransformInfo *TTI, AssumptionCache *AC, 73 DominatorTree *DT, ScalarEvolution *SE, MemorySSAUpdater *MSSAU, 74 const SimplifyQuery &SQ, bool RotationOnly, bool IsUtilMode) 75 : MaxHeaderSize(MaxHeaderSize), LI(LI), TTI(TTI), AC(AC), DT(DT), SE(SE), 76 MSSAU(MSSAU), SQ(SQ), RotationOnly(RotationOnly), 77 IsUtilMode(IsUtilMode) {} 78 bool processLoop(Loop *L); 79 80 private: 81 bool rotateLoop(Loop *L, bool SimplifiedLatch); 82 bool simplifyLoopLatch(Loop *L); 83 }; 84 } // end anonymous namespace 85 86 /// Insert (K, V) pair into the ValueToValueMap, and verify the key did not 87 /// previously exist in the map, and the value was inserted. 88 static void InsertNewValueIntoMap(ValueToValueMapTy &VM, Value *K, Value *V) { 89 bool Inserted = VM.insert({K, V}).second; 90 assert(Inserted); 91 (void)Inserted; 92 } 93 /// RewriteUsesOfClonedInstructions - We just cloned the instructions from the 94 /// old header into the preheader. If there were uses of the values produced by 95 /// these instruction that were outside of the loop, we have to insert PHI nodes 96 /// to merge the two values. Do this now. 97 static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader, 98 BasicBlock *OrigPreheader, 99 ValueToValueMapTy &ValueMap, 100 SmallVectorImpl<PHINode*> *InsertedPHIs) { 101 // Remove PHI node entries that are no longer live. 102 BasicBlock::iterator I, E = OrigHeader->end(); 103 for (I = OrigHeader->begin(); PHINode *PN = dyn_cast<PHINode>(I); ++I) 104 PN->removeIncomingValue(PN->getBasicBlockIndex(OrigPreheader)); 105 106 // Now fix up users of the instructions in OrigHeader, inserting PHI nodes 107 // as necessary. 108 SSAUpdater SSA(InsertedPHIs); 109 for (I = OrigHeader->begin(); I != E; ++I) { 110 Value *OrigHeaderVal = &*I; 111 112 // If there are no uses of the value (e.g. because it returns void), there 113 // is nothing to rewrite. 114 if (OrigHeaderVal->use_empty()) 115 continue; 116 117 Value *OrigPreHeaderVal = ValueMap.lookup(OrigHeaderVal); 118 119 // The value now exits in two versions: the initial value in the preheader 120 // and the loop "next" value in the original header. 121 SSA.Initialize(OrigHeaderVal->getType(), OrigHeaderVal->getName()); 122 SSA.AddAvailableValue(OrigHeader, OrigHeaderVal); 123 SSA.AddAvailableValue(OrigPreheader, OrigPreHeaderVal); 124 125 // Visit each use of the OrigHeader instruction. 126 for (Value::use_iterator UI = OrigHeaderVal->use_begin(), 127 UE = OrigHeaderVal->use_end(); 128 UI != UE;) { 129 // Grab the use before incrementing the iterator. 130 Use &U = *UI; 131 132 // Increment the iterator before removing the use from the list. 133 ++UI; 134 135 // SSAUpdater can't handle a non-PHI use in the same block as an 136 // earlier def. We can easily handle those cases manually. 137 Instruction *UserInst = cast<Instruction>(U.getUser()); 138 if (!isa<PHINode>(UserInst)) { 139 BasicBlock *UserBB = UserInst->getParent(); 140 141 // The original users in the OrigHeader are already using the 142 // original definitions. 143 if (UserBB == OrigHeader) 144 continue; 145 146 // Users in the OrigPreHeader need to use the value to which the 147 // original definitions are mapped. 148 if (UserBB == OrigPreheader) { 149 U = OrigPreHeaderVal; 150 continue; 151 } 152 } 153 154 // Anything else can be handled by SSAUpdater. 155 SSA.RewriteUse(U); 156 } 157 158 // Replace MetadataAsValue(ValueAsMetadata(OrigHeaderVal)) uses in debug 159 // intrinsics. 160 SmallVector<DbgValueInst *, 1> DbgValues; 161 llvm::findDbgValues(DbgValues, OrigHeaderVal); 162 for (auto &DbgValue : DbgValues) { 163 // The original users in the OrigHeader are already using the original 164 // definitions. 165 BasicBlock *UserBB = DbgValue->getParent(); 166 if (UserBB == OrigHeader) 167 continue; 168 169 // Users in the OrigPreHeader need to use the value to which the 170 // original definitions are mapped and anything else can be handled by 171 // the SSAUpdater. To avoid adding PHINodes, check if the value is 172 // available in UserBB, if not substitute undef. 173 Value *NewVal; 174 if (UserBB == OrigPreheader) 175 NewVal = OrigPreHeaderVal; 176 else if (SSA.HasValueForBlock(UserBB)) 177 NewVal = SSA.GetValueInMiddleOfBlock(UserBB); 178 else 179 NewVal = UndefValue::get(OrigHeaderVal->getType()); 180 DbgValue->setOperand(0, 181 MetadataAsValue::get(OrigHeaderVal->getContext(), 182 ValueAsMetadata::get(NewVal))); 183 } 184 } 185 } 186 187 // Assuming both header and latch are exiting, look for a phi which is only 188 // used outside the loop (via a LCSSA phi) in the exit from the header. 189 // This means that rotating the loop can remove the phi. 190 static bool profitableToRotateLoopExitingLatch(Loop *L) { 191 BasicBlock *Header = L->getHeader(); 192 BranchInst *BI = dyn_cast<BranchInst>(Header->getTerminator()); 193 assert(BI && BI->isConditional() && "need header with conditional exit"); 194 BasicBlock *HeaderExit = BI->getSuccessor(0); 195 if (L->contains(HeaderExit)) 196 HeaderExit = BI->getSuccessor(1); 197 198 for (auto &Phi : Header->phis()) { 199 // Look for uses of this phi in the loop/via exits other than the header. 200 if (llvm::any_of(Phi.users(), [HeaderExit](const User *U) { 201 return cast<Instruction>(U)->getParent() != HeaderExit; 202 })) 203 continue; 204 return true; 205 } 206 return false; 207 } 208 209 // Check that latch exit is deoptimizing (which means - very unlikely to happen) 210 // and there is another exit from the loop which is non-deoptimizing. 211 // If we rotate latch to that exit our loop has a better chance of being fully 212 // canonical. 213 // 214 // It can give false positives in some rare cases. 215 static bool canRotateDeoptimizingLatchExit(Loop *L) { 216 BasicBlock *Latch = L->getLoopLatch(); 217 assert(Latch && "need latch"); 218 BranchInst *BI = dyn_cast<BranchInst>(Latch->getTerminator()); 219 // Need normal exiting latch. 220 if (!BI || !BI->isConditional()) 221 return false; 222 223 BasicBlock *Exit = BI->getSuccessor(1); 224 if (L->contains(Exit)) 225 Exit = BI->getSuccessor(0); 226 227 // Latch exit is non-deoptimizing, no need to rotate. 228 if (!Exit->getPostdominatingDeoptimizeCall()) 229 return false; 230 231 SmallVector<BasicBlock *, 4> Exits; 232 L->getUniqueExitBlocks(Exits); 233 if (!Exits.empty()) { 234 // There is at least one non-deoptimizing exit. 235 // 236 // Note, that BasicBlock::getPostdominatingDeoptimizeCall is not exact, 237 // as it can conservatively return false for deoptimizing exits with 238 // complex enough control flow down to deoptimize call. 239 // 240 // That means here we can report success for a case where 241 // all exits are deoptimizing but one of them has complex enough 242 // control flow (e.g. with loops). 243 // 244 // That should be a very rare case and false positives for this function 245 // have compile-time effect only. 246 return any_of(Exits, [](const BasicBlock *BB) { 247 return !BB->getPostdominatingDeoptimizeCall(); 248 }); 249 } 250 return false; 251 } 252 253 /// Rotate loop LP. Return true if the loop is rotated. 254 /// 255 /// \param SimplifiedLatch is true if the latch was just folded into the final 256 /// loop exit. In this case we may want to rotate even though the new latch is 257 /// now an exiting branch. This rotation would have happened had the latch not 258 /// been simplified. However, if SimplifiedLatch is false, then we avoid 259 /// rotating loops in which the latch exits to avoid excessive or endless 260 /// rotation. LoopRotate should be repeatable and converge to a canonical 261 /// form. This property is satisfied because simplifying the loop latch can only 262 /// happen once across multiple invocations of the LoopRotate pass. 263 /// 264 /// If -loop-rotate-multi is enabled we can do multiple rotations in one go 265 /// so to reach a suitable (non-deoptimizing) exit. 266 bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) { 267 // If the loop has only one block then there is not much to rotate. 268 if (L->getBlocks().size() == 1) 269 return false; 270 271 bool Rotated = false; 272 do { 273 BasicBlock *OrigHeader = L->getHeader(); 274 BasicBlock *OrigLatch = L->getLoopLatch(); 275 276 BranchInst *BI = dyn_cast<BranchInst>(OrigHeader->getTerminator()); 277 if (!BI || BI->isUnconditional()) 278 return Rotated; 279 280 // If the loop header is not one of the loop exiting blocks then 281 // either this loop is already rotated or it is not 282 // suitable for loop rotation transformations. 283 if (!L->isLoopExiting(OrigHeader)) 284 return Rotated; 285 286 // If the loop latch already contains a branch that leaves the loop then the 287 // loop is already rotated. 288 if (!OrigLatch) 289 return Rotated; 290 291 // Rotate if either the loop latch does *not* exit the loop, or if the loop 292 // latch was just simplified. Or if we think it will be profitable. 293 if (L->isLoopExiting(OrigLatch) && !SimplifiedLatch && IsUtilMode == false && 294 !profitableToRotateLoopExitingLatch(L) && 295 !canRotateDeoptimizingLatchExit(L)) 296 return Rotated; 297 298 // Check size of original header and reject loop if it is very big or we can't 299 // duplicate blocks inside it. 300 { 301 SmallPtrSet<const Value *, 32> EphValues; 302 CodeMetrics::collectEphemeralValues(L, AC, EphValues); 303 304 CodeMetrics Metrics; 305 Metrics.analyzeBasicBlock(OrigHeader, *TTI, EphValues); 306 if (Metrics.notDuplicatable) { 307 LLVM_DEBUG( 308 dbgs() << "LoopRotation: NOT rotating - contains non-duplicatable" 309 << " instructions: "; 310 L->dump()); 311 return Rotated; 312 } 313 if (Metrics.convergent) { 314 LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains convergent " 315 "instructions: "; 316 L->dump()); 317 return Rotated; 318 } 319 if (Metrics.NumInsts > MaxHeaderSize) { 320 LLVM_DEBUG(dbgs() << "LoopRotation: NOT rotating - contains " 321 << Metrics.NumInsts 322 << " instructions, which is more than the threshold (" 323 << MaxHeaderSize << " instructions): "; 324 L->dump()); 325 ++NumNotRotatedDueToHeaderSize; 326 return Rotated; 327 } 328 } 329 330 // Now, this loop is suitable for rotation. 331 BasicBlock *OrigPreheader = L->getLoopPreheader(); 332 333 // If the loop could not be converted to canonical form, it must have an 334 // indirectbr in it, just give up. 335 if (!OrigPreheader || !L->hasDedicatedExits()) 336 return Rotated; 337 338 // Anything ScalarEvolution may know about this loop or the PHI nodes 339 // in its header will soon be invalidated. We should also invalidate 340 // all outer loops because insertion and deletion of blocks that happens 341 // during the rotation may violate invariants related to backedge taken 342 // infos in them. 343 if (SE) 344 SE->forgetTopmostLoop(L); 345 346 LLVM_DEBUG(dbgs() << "LoopRotation: rotating "; L->dump()); 347 if (MSSAU && VerifyMemorySSA) 348 MSSAU->getMemorySSA()->verifyMemorySSA(); 349 350 // Find new Loop header. NewHeader is a Header's one and only successor 351 // that is inside loop. Header's other successor is outside the 352 // loop. Otherwise loop is not suitable for rotation. 353 BasicBlock *Exit = BI->getSuccessor(0); 354 BasicBlock *NewHeader = BI->getSuccessor(1); 355 if (L->contains(Exit)) 356 std::swap(Exit, NewHeader); 357 assert(NewHeader && "Unable to determine new loop header"); 358 assert(L->contains(NewHeader) && !L->contains(Exit) && 359 "Unable to determine loop header and exit blocks"); 360 361 // This code assumes that the new header has exactly one predecessor. 362 // Remove any single-entry PHI nodes in it. 363 assert(NewHeader->getSinglePredecessor() && 364 "New header doesn't have one pred!"); 365 FoldSingleEntryPHINodes(NewHeader); 366 367 // Begin by walking OrigHeader and populating ValueMap with an entry for 368 // each Instruction. 369 BasicBlock::iterator I = OrigHeader->begin(), E = OrigHeader->end(); 370 ValueToValueMapTy ValueMap, ValueMapMSSA; 371 372 // For PHI nodes, the value available in OldPreHeader is just the 373 // incoming value from OldPreHeader. 374 for (; PHINode *PN = dyn_cast<PHINode>(I); ++I) 375 InsertNewValueIntoMap(ValueMap, PN, 376 PN->getIncomingValueForBlock(OrigPreheader)); 377 378 // For the rest of the instructions, either hoist to the OrigPreheader if 379 // possible or create a clone in the OldPreHeader if not. 380 Instruction *LoopEntryBranch = OrigPreheader->getTerminator(); 381 382 // Record all debug intrinsics preceding LoopEntryBranch to avoid duplication. 383 using DbgIntrinsicHash = 384 std::pair<std::pair<Value *, DILocalVariable *>, DIExpression *>; 385 auto makeHash = [](DbgVariableIntrinsic *D) -> DbgIntrinsicHash { 386 return {{D->getVariableLocation(), D->getVariable()}, D->getExpression()}; 387 }; 388 SmallDenseSet<DbgIntrinsicHash, 8> DbgIntrinsics; 389 for (auto I = std::next(OrigPreheader->rbegin()), E = OrigPreheader->rend(); 390 I != E; ++I) { 391 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&*I)) 392 DbgIntrinsics.insert(makeHash(DII)); 393 else 394 break; 395 } 396 397 while (I != E) { 398 Instruction *Inst = &*I++; 399 400 // If the instruction's operands are invariant and it doesn't read or write 401 // memory, then it is safe to hoist. Doing this doesn't change the order of 402 // execution in the preheader, but does prevent the instruction from 403 // executing in each iteration of the loop. This means it is safe to hoist 404 // something that might trap, but isn't safe to hoist something that reads 405 // memory (without proving that the loop doesn't write). 406 if (L->hasLoopInvariantOperands(Inst) && !Inst->mayReadFromMemory() && 407 !Inst->mayWriteToMemory() && !Inst->isTerminator() && 408 !isa<DbgInfoIntrinsic>(Inst) && !isa<AllocaInst>(Inst)) { 409 Inst->moveBefore(LoopEntryBranch); 410 continue; 411 } 412 413 // Otherwise, create a duplicate of the instruction. 414 Instruction *C = Inst->clone(); 415 416 // Eagerly remap the operands of the instruction. 417 RemapInstruction(C, ValueMap, 418 RF_NoModuleLevelChanges | RF_IgnoreMissingLocals); 419 420 // Avoid inserting the same intrinsic twice. 421 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(C)) 422 if (DbgIntrinsics.count(makeHash(DII))) { 423 C->deleteValue(); 424 continue; 425 } 426 427 // With the operands remapped, see if the instruction constant folds or is 428 // otherwise simplifyable. This commonly occurs because the entry from PHI 429 // nodes allows icmps and other instructions to fold. 430 Value *V = SimplifyInstruction(C, SQ); 431 if (V && LI->replacementPreservesLCSSAForm(C, V)) { 432 // If so, then delete the temporary instruction and stick the folded value 433 // in the map. 434 InsertNewValueIntoMap(ValueMap, Inst, V); 435 if (!C->mayHaveSideEffects()) { 436 C->deleteValue(); 437 C = nullptr; 438 } 439 } else { 440 InsertNewValueIntoMap(ValueMap, Inst, C); 441 } 442 if (C) { 443 // Otherwise, stick the new instruction into the new block! 444 C->setName(Inst->getName()); 445 C->insertBefore(LoopEntryBranch); 446 447 if (auto *II = dyn_cast<IntrinsicInst>(C)) 448 if (II->getIntrinsicID() == Intrinsic::assume) 449 AC->registerAssumption(II); 450 // MemorySSA cares whether the cloned instruction was inserted or not, and 451 // not whether it can be remapped to a simplified value. 452 if (MSSAU) 453 InsertNewValueIntoMap(ValueMapMSSA, Inst, C); 454 } 455 } 456 457 // Along with all the other instructions, we just cloned OrigHeader's 458 // terminator into OrigPreHeader. Fix up the PHI nodes in each of OrigHeader's 459 // successors by duplicating their incoming values for OrigHeader. 460 for (BasicBlock *SuccBB : successors(OrigHeader)) 461 for (BasicBlock::iterator BI = SuccBB->begin(); 462 PHINode *PN = dyn_cast<PHINode>(BI); ++BI) 463 PN->addIncoming(PN->getIncomingValueForBlock(OrigHeader), OrigPreheader); 464 465 // Now that OrigPreHeader has a clone of OrigHeader's terminator, remove 466 // OrigPreHeader's old terminator (the original branch into the loop), and 467 // remove the corresponding incoming values from the PHI nodes in OrigHeader. 468 LoopEntryBranch->eraseFromParent(); 469 470 // Update MemorySSA before the rewrite call below changes the 1:1 471 // instruction:cloned_instruction_or_value mapping. 472 if (MSSAU) { 473 InsertNewValueIntoMap(ValueMapMSSA, OrigHeader, OrigPreheader); 474 MSSAU->updateForClonedBlockIntoPred(OrigHeader, OrigPreheader, 475 ValueMapMSSA); 476 } 477 478 SmallVector<PHINode*, 2> InsertedPHIs; 479 // If there were any uses of instructions in the duplicated block outside the 480 // loop, update them, inserting PHI nodes as required 481 RewriteUsesOfClonedInstructions(OrigHeader, OrigPreheader, ValueMap, 482 &InsertedPHIs); 483 484 // Attach dbg.value intrinsics to the new phis if that phi uses a value that 485 // previously had debug metadata attached. This keeps the debug info 486 // up-to-date in the loop body. 487 if (!InsertedPHIs.empty()) 488 insertDebugValuesForPHIs(OrigHeader, InsertedPHIs); 489 490 // NewHeader is now the header of the loop. 491 L->moveToHeader(NewHeader); 492 assert(L->getHeader() == NewHeader && "Latch block is our new header"); 493 494 // Inform DT about changes to the CFG. 495 if (DT) { 496 // The OrigPreheader branches to the NewHeader and Exit now. Then, inform 497 // the DT about the removed edge to the OrigHeader (that got removed). 498 SmallVector<DominatorTree::UpdateType, 3> Updates; 499 Updates.push_back({DominatorTree::Insert, OrigPreheader, Exit}); 500 Updates.push_back({DominatorTree::Insert, OrigPreheader, NewHeader}); 501 Updates.push_back({DominatorTree::Delete, OrigPreheader, OrigHeader}); 502 DT->applyUpdates(Updates); 503 504 if (MSSAU) { 505 MSSAU->applyUpdates(Updates, *DT); 506 if (VerifyMemorySSA) 507 MSSAU->getMemorySSA()->verifyMemorySSA(); 508 } 509 } 510 511 // At this point, we've finished our major CFG changes. As part of cloning 512 // the loop into the preheader we've simplified instructions and the 513 // duplicated conditional branch may now be branching on a constant. If it is 514 // branching on a constant and if that constant means that we enter the loop, 515 // then we fold away the cond branch to an uncond branch. This simplifies the 516 // loop in cases important for nested loops, and it also means we don't have 517 // to split as many edges. 518 BranchInst *PHBI = cast<BranchInst>(OrigPreheader->getTerminator()); 519 assert(PHBI->isConditional() && "Should be clone of BI condbr!"); 520 if (!isa<ConstantInt>(PHBI->getCondition()) || 521 PHBI->getSuccessor(cast<ConstantInt>(PHBI->getCondition())->isZero()) != 522 NewHeader) { 523 // The conditional branch can't be folded, handle the general case. 524 // Split edges as necessary to preserve LoopSimplify form. 525 526 // Right now OrigPreHeader has two successors, NewHeader and ExitBlock, and 527 // thus is not a preheader anymore. 528 // Split the edge to form a real preheader. 529 BasicBlock *NewPH = SplitCriticalEdge( 530 OrigPreheader, NewHeader, 531 CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA()); 532 NewPH->setName(NewHeader->getName() + ".lr.ph"); 533 534 // Preserve canonical loop form, which means that 'Exit' should have only 535 // one predecessor. Note that Exit could be an exit block for multiple 536 // nested loops, causing both of the edges to now be critical and need to 537 // be split. 538 SmallVector<BasicBlock *, 4> ExitPreds(pred_begin(Exit), pred_end(Exit)); 539 bool SplitLatchEdge = false; 540 for (BasicBlock *ExitPred : ExitPreds) { 541 // We only need to split loop exit edges. 542 Loop *PredLoop = LI->getLoopFor(ExitPred); 543 if (!PredLoop || PredLoop->contains(Exit) || 544 ExitPred->getTerminator()->isIndirectTerminator()) 545 continue; 546 SplitLatchEdge |= L->getLoopLatch() == ExitPred; 547 BasicBlock *ExitSplit = SplitCriticalEdge( 548 ExitPred, Exit, 549 CriticalEdgeSplittingOptions(DT, LI, MSSAU).setPreserveLCSSA()); 550 ExitSplit->moveBefore(Exit); 551 } 552 assert(SplitLatchEdge && 553 "Despite splitting all preds, failed to split latch exit?"); 554 } else { 555 // We can fold the conditional branch in the preheader, this makes things 556 // simpler. The first step is to remove the extra edge to the Exit block. 557 Exit->removePredecessor(OrigPreheader, true /*preserve LCSSA*/); 558 BranchInst *NewBI = BranchInst::Create(NewHeader, PHBI); 559 NewBI->setDebugLoc(PHBI->getDebugLoc()); 560 PHBI->eraseFromParent(); 561 562 // With our CFG finalized, update DomTree if it is available. 563 if (DT) DT->deleteEdge(OrigPreheader, Exit); 564 565 // Update MSSA too, if available. 566 if (MSSAU) 567 MSSAU->removeEdge(OrigPreheader, Exit); 568 } 569 570 assert(L->getLoopPreheader() && "Invalid loop preheader after loop rotation"); 571 assert(L->getLoopLatch() && "Invalid loop latch after loop rotation"); 572 573 if (MSSAU && VerifyMemorySSA) 574 MSSAU->getMemorySSA()->verifyMemorySSA(); 575 576 // Now that the CFG and DomTree are in a consistent state again, try to merge 577 // the OrigHeader block into OrigLatch. This will succeed if they are 578 // connected by an unconditional branch. This is just a cleanup so the 579 // emitted code isn't too gross in this common case. 580 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); 581 MergeBlockIntoPredecessor(OrigHeader, &DTU, LI, MSSAU); 582 583 if (MSSAU && VerifyMemorySSA) 584 MSSAU->getMemorySSA()->verifyMemorySSA(); 585 586 LLVM_DEBUG(dbgs() << "LoopRotation: into "; L->dump()); 587 588 ++NumRotated; 589 590 Rotated = true; 591 SimplifiedLatch = false; 592 593 // Check that new latch is a deoptimizing exit and then repeat rotation if possible. 594 // Deoptimizing latch exit is not a generally typical case, so we just loop over. 595 // TODO: if it becomes a performance bottleneck extend rotation algorithm 596 // to handle multiple rotations in one go. 597 } while (MultiRotate && canRotateDeoptimizingLatchExit(L)); 598 599 600 return true; 601 } 602 603 /// Determine whether the instructions in this range may be safely and cheaply 604 /// speculated. This is not an important enough situation to develop complex 605 /// heuristics. We handle a single arithmetic instruction along with any type 606 /// conversions. 607 static bool shouldSpeculateInstrs(BasicBlock::iterator Begin, 608 BasicBlock::iterator End, Loop *L) { 609 bool seenIncrement = false; 610 bool MultiExitLoop = false; 611 612 if (!L->getExitingBlock()) 613 MultiExitLoop = true; 614 615 for (BasicBlock::iterator I = Begin; I != End; ++I) { 616 617 if (!isSafeToSpeculativelyExecute(&*I)) 618 return false; 619 620 if (isa<DbgInfoIntrinsic>(I)) 621 continue; 622 623 switch (I->getOpcode()) { 624 default: 625 return false; 626 case Instruction::GetElementPtr: 627 // GEPs are cheap if all indices are constant. 628 if (!cast<GEPOperator>(I)->hasAllConstantIndices()) 629 return false; 630 // fall-thru to increment case 631 LLVM_FALLTHROUGH; 632 case Instruction::Add: 633 case Instruction::Sub: 634 case Instruction::And: 635 case Instruction::Or: 636 case Instruction::Xor: 637 case Instruction::Shl: 638 case Instruction::LShr: 639 case Instruction::AShr: { 640 Value *IVOpnd = 641 !isa<Constant>(I->getOperand(0)) 642 ? I->getOperand(0) 643 : !isa<Constant>(I->getOperand(1)) ? I->getOperand(1) : nullptr; 644 if (!IVOpnd) 645 return false; 646 647 // If increment operand is used outside of the loop, this speculation 648 // could cause extra live range interference. 649 if (MultiExitLoop) { 650 for (User *UseI : IVOpnd->users()) { 651 auto *UserInst = cast<Instruction>(UseI); 652 if (!L->contains(UserInst)) 653 return false; 654 } 655 } 656 657 if (seenIncrement) 658 return false; 659 seenIncrement = true; 660 break; 661 } 662 case Instruction::Trunc: 663 case Instruction::ZExt: 664 case Instruction::SExt: 665 // ignore type conversions 666 break; 667 } 668 } 669 return true; 670 } 671 672 /// Fold the loop tail into the loop exit by speculating the loop tail 673 /// instructions. Typically, this is a single post-increment. In the case of a 674 /// simple 2-block loop, hoisting the increment can be much better than 675 /// duplicating the entire loop header. In the case of loops with early exits, 676 /// rotation will not work anyway, but simplifyLoopLatch will put the loop in 677 /// canonical form so downstream passes can handle it. 678 /// 679 /// I don't believe this invalidates SCEV. 680 bool LoopRotate::simplifyLoopLatch(Loop *L) { 681 BasicBlock *Latch = L->getLoopLatch(); 682 if (!Latch || Latch->hasAddressTaken()) 683 return false; 684 685 BranchInst *Jmp = dyn_cast<BranchInst>(Latch->getTerminator()); 686 if (!Jmp || !Jmp->isUnconditional()) 687 return false; 688 689 BasicBlock *LastExit = Latch->getSinglePredecessor(); 690 if (!LastExit || !L->isLoopExiting(LastExit)) 691 return false; 692 693 BranchInst *BI = dyn_cast<BranchInst>(LastExit->getTerminator()); 694 if (!BI) 695 return false; 696 697 if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L)) 698 return false; 699 700 LLVM_DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into " 701 << LastExit->getName() << "\n"); 702 703 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager); 704 MergeBlockIntoPredecessor(Latch, &DTU, LI, MSSAU, nullptr, 705 /*PredecessorWithTwoSuccessors=*/true); 706 707 if (MSSAU && VerifyMemorySSA) 708 MSSAU->getMemorySSA()->verifyMemorySSA(); 709 710 return true; 711 } 712 713 /// Rotate \c L, and return true if any modification was made. 714 bool LoopRotate::processLoop(Loop *L) { 715 // Save the loop metadata. 716 MDNode *LoopMD = L->getLoopID(); 717 718 bool SimplifiedLatch = false; 719 720 // Simplify the loop latch before attempting to rotate the header 721 // upward. Rotation may not be needed if the loop tail can be folded into the 722 // loop exit. 723 if (!RotationOnly) 724 SimplifiedLatch = simplifyLoopLatch(L); 725 726 bool MadeChange = rotateLoop(L, SimplifiedLatch); 727 assert((!MadeChange || L->isLoopExiting(L->getLoopLatch())) && 728 "Loop latch should be exiting after loop-rotate."); 729 730 // Restore the loop metadata. 731 // NB! We presume LoopRotation DOESN'T ADD its own metadata. 732 if ((MadeChange || SimplifiedLatch) && LoopMD) 733 L->setLoopID(LoopMD); 734 735 return MadeChange || SimplifiedLatch; 736 } 737 738 739 /// The utility to convert a loop into a loop with bottom test. 740 bool llvm::LoopRotation(Loop *L, LoopInfo *LI, const TargetTransformInfo *TTI, 741 AssumptionCache *AC, DominatorTree *DT, 742 ScalarEvolution *SE, MemorySSAUpdater *MSSAU, 743 const SimplifyQuery &SQ, bool RotationOnly = true, 744 unsigned Threshold = unsigned(-1), 745 bool IsUtilMode = true) { 746 LoopRotate LR(Threshold, LI, TTI, AC, DT, SE, MSSAU, SQ, RotationOnly, 747 IsUtilMode); 748 return LR.processLoop(L); 749 } 750