//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches.  This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");

static cl::opt<bool>
UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden,
                    cl::desc("Allow runtime unrolled loops to be unrolled "
                             "with epilog instead of prolog."));

static cl::opt<bool>
UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
                    cl::desc("Verify domtree after unrolling"),
#ifdef NDEBUG
                    cl::init(false)
#else
                    cl::init(true)
#endif
                    );

/// Convert the instruction operands from referencing the current values into
/// those specified by VMap.
static inline void remapInstruction(Instruction *I,
                                    ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, It->second);
  }

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}

/// Folds a basic block into its predecessor if it only has one predecessor,
/// and that predecessor only has one successor.
/// The LoopInfo Analysis that is passed will be kept consistent.
/// If folding is successful, references to the containing loop must be removed
/// from ScalarEvolution by calling ScalarEvolution::forgetLoop, because SE may
/// have references to the eliminated BB. The argument ForgottenLoops contains
/// a set of loops that have already been forgotten, to prevent redundant,
/// expensive calls to ScalarEvolution::forgetLoop. Returns the new combined
/// block.
static BasicBlock *
foldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI, ScalarEvolution *SE,
                         SmallPtrSetImpl<Loop *> &ForgottenLoops,
                         DominatorTree *DT) {
  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
  BasicBlock *OnlyPred = BB->getSinglePredecessor();
  if (!OnlyPred) return nullptr;

  if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);

  // Resolve any PHI nodes at the start of the block.  They are all
  // guaranteed to have exactly one entry if they exist, unless there are
  // multiple duplicate (but guaranteed to be equal) entries for the
  // incoming edges.  This occurs when there are multiple edges from
  // OnlyPred to OnlySucc.
  FoldSingleEntryPHINodes(BB);

  // Delete the unconditional branch from the predecessor...
  OnlyPred->getInstList().pop_back();

  // Make all PHI nodes that referred to BB now refer to Pred as their
  // source...
  BB->replaceAllUsesWith(OnlyPred);

  // Move all definitions in the successor to the predecessor...
  OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());

  // OldName will be valid until erased.
  StringRef OldName = BB->getName();

  // Erase the old block and update dominator info.
  if (DT)
    if (DomTreeNode *DTN = DT->getNode(BB)) {
      DomTreeNode *PredDTN = DT->getNode(OnlyPred);
      SmallVector<DomTreeNode *, 8> Children(DTN->begin(), DTN->end());
      for (auto *DI : Children)
        DT->changeImmediateDominator(DI, PredDTN);

      DT->eraseNode(BB);
    }

  // ScalarEvolution holds references to loop exit blocks.
  if (SE) {
    if (Loop *L = LI->getLoopFor(BB)) {
      if (ForgottenLoops.insert(L).second)
        SE->forgetLoop(L);
    }
  }
  LI->removeBlock(BB);

  // Inherit predecessor's name if it exists...
  if (!OldName.empty() && !OnlyPred->hasName())
    OnlyPred->setName(OldName);

  BB->eraseFromParent();

  return OnlyPred;
}

/// Check if unrolling created a situation where we need to insert phi nodes to
/// preserve LCSSA form.
/// \param Blocks is a vector of basic blocks representing the unrolled loop.
/// \param L is the outer loop.
/// It's possible that some of the blocks are in L, and some are not. In this
/// case, if a use is outside L and its definition is inside L, we need to
/// insert a phi-node; otherwise LCSSA will be broken.
/// This is just a helper function for llvm::UnrollLoop that returns true if
/// this situation occurs, indicating that LCSSA needs to be fixed.
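///
/// As a purely illustrative sketch of the LCSSA constraint (the IR and the
/// value names below are made up for this example):
///
///   body:                               ; a block inside L
///     %v = add i32 %iv, 1
///     ...
///   after:                              ; a block outside L
///     %v.lcssa = phi i32 [ %v, %body ]  ; LCSSA requires this phi
///     %use = mul i32 %v.lcssa, 2
///
/// Without %v.lcssa, the out-of-loop use of %v would break LCSSA form.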
static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks,
                                     LoopInfo *LI) {
  for (BasicBlock *BB : Blocks) {
    if (LI->getLoopFor(BB) == L)
      continue;
    for (Instruction &I : *BB) {
      for (Use &U : I.operands()) {
        if (auto Def = dyn_cast<Instruction>(U)) {
          Loop *DefLoop = LI->getLoopFor(Def->getParent());
          if (!DefLoop)
            continue;
          if (DefLoop->contains(L))
            return true;
        }
      }
    }
  }
  return false;
}

/// Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary
/// and adds a mapping from the original loop to the new loop to NewLoops.
/// Returns nullptr if no new loop was created; otherwise returns a pointer to
/// the original loop that OriginalBB was part of.
const Loop* llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
                                           BasicBlock *ClonedBB, LoopInfo *LI,
                                           NewLoopsMap &NewLoops) {
  // Figure out which loop New is in.
  const Loop *OldLoop = LI->getLoopFor(OriginalBB);
  assert(OldLoop && "Should (at least) be in the loop being unrolled!");

  Loop *&NewLoop = NewLoops[OldLoop];
  if (!NewLoop) {
    // Found a new sub-loop.
    assert(OriginalBB == OldLoop->getHeader() &&
           "Header should be first in RPO");

    NewLoop = new Loop();
    Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop());

    if (NewLoopParent)
      NewLoopParent->addChildLoop(NewLoop);
    else
      LI->addTopLevelLoop(NewLoop);

    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return OldLoop;
  } else {
    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return nullptr;
  }
}

/// Unroll the given loop by Count. The loop must be in LCSSA form.  Returns
/// true if unrolling was successful, or false if the loop was unmodified.
/// Unrolling can only fail when the loop's latch block is not terminated by a
/// conditional branch instruction.  However, if the trip count (and multiple)
/// are not known, loop unrolling will mostly produce more code that is no
/// faster.
///
/// TripCount is the upper bound of the iteration on which control exits
/// LatchBlock.  Control may exit the loop prior to TripCount iterations either
/// via an early branch in another loop block or via the LatchBlock terminator.
/// This is relaxed from the general definition of trip count, which is the
/// number of times the loop header executes.  Note that UnrollLoop assumes
/// that the loop counter test is in LatchBlock in order to remove unnecessary
/// instances of the test.  If control can exit the loop from the LatchBlock's
/// terminator prior to TripCount iterations, the flag PreserveCondBr needs to
/// be set.
///
/// PreserveCondBr indicates whether the conditional branch of the LatchBlock
/// needs to be preserved.  It is needed when we use the trip count upper bound
/// to fully unroll the loop.  If PreserveOnlyFirst is also set then only the
/// first conditional branch needs to be preserved.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// If AllowRuntime is true then UnrollLoop will consider unrolling loops that
/// have a runtime (i.e. not compile time constant) trip count.  Unrolling
/// these loops requires an unroll "prologue" that runs "RuntimeTripCount %
/// Count" iterations before branching into the unrolled loop.
/// UnrollLoop will not runtime-unroll the loop if computing RuntimeTripCount
/// will be expensive and AllowExpensiveTripCount is false.
///
/// If we want to perform PGO-based loop peeling, PeelCount is set to the
/// number of iterations we want to peel off.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
/// DominatorTree if they are non-null.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount, bool Force,
                      bool AllowRuntime, bool AllowExpensiveTripCount,
                      bool PreserveCondBr, bool PreserveOnlyFirst,
                      unsigned TripMultiple, unsigned PeelCount, LoopInfo *LI,
                      ScalarEvolution *SE, DominatorTree *DT,
                      AssumptionCache *AC, OptimizationRemarkEmitter *ORE,
                      bool PreserveLCSSA) {

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "  Can't unroll; loop preheader-insertion failed.\n");
    return false;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << "  Can't unroll; loop exit-block-insertion failed.\n");
    return false;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << "  Can't unroll; Loop body cannot be cloned.\n");
    return false;
  }

  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          "  Can't unroll; loop not terminated by a conditional branch.\n");
    return false;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          "  Won't unroll loop: address of header block is taken.\n");
    return false;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << "  Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << "  Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do.
  if (TripCount == 0 && Count < 2 && PeelCount == 0) {
    DEBUG(dbgs() << "Won't unroll; almost nothing to do\n");
    return false;
  }

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks();

  // Go through all exits of L and see if there are any phi-nodes there. We
  // just conservatively assume that they're inserted to preserve LCSSA form,
  // which means that complete unrolling might break this form. We need to
  // either fix it in-place after the transformation, or entirely rebuild
  // LCSSA. TODO: For now we just recompute LCSSA for the outer loop, but it
  // should be possible to fix it in-place.
  bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
                        any_of(ExitBlocks, [](const BasicBlock *BB) {
                          return isa<PHINode>(BB->begin());
                        });

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

  assert((!RuntimeTripCount || !PeelCount) &&
         "Did not expect runtime trip-count unrolling "
         "and peeling for the same loop");

  if (PeelCount)
    peelLoop(L, PeelCount, LI, SE, DT, AC, PreserveLCSSA);

  // Loops containing convergent instructions must have a count that divides
  // their TripMultiple.
  DEBUG(
      {
        bool HasConvergent = false;
        for (auto &BB : L->blocks())
          for (auto &I : *BB)
            if (auto CS = CallSite(&I))
              HasConvergent |= CS.isConvergent();
        assert((!HasConvergent || TripMultiple % Count == 0) &&
               "Unroll count must divide trip multiple if loop contains a "
               "convergent operation.");
      });

  if (RuntimeTripCount && TripMultiple % Count != 0 &&
      !UnrollRuntimeLoopRemainder(L, Count, AllowExpensiveTripCount,
                                  UnrollRuntimeEpilog, LI, SE, DT,
                                  PreserveLCSSA)) {
    if (Force)
      RuntimeTripCount = false;
    else {
      DEBUG(dbgs() << "Won't unroll; remainder loop could not be generated "
                      "when assuming runtime trip count\n");
      return false;
    }
  }

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (SE)
    SE->forgetLoop(L);

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (TripCount != 0) {
    BreakoutTrip = TripCount % Count;
    TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = TripMultiple =
        (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
  }

  using namespace ore;
  // Report the unrolling decision.
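  // For example, completely unrolling a loop with a trip count of 4 produces
  // the remark "completely unrolled loop with 4 iterations" (illustrative
  // rendering of the remark built below).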
  if (CompletelyUnroll) {
    DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
                 << " with trip count " << TripCount << "!\n");
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
                                 L->getHeader())
              << "completely unrolled loop with "
              << NV("UnrollCount", TripCount) << " iterations");
  } else if (PeelCount) {
    DEBUG(dbgs() << "PEELING loop %" << Header->getName()
                 << " with iteration count " << PeelCount << "!\n");
    ORE->emit(OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
                                 L->getHeader())
              << " peeled loop by " << NV("PeelCount", PeelCount)
              << " iterations");
  } else {
    OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
                            L->getHeader());
    Diag << "unrolled loop by a factor of " << NV("UnrollCount", Count);

    DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
                 << " by " << Count);
    if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
      DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
      ORE->emit(Diag << " with a breakout at trip "
                     << NV("BreakoutTrip", BreakoutTrip));
    } else if (TripMultiple != 1) {
      DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
      ORE->emit(Diag << " with " << NV("TripMultiple", TripMultiple)
                     << " trips per branch");
    } else if (RuntimeTripCount) {
      DEBUG(dbgs() << " with run-time trip count");
      ORE->emit(Diag << " with run-time trip count");
    }
    DEBUG(dbgs() << "!\n");
  }

  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);

  // For the first iteration of the loop, we should use the precloned values
  // for PHI nodes.  Insert associations now.
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock*> Headers;
  std::vector<BasicBlock*> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  std::vector<BasicBlock*> UnrolledLoopBlocks = L->getBlocks();

  // Loop Unrolling might create new loops. While we do preserve LoopInfo, we
  // might break loop-simplified form for these loops (as they, e.g., would
  // share the same exit blocks). We'll keep track of the loops for which we
  // might break this, so that later we can re-simplify them.
  SmallSetVector<Loop *, 4> LoopsToSimplify;
  for (Loop *SubLoop : *L)
    LoopsToSimplify.insert(SubLoop);

  if (Header->getParent()->isDebugInfoForProfiling())
    for (BasicBlock *BB : L->getBlocks())
      for (Instruction &I : *BB)
        if (const DILocation *DIL = I.getDebugLoc())
          I.setDebugLoc(DIL->cloneWithDuplicationFactor(Count));

  for (unsigned It = 1; It != Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;
    SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
    NewLoops[L] = L;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      assert((*BB != Header || LI->getLoopFor(*BB) == L) &&
             "Header should not be in a sub-loop");
      // Tell LI about New.
      const Loop *OldLoop = addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
      if (OldLoop) {
        LoopsToSimplify.insert(NewLoops[OldLoop]);

        // Forget the old loop, since its inputs may have changed.
        if (SE)
          SE->forgetLoop(OldLoop);
      }

      if (*BB == Header)
        // Loop over all of the PHI nodes in the block, changing them to use
        // the incoming values from the previous block.
        for (PHINode *OrigPHI : OrigPHINode) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHI] = InVal;
          New->getInstList().erase(NewPHI);
        }

      // Update our running map of newest clones.
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

      // Add phi entries for newly created values to all exit blocks.
      for (BasicBlock *Succ : successors(*BB)) {
        if (L->contains(Succ))
          continue;
        for (BasicBlock::iterator BBI = Succ->begin();
             PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
          Value *Incoming = phi->getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          phi->addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      NewBlocks.push_back(New);
      UnrolledLoopBlocks.push_back(New);

      // Update DomTree: since we just copy the loop body, and each copy has a
      // dedicated entry block (copy of the header block), this header's copy
      // dominates all copied blocks. That means, dominance relations in the
      // copied body are the same as in the original body.
      if (DT) {
        if (*BB == Header)
          DT->addNewBlock(New, Latches[It - 1]);
        else {
          auto BBDomNode = DT->getNode(*BB);
          auto BBIDom = BBDomNode->getIDom();
          BasicBlock *OriginalBBIDom = BBIDom->getBlock();
          DT->addNewBlock(
              New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
        }
      }
    }

    // Remap all instructions in the most recent iteration.
    for (BasicBlock *NewBlock : NewBlocks) {
      for (Instruction &I : *NewBlock) {
        ::remapInstruction(&I, LastValueMap);
        if (auto *II = dyn_cast<IntrinsicInst>(&I))
          if (II->getIntrinsicID() == Intrinsic::assume)
            AC->registerAssumption(II);
      }
    }
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (PHINode *PN : OrigPHINode) {
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    }
    else if (Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    // The original branch was replicated in each unrolled iteration.
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

    // The branch destination.
    unsigned j = (i + 1) % e;
    BasicBlock *Dest = Headers[j];
    bool NeedConditional = true;

    if (RuntimeTripCount && j != 0) {
      NeedConditional = false;
    }

    // For a complete unroll, make the last iteration end with a branch
    // to the exit block.
    if (CompletelyUnroll) {
      if (j == 0)
        Dest = LoopExit;
      // If using the trip count upper bound to completely unroll, we need to
      // keep the conditional branch except the last one, because the loop may
      // exit after any iteration.
      assert(NeedConditional &&
             "NeedConditional cannot be modified by both complete "
             "unrolling and runtime unrolling");
      NeedConditional = (PreserveCondBr && j && !(PreserveOnlyFirst && i != 0));
    } else if (j != BreakoutTrip && (TripMultiple == 0 || j % TripMultiple != 0)) {
      // If we know the trip count or a multiple of it, we can safely use an
      // unconditional branch for some iterations.
      NeedConditional = false;
    }

    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit.
      if (Dest != LoopExit) {
        BasicBlock *BB = Latches[i];
        for (BasicBlock *Succ: successors(BB)) {
          if (Succ == Headers[i])
            continue;
          for (BasicBlock::iterator BBI = Succ->begin();
               PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
            Phi->removeIncomingValue(BB, false);
          }
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  }

  // Update dominators of blocks we might reach through exits.
  // Immediate dominator of such block might change, because we add more
  // routes which can lead to the exit: we can now reach it from the copied
  // iterations too.
  if (DT && Count > 1) {
    for (auto *BB : OriginalLoopBlocks) {
      auto *BBDomNode = DT->getNode(BB);
      SmallVector<BasicBlock *, 16> ChildrenToUpdate;
      for (auto *ChildDomNode : BBDomNode->getChildren()) {
        auto *ChildBB = ChildDomNode->getBlock();
        if (!L->contains(ChildBB))
          ChildrenToUpdate.push_back(ChildBB);
      }
      BasicBlock *NewIDom;
      if (BB == LatchBlock) {
        // The latch is special because we emit unconditional branches in
        // some cases where the original loop contained a conditional branch.
        // Since the latch is always at the bottom of the loop, if the latch
        // dominated an exit before unrolling, the new dominator of that exit
        // must also be a latch.  Specifically, the dominator is the first
        // latch which ends in a conditional branch, or the last latch if
        // there is no such latch.
        NewIDom = Latches.back();
        for (BasicBlock *IterLatch : Latches) {
          TerminatorInst *Term = IterLatch->getTerminator();
          if (isa<BranchInst>(Term) && cast<BranchInst>(Term)->isConditional()) {
            NewIDom = IterLatch;
            break;
          }
        }
      } else {
        // The new idom of the block will be the nearest common dominator
        // of all copies of the previous idom.  This is equivalent to the
        // nearest common dominator of the previous idom and the first latch,
        // which dominates all copies of the previous idom.
        NewIDom = DT->findNearestCommonDominator(BB, LatchBlock);
      }
      for (auto *ChildBB : ChildrenToUpdate)
        DT->changeImmediateDominator(ChildBB, NewIDom);
    }
  }

  if (DT && UnrollVerifyDomtree)
    DT->verifyDomTree();

  // Merge adjacent basic blocks, if possible.
  SmallPtrSet<Loop *, 4> ForgottenLoops;
  for (BasicBlock *Latch : Latches) {
    BranchInst *Term = cast<BranchInst>(Latch->getTerminator());
    if (Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      if (BasicBlock *Fold =
              foldBlockIntoPredecessor(Dest, LI, SE, ForgottenLoops, DT)) {
        // Dest has been folded into Fold. Update our worklists accordingly.
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
        UnrolledLoopBlocks.erase(std::remove(UnrolledLoopBlocks.begin(),
                                             UnrolledLoopBlocks.end(), Dest),
                                 UnrolledLoopBlocks.end());
      }
    }
  }

  // Simplify any new induction variables in the partially unrolled loop.
  if (SE && !CompletelyUnroll && Count > 1) {
    SmallVector<WeakVH, 16> DeadInsts;
    simplifyLoopIVs(L, SE, DT, LI, DeadInsts);

    // Aggressively clean up dead instructions that simplifyLoopIVs already
    // identified. Any remaining should be cleaned up below.
    while (!DeadInsts.empty())
      if (Instruction *Inst =
              dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
        RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }

  // At this point, the code is well formed.  We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const DataLayout &DL = Header->getModule()->getDataLayout();
  const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
  for (BasicBlock *BB : NewLoopBlocks) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
      Instruction *Inst = &*I++;

      if (Value *V = SimplifyInstruction(Inst, DL))
        if (LI->replacementPreservesLCSSAForm(Inst, V))
          Inst->replaceAllUsesWith(V);
      if (isInstructionTriviallyDead(Inst))
        BB->getInstList().erase(Inst);
    }
  }

  // TODO: after peeling or unrolling, previously loop-variant conditions are
  // likely to fold to constants; eagerly propagating those here will require
  // fewer cleanup passes to be run.  Alternatively, a LoopEarlyCSE might be
  // appropriate.

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;

  Loop *OuterL = L->getParentLoop();
  // Update LoopInfo if the loop is completely removed.
  if (CompletelyUnroll)
    LI->markAsRemoved(L);

  // After complete unrolling most of the blocks should be contained in OuterL.
  // However, some of them might happen to be out of OuterL (e.g. if they
  // precede a loop exit). In this case we might need to insert PHI nodes in
  // order to preserve LCSSA form.
  // We don't need to check this if we already know that we need to fix LCSSA
  // form.
  // TODO: For now we just recompute LCSSA for the outer loop in this case, but
  // it should be possible to fix it in-place.
  if (PreserveLCSSA && OuterL && CompletelyUnroll && !NeedToFixLCSSA)
    NeedToFixLCSSA |= ::needToInsertPhisForLCSSA(OuterL, UnrolledLoopBlocks, LI);

  // If we have a DominatorTree, we should re-simplify impacted loops to ensure
  // subsequent analyses can rely on this form. We want to simplify at least
  // one layer outside of the loop that was unrolled so that any changes to the
  // parent loop exposed by the unrolling are considered.
  if (DT) {
    if (OuterL) {
      // OuterL includes all loops for which we can break loop-simplify, so
      // it's sufficient to simplify only it (it'll recursively simplify inner
      // loops too).
      if (NeedToFixLCSSA) {
        // LCSSA must be performed on the outermost affected loop. The unrolled
        // loop's last loop latch is guaranteed to be in the outermost loop
        // after LoopInfo's been updated by markAsRemoved.
        Loop *LatchLoop = LI->getLoopFor(Latches.back());
        Loop *FixLCSSALoop = OuterL;
        if (!FixLCSSALoop->contains(LatchLoop))
          while (FixLCSSALoop->getParentLoop() != LatchLoop)
            FixLCSSALoop = FixLCSSALoop->getParentLoop();

        formLCSSARecursively(*FixLCSSALoop, *DT, LI, SE);
      } else if (PreserveLCSSA) {
        assert(OuterL->isLCSSAForm(*DT) &&
               "Loops should be in LCSSA form after loop-unroll.");
      }

      // TODO: That potentially might be compile-time expensive. We should try
      // to fix the loop-simplified form incrementally.
      simplifyLoop(OuterL, DT, LI, SE, AC, PreserveLCSSA);
    } else {
      // Simplify loops for which we might've broken loop-simplify form.
      for (Loop *SubLoop : LoopsToSimplify)
        simplifyLoop(SubLoop, DT, LI, SE, AC, PreserveLCSSA);
    }
  }

  return true;
}

/// Given an llvm.loop loop id metadata node, returns the loop hint metadata
/// node with the given name (for example, "llvm.loop.unroll.count"). If no
/// such metadata node exists, then nullptr is returned.
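///
/// As an illustrative sketch (the metadata node numbers below are arbitrary),
/// a loop carrying an unroll-count hint typically looks like:
///
///   br i1 %cond, label %loop, label %exit, !llvm.loop !0
///   ...
///   !0 = distinct !{!0, !1}
///   !1 = !{!"llvm.loop.unroll.count", i32 4}
///
/// in which case GetUnrollMetadata(!0, "llvm.loop.unroll.count") returns !1.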
MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) {
  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (!MD)
      continue;

    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
    if (!S)
      continue;

    if (Name.equals(S->getString()))
      return MD;
  }
  return nullptr;
}