//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches.  This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopSimplify.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"
using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");

static cl::opt<bool>
UnrollRuntimeEpilog("unroll-runtime-epilog", cl::init(false), cl::Hidden,
                    cl::desc("Allow runtime unrolled loops to be unrolled "
                             "with epilog instead of prolog."));

static cl::opt<bool>
UnrollVerifyDomtree("unroll-verify-domtree", cl::Hidden,
                    cl::desc("Verify domtree after unrolling"),
#ifdef NDEBUG
    cl::init(false)
#else
    cl::init(true)
#endif
                    );

/// Convert the instruction operands from referencing the current values into
/// those specified by VMap.
static inline void remapInstruction(Instruction *I,
                                    ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, It->second);
  }

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}

/// Folds a basic block into its predecessor if it only has one predecessor,
/// and that predecessor only has one successor.
/// The LoopInfo Analysis that is passed will be kept consistent.
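/// For example (a sketch of what the code below does): given a predecessor
/// whose only terminator successor is BB, and BB having that block as its only
/// predecessor, BB's instructions are spliced onto the end of the predecessor,
/// uses of BB are rewritten, and BB is erased.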
/// If folding is successful, references to the containing loop must be removed
/// from ScalarEvolution by calling ScalarEvolution::forgetLoop, because SE may
/// have references to the eliminated BB. The argument ForgottenLoops contains
/// a set of loops that have already been forgotten to prevent redundant,
/// expensive calls to ScalarEvolution::forgetLoop. Returns the new combined
/// block.
static BasicBlock *
foldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI, ScalarEvolution *SE,
                         SmallPtrSetImpl<Loop *> &ForgottenLoops,
                         DominatorTree *DT) {
  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
  BasicBlock *OnlyPred = BB->getSinglePredecessor();
  if (!OnlyPred) return nullptr;

  if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);

  // Resolve any PHI nodes at the start of the block. They are all
  // guaranteed to have exactly one entry if they exist, unless there are
  // multiple duplicate (but guaranteed to be equal) entries for the
  // incoming edges. This occurs when there are multiple edges from
  // OnlyPred to OnlySucc.
  FoldSingleEntryPHINodes(BB);

  // Delete the unconditional branch from the predecessor...
  OnlyPred->getInstList().pop_back();

  // Make all PHI nodes that referred to BB now refer to Pred as their
  // source...
  BB->replaceAllUsesWith(OnlyPred);

  // Move all definitions in the successor to the predecessor...
  OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());

  // OldName will be valid until erased.
  StringRef OldName = BB->getName();

  // Erase the old block and update dominator info.
  if (DT)
    if (DomTreeNode *DTN = DT->getNode(BB)) {
      DomTreeNode *PredDTN = DT->getNode(OnlyPred);
      SmallVector<DomTreeNode *, 8> Children(DTN->begin(), DTN->end());
      for (auto *DI : Children)
        DT->changeImmediateDominator(DI, PredDTN);

      DT->eraseNode(BB);
    }

  // ScalarEvolution holds references to loop exit blocks.
  if (SE) {
    if (Loop *L = LI->getLoopFor(BB)) {
      if (ForgottenLoops.insert(L).second)
        SE->forgetLoop(L);
    }
  }
  LI->removeBlock(BB);

  // Inherit predecessor's name if it exists...
  if (!OldName.empty() && !OnlyPred->hasName())
    OnlyPred->setName(OldName);

  BB->eraseFromParent();

  return OnlyPred;
}

/// Check if unrolling created a situation where we need to insert phi nodes to
/// preserve LCSSA form.
/// \param Blocks is a vector of basic blocks representing unrolled loop.
/// \param L is the outer loop.
/// It's possible that some of the blocks are in L, and some are not. In this
/// case, if there is a use outside L and the definition is inside L, we need
/// to insert a phi-node, otherwise LCSSA will be broken.
/// The function is just a helper function for llvm::UnrollLoop that returns
/// true if this situation occurs, indicating that LCSSA needs to be fixed.
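///
/// A sketch of the situation (names are illustrative):
///
///   inside.L:                     ; block that remains in L
///     %def = add i32 %x, 1
///   outside.L:                    ; one of Blocks, not in L
///     %use = mul i32 %def, 2      ; needs an LCSSA phi at the loop boundary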
static bool needToInsertPhisForLCSSA(Loop *L, std::vector<BasicBlock *> Blocks,
                                     LoopInfo *LI) {
  for (BasicBlock *BB : Blocks) {
    if (LI->getLoopFor(BB) == L)
      continue;
    for (Instruction &I : *BB) {
      for (Use &U : I.operands()) {
        if (auto Def = dyn_cast<Instruction>(U)) {
          Loop *DefLoop = LI->getLoopFor(Def->getParent());
          if (!DefLoop)
            continue;
          if (DefLoop->contains(L))
            return true;
        }
      }
    }
  }
  return false;
}

/// Adds ClonedBB to LoopInfo, creates a new loop for ClonedBB if necessary
/// and adds a mapping from the original loop to the new loop to NewLoops.
/// Returns nullptr if no new loop was created; otherwise, returns a pointer to
/// the original loop that OriginalBB was part of.
const Loop* llvm::addClonedBlockToLoopInfo(BasicBlock *OriginalBB,
                                           BasicBlock *ClonedBB, LoopInfo *LI,
                                           NewLoopsMap &NewLoops) {
  // Figure out which loop New is in.
  const Loop *OldLoop = LI->getLoopFor(OriginalBB);
  assert(OldLoop && "Should (at least) be in the loop being unrolled!");

  Loop *&NewLoop = NewLoops[OldLoop];
  if (!NewLoop) {
    // Found a new sub-loop.
    assert(OriginalBB == OldLoop->getHeader() &&
           "Header should be first in RPO");

    NewLoop = LI->AllocateLoop();
    Loop *NewLoopParent = NewLoops.lookup(OldLoop->getParentLoop());

    if (NewLoopParent)
      NewLoopParent->addChildLoop(NewLoop);
    else
      LI->addTopLevelLoop(NewLoop);

    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return OldLoop;
  } else {
    NewLoop->addBasicBlockToLoop(ClonedBB, *LI);
    return nullptr;
  }
}

/// The function chooses which type of unroll (epilog or prolog) is more
/// profitable.
/// Epilog unroll is more profitable when there is a PHI that starts from a
/// constant. In this case the epilog will leave the PHI starting from a
/// constant, but the prolog will convert it to a non-constant.
///
/// loop:
///   PN = PHI [I, Latch], [CI, PreHeader]
///   I = foo(PN)
///   ...
///
/// Epilog unroll case.
/// loop:
///   PN = PHI [I2, Latch], [CI, PreHeader]
///   I1 = foo(PN)
///   I2 = foo(I1)
///   ...
/// Prolog unroll case.
/// NewPN = PHI [PrologI, Prolog], [CI, PreHeader]
/// loop:
///   PN = PHI [I2, Latch], [NewPN, PreHeader]
///   I1 = foo(PN)
///   I2 = foo(I1)
///   ...
///
static bool isEpilogProfitable(Loop *L) {
  BasicBlock *PreHeader = L->getLoopPreheader();
  BasicBlock *Header = L->getHeader();
  assert(PreHeader && Header);
  for (Instruction &BBI : *Header) {
    PHINode *PN = dyn_cast<PHINode>(&BBI);
    if (!PN)
      break;
    if (isa<ConstantInt>(PN->getIncomingValueForBlock(PreHeader)))
      return true;
  }
  return false;
}

/// Unroll the given loop by Count. The loop must be in LCSSA form. Unrolling
/// can only fail when the loop's latch block is not terminated by a
/// conditional branch instruction. However, if the trip count (and multiple)
/// are not known, loop unrolling will mostly produce more code that is no
/// faster.
///
/// TripCount is the upper bound of the iteration on which control exits
/// LatchBlock. Control may exit the loop prior to TripCount iterations either
/// via an early branch in another loop block or via the LatchBlock terminator.
/// This is relaxed from the general definition of trip count, which is the
/// number of times the loop header executes.
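/// (For instance, if the body can 'break' out of the loop before reaching the
/// latch, the header may execute more often than the LatchBlock exit is
/// reached; TripCount bounds the latter, not the former.)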
/// Note that UnrollLoop assumes that the loop counter test is in LatchBlock in
/// order to remove unnecessary instances of the test. If control can exit the
/// loop from the LatchBlock's terminator prior to TripCount iterations, flag
/// PreserveCondBr needs to be set.
///
/// PreserveCondBr indicates whether the conditional branch of the LatchBlock
/// needs to be preserved. It is needed when we use the trip count upper bound
/// to fully unroll the loop. If PreserveOnlyFirst is also set then only the
/// first conditional branch needs to be preserved.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// If AllowRuntime is true then UnrollLoop will consider unrolling loops that
/// have a runtime (i.e. not compile time constant) trip count. Unrolling these
/// loops requires an unroll "prologue" that runs "RuntimeTripCount % Count"
/// iterations before branching into the unrolled loop. UnrollLoop will not
/// runtime-unroll the loop if computing RuntimeTripCount will be expensive and
/// AllowExpensiveTripCount is false.
///
/// If we want to perform PGO-based loop peeling, PeelCount is set to the
/// number of iterations we want to peel off.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// This utility preserves LoopInfo. It will also preserve ScalarEvolution and
/// DominatorTree if they are non-null.
LoopUnrollResult llvm::UnrollLoop(
    Loop *L, unsigned Count, unsigned TripCount, bool Force, bool AllowRuntime,
    bool AllowExpensiveTripCount, bool PreserveCondBr, bool PreserveOnlyFirst,
    unsigned TripMultiple, unsigned PeelCount, bool UnrollRemainder,
    LoopInfo *LI, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    OptimizationRemarkEmitter *ORE, bool PreserveLCSSA) {

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "  Can't unroll; loop preheader-insertion failed.\n");
    return LoopUnrollResult::Unmodified;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << "  Can't unroll; loop exit-block-insertion failed.\n");
    return LoopUnrollResult::Unmodified;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << "  Can't unroll; Loop body cannot be cloned.\n");
    return LoopUnrollResult::Unmodified;
  }

  // The current loop unroll pass can only unroll loops with a single latch
  // that's a conditional branch exiting the loop.
  // FIXME: The implementation can be extended to work with more complicated
  // cases, e.g. loops with multiple latches.
  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
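    // (Rotation typically turns the loop into do-while form, so the latch ends
    // in the conditional exit branch this transform requires.)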
    DEBUG(dbgs() <<
          "  Can't unroll; loop not terminated by a conditional branch.\n");
    return LoopUnrollResult::Unmodified;
  }

  auto CheckSuccessors = [&](unsigned S1, unsigned S2) {
    return BI->getSuccessor(S1) == Header && !L->contains(BI->getSuccessor(S2));
  };

  if (!CheckSuccessors(0, 1) && !CheckSuccessors(1, 0)) {
    DEBUG(dbgs() << "Can't unroll; only loops with one conditional latch"
                    " exiting the loop can be unrolled\n");
    return LoopUnrollResult::Unmodified;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          "  Won't unroll loop: address of header block is taken.\n");
    return LoopUnrollResult::Unmodified;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << "  Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << "  Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do.
  if (TripCount == 0 && Count < 2 && PeelCount == 0) {
    DEBUG(dbgs() << "Won't unroll; almost nothing to do\n");
    return LoopUnrollResult::Unmodified;
  }

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;
  SmallVector<BasicBlock *, 4> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  std::vector<BasicBlock*> OriginalLoopBlocks = L->getBlocks();

  // Go through all exits of L and see if there are any phi-nodes there. We just
  // conservatively assume that they're inserted to preserve LCSSA form, which
  // means that complete unrolling might break this form. We need to either fix
  // it in-place after the transformation, or entirely rebuild LCSSA. TODO: For
  // now we just recompute LCSSA for the outer loop, but it should be possible
  // to fix it in-place.
  bool NeedToFixLCSSA = PreserveLCSSA && CompletelyUnroll &&
                        any_of(ExitBlocks, [](const BasicBlock *BB) {
                          return isa<PHINode>(BB->begin());
                        });

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

  assert((!RuntimeTripCount || !PeelCount) &&
         "Did not expect runtime trip-count unrolling "
         "and peeling for the same loop");

  if (PeelCount) {
    bool Peeled = peelLoop(L, PeelCount, LI, SE, DT, AC, PreserveLCSSA);

    // Successful peeling may result in a change in the loop preheader/trip
    // counts. If we later unroll the loop, we want these to be updated.
    if (Peeled) {
      BasicBlock *ExitingBlock = L->getExitingBlock();
      assert(ExitingBlock && "Loop without exiting block?");
      Preheader = L->getLoopPreheader();
      TripCount = SE->getSmallConstantTripCount(L, ExitingBlock);
      TripMultiple = SE->getSmallConstantTripMultiple(L, ExitingBlock);
    }
  }

  // Loops containing convergent instructions must have a count that divides
  // their TripMultiple.
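  // (Rationale, roughly: otherwise the transform would have to emit a
  // remainder loop or extra in-body exit branches, which adds control
  // dependences to the convergent operation that its semantics do not allow.)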
  DEBUG(
      {
        bool HasConvergent = false;
        for (auto &BB : L->blocks())
          for (auto &I : *BB)
            if (auto CS = CallSite(&I))
              HasConvergent |= CS.isConvergent();
        assert((!HasConvergent || TripMultiple % Count == 0) &&
               "Unroll count must divide trip multiple if loop contains a "
               "convergent operation.");
      });

  bool EpilogProfitability =
      UnrollRuntimeEpilog.getNumOccurrences() ? UnrollRuntimeEpilog
                                              : isEpilogProfitable(L);

  if (RuntimeTripCount && TripMultiple % Count != 0 &&
      !UnrollRuntimeLoopRemainder(L, Count, AllowExpensiveTripCount,
                                  EpilogProfitability, UnrollRemainder,
                                  LI, SE, DT, AC, ORE,
                                  PreserveLCSSA)) {
    if (Force)
      RuntimeTripCount = false;
    else {
      DEBUG(
          dbgs() << "Won't unroll; remainder loop could not be generated "
                    "when assuming runtime trip count\n");
      return LoopUnrollResult::Unmodified;
    }
  }

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (SE)
    SE->forgetLoop(L);

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (TripCount != 0) {
    BreakoutTrip = TripCount % Count;
    TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = TripMultiple =
        (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
  }

  using namespace ore;
  // Report the unrolling decision.
  if (CompletelyUnroll) {
    DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
                 << " with trip count " << TripCount << "!\n");
    ORE->emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "FullyUnrolled", L->getStartLoc(),
                                L->getHeader())
             << "completely unrolled loop with " << NV("UnrollCount", TripCount)
             << " iterations";
    });
  } else if (PeelCount) {
    DEBUG(dbgs() << "PEELING loop %" << Header->getName()
                 << " with iteration count " << PeelCount << "!\n");
    ORE->emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "Peeled", L->getStartLoc(),
                                L->getHeader())
             << " peeled loop by " << NV("PeelCount", PeelCount)
             << " iterations";
    });
  } else {
    auto DiagBuilder = [&]() {
      OptimizationRemark Diag(DEBUG_TYPE, "PartialUnrolled", L->getStartLoc(),
                              L->getHeader());
      return Diag << "unrolled loop by a factor of "
                  << NV("UnrollCount", Count);
    };

    DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
                 << " by " << Count);
    if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
      DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
      ORE->emit([&]() {
        return DiagBuilder() << " with a breakout at trip "
                             << NV("BreakoutTrip", BreakoutTrip);
      });
    } else if (TripMultiple != 1) {
      DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
      ORE->emit([&]() {
        return DiagBuilder() << " with " << NV("TripMultiple", TripMultiple)
                             << " trips per branch";
      });
    } else if (RuntimeTripCount) {
      DEBUG(dbgs() << " with run-time trip count");
      ORE->emit([&]() { return DiagBuilder() << " with run-time trip count"; });
    }
    DEBUG(dbgs() << "!\n");
  }

  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);

  // For the first iteration of the loop, we should use the precloned values
  // for PHI nodes.  Insert associations now.
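  // LastValueMap maps each original value (and block) of the loop to its clone
  // from the most recently created iteration; lookups that miss simply keep
  // referring to the original, first-iteration values.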
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock*> Headers;
  std::vector<BasicBlock*> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  std::vector<BasicBlock*> UnrolledLoopBlocks = L->getBlocks();

  // Loop Unrolling might create new loops. While we do preserve LoopInfo, we
  // might break loop-simplified form for these loops (as they, e.g., would
  // share the same exit blocks). We'll keep track of loops for which we can
  // break this so that later we can re-simplify them.
  SmallSetVector<Loop *, 4> LoopsToSimplify;
  for (Loop *SubLoop : *L)
    LoopsToSimplify.insert(SubLoop);

  if (Header->getParent()->isDebugInfoForProfiling())
    for (BasicBlock *BB : L->getBlocks())
      for (Instruction &I : *BB)
        if (const DILocation *DIL = I.getDebugLoc())
          I.setDebugLoc(DIL->cloneWithDuplicationFactor(Count));

  for (unsigned It = 1; It != Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;
    SmallDenseMap<const Loop *, Loop *, 4> NewLoops;
    NewLoops[L] = L;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      assert((*BB != Header || LI->getLoopFor(*BB) == L) &&
             "Header should not be in a sub-loop");
      // Tell LI about New.
      const Loop *OldLoop = addClonedBlockToLoopInfo(*BB, New, LI, NewLoops);
      if (OldLoop) {
        LoopsToSimplify.insert(NewLoops[OldLoop]);

        // Forget the old loop, since its inputs may have changed.
        if (SE)
          SE->forgetLoop(OldLoop);
      }

      if (*BB == Header)
        // Loop over all of the PHI nodes in the block, changing them to use
        // the incoming values from the previous block.
        for (PHINode *OrigPHI : OrigPHINode) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHI]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHI] = InVal;
          New->getInstList().erase(NewPHI);
        }

      // Update our running map of newest clones
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

      // Add phi entries for newly created values to all exit blocks.
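      // Every clone of a block that exits the loop becomes a new predecessor
      // of the corresponding exit block, so each phi there needs an incoming
      // value for the clone; values defined inside the loop are looked up in
      // LastValueMap first.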
      for (BasicBlock *Succ : successors(*BB)) {
        if (L->contains(Succ))
          continue;
        for (BasicBlock::iterator BBI = Succ->begin();
             PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
          Value *Incoming = phi->getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          phi->addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      NewBlocks.push_back(New);
      UnrolledLoopBlocks.push_back(New);

      // Update DomTree: since we just copy the loop body, and each copy has a
      // dedicated entry block (copy of the header block), this header's copy
      // dominates all copied blocks. That means dominance relations in the
      // copied body are the same as in the original body.
      if (DT) {
        if (*BB == Header)
          DT->addNewBlock(New, Latches[It - 1]);
        else {
          auto BBDomNode = DT->getNode(*BB);
          auto BBIDom = BBDomNode->getIDom();
          BasicBlock *OriginalBBIDom = BBIDom->getBlock();
          DT->addNewBlock(
              New, cast<BasicBlock>(LastValueMap[cast<Value>(OriginalBBIDom)]));
        }
      }
    }

    // Remap all instructions in the most recent iteration
    for (BasicBlock *NewBlock : NewBlocks) {
      for (Instruction &I : *NewBlock) {
        ::remapInstruction(&I, LastValueMap);
        if (auto *II = dyn_cast<IntrinsicInst>(&I))
          if (II->getIntrinsicID() == Intrinsic::assume)
            AC->registerAssumption(II);
      }
    }
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (PHINode *PN : OrigPHINode) {
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    }
    else if (Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    // The original branch was replicated in each unrolled iteration.
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

    // The branch destination.
    unsigned j = (i + 1) % e;
    BasicBlock *Dest = Headers[j];
    bool NeedConditional = true;

    if (RuntimeTripCount && j != 0) {
      NeedConditional = false;
    }

    // For a complete unroll, make the last iteration end with a branch
    // to the exit block.
    if (CompletelyUnroll) {
      if (j == 0)
        Dest = LoopExit;
      // If using trip count upper bound to completely unroll, we need to keep
      // the conditional branch except the last one because the loop may exit
      // after any iteration.
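      // (NeedConditional is necessarily still true here: complete unrolling
      // requires a known TripCount while runtime unrolling requires an unknown
      // one, so the RuntimeTripCount case above cannot have cleared it, as the
      // assert below double-checks.)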
      assert(NeedConditional &&
             "NeedConditional cannot be modified by both complete "
             "unrolling and runtime unrolling");
      NeedConditional = (PreserveCondBr && j && !(PreserveOnlyFirst && i != 0));
    } else if (j != BreakoutTrip &&
               (TripMultiple == 0 || j % TripMultiple != 0)) {
      // If we know the trip count or a multiple of it, we can safely use an
      // unconditional branch for some iterations.
      NeedConditional = false;
    }

    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit
      if (Dest != LoopExit) {
        BasicBlock *BB = Latches[i];
        for (BasicBlock *Succ: successors(BB)) {
          if (Succ == Headers[i])
            continue;
          for (BasicBlock::iterator BBI = Succ->begin();
               PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
            Phi->removeIncomingValue(BB, false);
          }
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  }

  // Update dominators of blocks we might reach through exits.
  // Immediate dominator of such a block might change, because we add more
  // routes which can lead to the exit: we can now reach it from the copied
  // iterations too.
  if (DT && Count > 1) {
    for (auto *BB : OriginalLoopBlocks) {
      auto *BBDomNode = DT->getNode(BB);
      SmallVector<BasicBlock *, 16> ChildrenToUpdate;
      for (auto *ChildDomNode : BBDomNode->getChildren()) {
        auto *ChildBB = ChildDomNode->getBlock();
        if (!L->contains(ChildBB))
          ChildrenToUpdate.push_back(ChildBB);
      }
      BasicBlock *NewIDom;
      if (BB == LatchBlock) {
        // The latch is special because we emit unconditional branches in
        // some cases where the original loop contained a conditional branch.
        // Since the latch is always at the bottom of the loop, if the latch
        // dominated an exit before unrolling, the new dominator of that exit
        // must also be a latch.  Specifically, the dominator is the first
        // latch which ends in a conditional branch, or the last latch if
        // there is no such latch.
        NewIDom = Latches.back();
        for (BasicBlock *IterLatch : Latches) {
          TerminatorInst *Term = IterLatch->getTerminator();
          if (isa<BranchInst>(Term) && cast<BranchInst>(Term)->isConditional()) {
            NewIDom = IterLatch;
            break;
          }
        }
      } else {
        // The new idom of the block will be the nearest common dominator
        // of all copies of the previous idom.  This is equivalent to the
        // nearest common dominator of the previous idom and the first latch,
        // which dominates all copies of the previous idom.
        NewIDom = DT->findNearestCommonDominator(BB, LatchBlock);
      }
      for (auto *ChildBB : ChildrenToUpdate)
        DT->changeImmediateDominator(ChildBB, NewIDom);
    }
  }

  if (DT && UnrollVerifyDomtree)
    DT->verifyDomTree();

  // Merge adjacent basic blocks, if possible.
  SmallPtrSet<Loop *, 4> ForgottenLoops;
  for (BasicBlock *Latch : Latches) {
    BranchInst *Term = cast<BranchInst>(Latch->getTerminator());
    if (Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      if (BasicBlock *Fold =
              foldBlockIntoPredecessor(Dest, LI, SE, ForgottenLoops, DT)) {
        // Dest has been folded into Fold. Update our worklists accordingly.
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
        UnrolledLoopBlocks.erase(std::remove(UnrolledLoopBlocks.begin(),
                                             UnrolledLoopBlocks.end(), Dest),
                                 UnrolledLoopBlocks.end());
      }
    }
  }

  // Simplify any new induction variables in the partially unrolled loop.
  if (SE && !CompletelyUnroll && Count > 1) {
    SmallVector<WeakTrackingVH, 16> DeadInsts;
    simplifyLoopIVs(L, SE, DT, LI, DeadInsts);

    // Aggressively clean up dead instructions that simplifyLoopIVs already
    // identified. Any remaining should be cleaned up below.
    while (!DeadInsts.empty())
      if (Instruction *Inst =
              dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
        RecursivelyDeleteTriviallyDeadInstructions(Inst);
  }

  // At this point, the code is well formed. We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const DataLayout &DL = Header->getModule()->getDataLayout();
  const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
  for (BasicBlock *BB : NewLoopBlocks) {
    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
      Instruction *Inst = &*I++;

      if (Value *V = SimplifyInstruction(Inst, {DL, nullptr, DT, AC}))
        if (LI->replacementPreservesLCSSAForm(Inst, V))
          Inst->replaceAllUsesWith(V);
      if (isInstructionTriviallyDead(Inst))
        BB->getInstList().erase(Inst);
    }
  }

  // TODO: after peeling or unrolling, previously loop variant conditions are
  // likely to fold to constants; eagerly propagating those here will require
  // fewer cleanup passes to be run. Alternatively, a LoopEarlyCSE might be
  // appropriate.

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;

  Loop *OuterL = L->getParentLoop();
  // Update LoopInfo if the loop is completely removed.
  if (CompletelyUnroll)
    LI->erase(L);

  // After complete unrolling most of the blocks should be contained in OuterL.
  // However, some of them might happen to be out of OuterL (e.g. if they
  // precede a loop exit). In this case we might need to insert PHI nodes in
  // order to preserve LCSSA form.
  // We don't need to check this if we already know that we need to fix LCSSA
  // form.
  // TODO: For now we just recompute LCSSA for the outer loop in this case, but
  // it should be possible to fix it in-place.
  if (PreserveLCSSA && OuterL && CompletelyUnroll && !NeedToFixLCSSA)
    NeedToFixLCSSA |= ::needToInsertPhisForLCSSA(OuterL, UnrolledLoopBlocks, LI);

  // If we have a pass and a DominatorTree we should re-simplify impacted loops
  // to ensure subsequent analyses can rely on this form. We want to simplify
  // at least one layer outside of the loop that was unrolled so that any
  // changes to the parent loop exposed by the unrolling are considered.
  if (DT) {
    if (OuterL) {
      // OuterL includes all loops for which we can break loop-simplify, so
      // it's sufficient to simplify only it (it'll recursively simplify inner
      // loops too).
      if (NeedToFixLCSSA) {
        // LCSSA must be performed on the outermost affected loop. The unrolled
        // loop's last loop latch is guaranteed to be in the outermost loop
        // after LoopInfo's been updated by LoopInfo::erase.
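        // (Sketch of the search below: start at OuterL and, if LatchLoop is
        // not nested inside it, walk up the parent chain until the loop whose
        // parent is LatchLoop is reached; that is taken to be the outermost
        // loop whose LCSSA form the unrolling may have affected.)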
        Loop *LatchLoop = LI->getLoopFor(Latches.back());
        Loop *FixLCSSALoop = OuterL;
        if (!FixLCSSALoop->contains(LatchLoop))
          while (FixLCSSALoop->getParentLoop() != LatchLoop)
            FixLCSSALoop = FixLCSSALoop->getParentLoop();

        formLCSSARecursively(*FixLCSSALoop, *DT, LI, SE);
      } else if (PreserveLCSSA) {
        assert(OuterL->isLCSSAForm(*DT) &&
               "Loops should be in LCSSA form after loop-unroll.");
      }

      // TODO: That potentially might be compile-time expensive. We should try
      // to fix the loop-simplified form incrementally.
      simplifyLoop(OuterL, DT, LI, SE, AC, PreserveLCSSA);
    } else {
      // Simplify loops for which we might've broken loop-simplify form.
      for (Loop *SubLoop : LoopsToSimplify)
        simplifyLoop(SubLoop, DT, LI, SE, AC, PreserveLCSSA);
    }
  }

  return CompletelyUnroll ? LoopUnrollResult::FullyUnrolled
                          : LoopUnrollResult::PartiallyUnrolled;
}

/// Given an llvm.loop loop id metadata node, returns the loop hint metadata
/// node with the given name (for example, "llvm.loop.unroll.count"). If no
/// such metadata node exists, then nullptr is returned.
MDNode *llvm::GetUnrollMetadata(MDNode *LoopID, StringRef Name) {
  // First operand should refer to the loop id itself.
  assert(LoopID->getNumOperands() > 0 && "requires at least one operand");
  assert(LoopID->getOperand(0) == LoopID && "invalid loop id");

  for (unsigned i = 1, e = LoopID->getNumOperands(); i < e; ++i) {
    MDNode *MD = dyn_cast<MDNode>(LoopID->getOperand(i));
    if (!MD)
      continue;

    MDString *S = dyn_cast<MDString>(MD->getOperand(0));
    if (!S)
      continue;

    if (Name.equals(S->getString()))
      return MD;
  }
  return nullptr;
}