//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches. This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");

/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
                                    ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, It->second);
  }

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}

/// FoldBlockIntoPredecessor - Folds a basic block into its predecessor if it
/// only has one predecessor, and that predecessor only has one successor.
/// The LoopInfo Analysis that is passed will be kept consistent. If folding is
/// successful, references to the containing loop must be removed from
/// ScalarEvolution by calling ScalarEvolution::forgetLoop because SE may have
/// references to the eliminated BB. The argument ForgottenLoops contains a set
/// of loops that have already been forgotten to prevent redundant, expensive
/// calls to ScalarEvolution::forgetLoop. Returns the new combined block.
static BasicBlock *
FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo* LI, LPPassManager *LPM,
                         SmallPtrSetImpl<Loop *> &ForgottenLoops) {
  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
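  //
  // Illustration (hypothetical shape, not from any particular caller): for a
  // CFG edge Pred -> BB where Pred has exactly one successor and BB exactly
  // one predecessor, BB's instructions are spliced onto the end of Pred and
  // BB is erased, leaving a single merged block.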
  BasicBlock *OnlyPred = BB->getSinglePredecessor();
  if (!OnlyPred) return nullptr;

  if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);

  // Resolve any PHI nodes at the start of the block. They are all
  // guaranteed to have exactly one entry if they exist, unless there are
  // multiple duplicate (but guaranteed to be equal) entries for the
  // incoming edges. This occurs when there are multiple edges from
  // OnlyPred to OnlySucc.
  FoldSingleEntryPHINodes(BB);

  // Delete the unconditional branch from the predecessor...
  OnlyPred->getInstList().pop_back();

  // Make all PHI nodes that referred to BB now refer to Pred as their
  // source...
  BB->replaceAllUsesWith(OnlyPred);

  // Move all definitions in the successor to the predecessor...
  OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());

  // OldName will be valid until erased.
  StringRef OldName = BB->getName();

  // Erase basic block from the function...

  // ScalarEvolution holds references to loop exit blocks.
  if (LPM) {
    if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
      if (Loop *L = LI->getLoopFor(BB)) {
        if (ForgottenLoops.insert(L))
          SE->forgetLoop(L);
      }
    }
  }
  LI->removeBlock(BB);

  // Inherit predecessor's name if it exists...
  if (!OldName.empty() && !OnlyPred->hasName())
    OnlyPred->setName(OldName);

  BB->eraseFromParent();

  return OnlyPred;
}

/// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
/// if unrolling was successful, or false if the loop was unmodified. Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
///
/// TripCount is generally defined as the number of times the loop header
/// executes. UnrollLoop relaxes the definition to permit early exits: here
/// TripCount is the iteration on which control exits LatchBlock if no early
/// exits were taken. Note that UnrollLoop assumes that the loop counter test
/// terminates LatchBlock in order to remove unnecessary instances of the
/// test. In other words, control may exit the loop prior to TripCount
/// iterations via an early branch, but control may not exit the loop from the
/// LatchBlock's terminator prior to TripCount iterations.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// If a LoopPassManager is passed in, and the loop is fully removed, it will be
/// removed from the LoopPassManager as well. LPM can also be NULL.
///
/// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
/// available from the Pass it must also preserve those analyses.
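///
/// For illustration (assumed values, not from any particular caller): a loop
/// whose latch is known to execute exactly 10 times has TripCount == 10; if
/// the exact count is unknown but the latch is known to execute a multiple of
/// 4 times before exiting through its terminator, TripCount is 0 and
/// TripMultiple is 4. When neither is known, TripCount is 0 and TripMultiple
/// is 1.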
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
                      bool AllowRuntime, unsigned TripMultiple,
                      LoopInfo *LI, Pass *PP, LPPassManager *LPM) {
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
    return false;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
    return false;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
    return false;
  }

  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Can't unroll; loop not terminated by a conditional branch.\n");
    return false;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Won't unroll loop: address of header block is taken.\n");
    return false;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do. This way we don't
  // need to support "partial unrolling by 1".
  if (TripCount == 0 && Count < 2)
    return false;

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

  if (RuntimeTripCount && !UnrollRuntimeLoopProlog(L, Count, LI, LPM))
    return false;

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (PP) {
    ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE)
      SE->forgetLoop(L);
  }

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (TripCount != 0) {
    BreakoutTrip = TripCount % Count;
    TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = TripMultiple =
      (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
  }
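
  // For illustration (assumed values): TripCount == 10 and Count == 4 gives
  // BreakoutTrip == 10 % 4 == 2 with TripMultiple cleared to 0; an unknown
  // trip count with Count == 4 and TripMultiple == 6 gives
  // BreakoutTrip == TripMultiple == gcd(4, 6) == 2.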

  // Report the unrolling decision.
  DebugLoc LoopLoc = L->getStartLoc();
  Function *F = Header->getParent();
  LLVMContext &Ctx = F->getContext();

  if (CompletelyUnroll) {
    DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
          << " with trip count " << TripCount << "!\n");
    emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
                           Twine("completely unrolled loop with ") +
                               Twine(TripCount) + " iterations");
  } else {
    auto EmitDiag = [&](const Twine &T) {
      emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
                             "unrolled loop by a factor of " + Twine(Count) +
                                 T);
    };

    DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
          << " by " << Count);
    if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
      DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
      EmitDiag(" with a breakout at trip " + Twine(BreakoutTrip));
    } else if (TripMultiple != 1) {
      DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
      EmitDiag(" with " + Twine(TripMultiple) + " trips per branch");
    } else if (RuntimeTripCount) {
      DEBUG(dbgs() << " with run-time trip count");
      EmitDiag(" with run-time trip count");
    }
    DEBUG(dbgs() << "!\n");
  }

  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);

  // For the first iteration of the loop, we should use the precloned values
  // for PHI nodes. Insert associations now.
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock*> Headers;
  std::vector<BasicBlock*> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  for (unsigned It = 1; It != Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      // Loop over all of the PHI nodes in the block, changing them to use the
      // incoming values from the previous block.
      if (*BB == Header)
        for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHINode[i]] = InVal;
          New->getInstList().erase(NewPHI);
        }

      // Update our running map of newest clones
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

      L->addBasicBlockToLoop(New, LI->getBase());

      // Add phi entries for newly created values to all exit blocks.
      for (succ_iterator SI = succ_begin(*BB), SE = succ_end(*BB);
           SI != SE; ++SI) {
        if (L->contains(*SI))
          continue;
        for (BasicBlock::iterator BBI = (*SI)->begin();
             PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
          Value *Incoming = phi->getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          phi->addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      NewBlocks.push_back(New);
    }

    // Remap all instructions in the most recent iteration
    for (unsigned i = 0; i < NewBlocks.size(); ++i)
      for (BasicBlock::iterator I = NewBlocks[i]->begin(),
           E = NewBlocks[i]->end(); I != E; ++I)
        ::RemapInstruction(I, LastValueMap);
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
    PHINode *PN = OrigPHINode[i];
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    }
    else if (Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    // The original branch was replicated in each unrolled iteration.
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

    // The branch destination.
    unsigned j = (i + 1) % e;
    BasicBlock *Dest = Headers[j];
    bool NeedConditional = true;

    if (RuntimeTripCount && j != 0) {
      NeedConditional = false;
    }

    // For a complete unroll, make the last iteration end with a branch
    // to the exit block.
    if (CompletelyUnroll && j == 0) {
      Dest = LoopExit;
      NeedConditional = false;
    }

    // If we know the trip count or a multiple of it, we can safely use an
    // unconditional branch for some iterations.
    if (j != BreakoutTrip && (TripMultiple == 0 || j % TripMultiple != 0)) {
      NeedConditional = false;
    }

    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit
      if (Dest != LoopExit) {
        BasicBlock *BB = Latches[i];
        for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
             SI != SE; ++SI) {
          if (*SI == Headers[i])
            continue;
          for (BasicBlock::iterator BBI = (*SI)->begin();
               PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
            Phi->removeIncomingValue(BB, false);
          }
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  }
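
  // To illustrate the cases above (assumed scenario): unrolling by Count == 4
  // with a run-time trip count leaves only the latch that branches back to the
  // original header (j == 0) conditional; a complete unroll instead ends the
  // last copy with an unconditional branch to LoopExit.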

  // Merge adjacent basic blocks, if possible.
  SmallPtrSet<Loop *, 4> ForgottenLoops;
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
    if (Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM,
                                                      ForgottenLoops))
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
    }
  }

  DominatorTree *DT = nullptr;
  if (PP) {
    // FIXME: Reconstruct dom info, because it is not preserved properly.
    // Incrementally updating domtree after loop unrolling would be easy.
    if (DominatorTreeWrapperPass *DTWP =
            PP->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DT = &DTWP->getDomTree();
      DT->recalculate(*L->getHeader()->getParent());
    }

    // Simplify any new induction variables in the partially unrolled loop.
    ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE && !CompletelyUnroll) {
      SmallVector<WeakVH, 16> DeadInsts;
      simplifyLoopIVs(L, SE, LPM, DeadInsts);

      // Aggressively clean up dead instructions that simplifyLoopIVs already
      // identified. Any remaining should be cleaned up below.
      while (!DeadInsts.empty())
        if (Instruction *Inst =
                dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
          RecursivelyDeleteTriviallyDeadInstructions(Inst);
    }
  }
  // At this point, the code is well formed. We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
  for (std::vector<BasicBlock*>::const_iterator BB = NewLoopBlocks.begin(),
       BBE = NewLoopBlocks.end(); BB != BBE; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E; ) {
      Instruction *Inst = I++;

      if (isInstructionTriviallyDead(Inst))
        (*BB)->getInstList().erase(Inst);
      else if (Value *V = SimplifyInstruction(Inst))
        if (LI->replacementPreservesLCSSAForm(Inst, V)) {
          Inst->replaceAllUsesWith(V);
          (*BB)->getInstList().erase(Inst);
        }
    }

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;

  Loop *OuterL = L->getParentLoop();
  // Remove the loop from the LoopPassManager if it's completely removed.
  if (CompletelyUnroll && LPM != nullptr)
    LPM->deleteLoopFromQueue(L);

  // If we have a pass and a DominatorTree we should re-simplify impacted loops
  // to ensure subsequent analyses can rely on this form. We want to simplify
  // at least one layer outside of the loop that was unrolled so that any
  // changes to the parent loop exposed by the unrolling are considered.
  if (PP && DT) {
    if (!OuterL && !CompletelyUnroll)
      OuterL = L;
    if (OuterL) {
      DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
      const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
      ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
      simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL);

      // LCSSA must be performed on the outermost affected loop. The unrolled
      // loop's last loop latch is guaranteed to be in the outermost loop after
      // deleteLoopFromQueue updates LoopInfo.
      Loop *LatchLoop = LI->getLoopFor(Latches.back());
      if (!OuterL->contains(LatchLoop))
        while (OuterL->getParentLoop() != LatchLoop)
          OuterL = OuterL->getParentLoop();

      formLCSSARecursively(*OuterL, *DT, SE);
    }
  }

  return true;
}