//===-- MachineSink.cpp - Sinking for machine instructions ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass moves instructions into successor blocks when possible, so that
// they aren't executed on paths where their results aren't needed.
//
// This pass is not intended to be a replacement or a complete alternative
// for an LLVM-IR-level sinking pass. It is only designed to sink simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-sink"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static cl::opt<bool>
SplitEdges("machine-sink-split",
           cl::desc("Split critical edges during machine sinking"),
           cl::init(true), cl::Hidden);

STATISTIC(NumSunk,      "Number of machine instructions sunk");
STATISTIC(NumSplit,     "Number of critical edges split");
STATISTIC(NumCoalesces, "Number of copies coalesced");

namespace {
class MachineSinking : public MachineFunctionPass {
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  MachineRegisterInfo *MRI;   // Machine register information
  MachineDominatorTree *DT;   // Machine dominator tree
  MachineLoopInfo *LI;
  AliasAnalysis *AA;
  BitVector AllocatableSet;   // Which physregs are allocatable?

  // Remember which edges have been considered for breaking.
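  // The set is cleared before each iteration over the function. Once an edge
  // has been considered, isWorthBreakingCriticalEdge treats it as worth
  // breaking again, so several cheap instructions can be sunk across the same
  // edge in one pass.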
  SmallSet<std::pair<MachineBasicBlock*, MachineBasicBlock*>, 8>
      CEBCandidates;

public:
  static char ID; // Pass identification
  MachineSinking() : MachineFunctionPass(ID) {
    initializeMachineSinkingPass(*PassRegistry::getPassRegistry());
  }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<AliasAnalysis>();
    AU.addRequired<MachineDominatorTree>();
    AU.addRequired<MachineLoopInfo>();
    AU.addPreserved<MachineDominatorTree>();
    AU.addPreserved<MachineLoopInfo>();
  }

  virtual void releaseMemory() {
    CEBCandidates.clear();
  }

private:
  bool ProcessBlock(MachineBasicBlock &MBB);
  bool isWorthBreakingCriticalEdge(MachineInstr *MI,
                                   MachineBasicBlock *From,
                                   MachineBasicBlock *To);
  MachineBasicBlock *SplitCriticalEdge(MachineInstr *MI,
                                       MachineBasicBlock *From,
                                       MachineBasicBlock *To,
                                       bool BreakPHIEdge);
  bool SinkInstruction(MachineInstr *MI, bool &SawStore);
  bool AllUsesDominatedByBlock(unsigned Reg, MachineBasicBlock *MBB,
                               MachineBasicBlock *DefMBB,
                               bool &BreakPHIEdge, bool &LocalUse) const;
  MachineBasicBlock *FindSuccToSinkTo(MachineInstr *MI, bool &BreakPHIEdge);

  bool PerformTrivialForwardCoalescing(MachineInstr *MI,
                                       MachineBasicBlock *MBB);
};
} // end anonymous namespace

char MachineSinking::ID = 0;
INITIALIZE_PASS_BEGIN(MachineSinking, "machine-sink",
                      "Machine code sinking", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineSinking, "machine-sink",
                    "Machine code sinking", false, false)

FunctionPass *llvm::createMachineSinkingPass() { return new MachineSinking(); }

bool MachineSinking::PerformTrivialForwardCoalescing(MachineInstr *MI,
                                                     MachineBasicBlock *MBB) {
  if (!MI->isCopy())
    return false;

  unsigned SrcReg = MI->getOperand(1).getReg();
  unsigned DstReg = MI->getOperand(0).getReg();
  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
      !TargetRegisterInfo::isVirtualRegister(DstReg) ||
      !MRI->hasOneNonDBGUse(SrcReg))
    return false;

  const TargetRegisterClass *SRC = MRI->getRegClass(SrcReg);
  const TargetRegisterClass *DRC = MRI->getRegClass(DstReg);
  if (SRC != DRC)
    return false;

  MachineInstr *DefMI = MRI->getVRegDef(SrcReg);
  if (DefMI->isCopyLike())
    return false;
  DEBUG(dbgs() << "Coalescing: " << *DefMI);
  DEBUG(dbgs() << "*** to: " << *MI);
  MRI->replaceRegWith(DstReg, SrcReg);
  MI->eraseFromParent();
  ++NumCoalesces;
  return true;
}

/// AllUsesDominatedByBlock - Return true if all uses of the specified register
/// occur in blocks dominated by the specified block. If any use is in the
/// definition block, then return false since it is never legal to move def
/// after uses.
bool
MachineSinking::AllUsesDominatedByBlock(unsigned Reg,
                                        MachineBasicBlock *MBB,
                                        MachineBasicBlock *DefMBB,
                                        bool &BreakPHIEdge,
                                        bool &LocalUse) const {
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Only makes sense for vregs");

  // Ignore debug uses because debug info doesn't affect the code.
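  // If there are no non-debug uses at all, sinking cannot break anything, so
  // every (non-existent) use is trivially dominated.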
  if (MRI->use_nodbg_empty(Reg))
    return true;

  // BreakPHIEdge is true if all the uses are in the successor MBB being sunk
  // into and they are all PHI nodes. In this case, machine-sink must break
  // the critical edge first. e.g.
  //
  // BB#1: derived from LLVM BB %bb4.preheader
  //   Predecessors according to CFG: BB#0
  //     ...
  //     %reg16385<def> = DEC64_32r %reg16437, %EFLAGS<imp-def,dead>
  //     ...
  //     JE_4 <BB#37>, %EFLAGS<imp-use>
  //   Successors according to CFG: BB#37 BB#2
  //
  // BB#2: derived from LLVM BB %bb.nph
  //   Predecessors according to CFG: BB#0 BB#1
  //     %reg16386<def> = PHI %reg16434, <BB#0>, %reg16385, <BB#1>
  BreakPHIEdge = true;
  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (!(UseBlock == MBB && UseInst->isPHI() &&
          UseInst->getOperand(I.getOperandNo()+1).getMBB() == DefMBB)) {
      BreakPHIEdge = false;
      break;
    }
  }
  if (BreakPHIEdge)
    return true;

  for (MachineRegisterInfo::use_nodbg_iterator
         I = MRI->use_nodbg_begin(Reg), E = MRI->use_nodbg_end();
       I != E; ++I) {
    // Determine the block of the use.
    MachineInstr *UseInst = &*I;
    MachineBasicBlock *UseBlock = UseInst->getParent();
    if (UseInst->isPHI()) {
      // PHI nodes use the operand in the predecessor block, not the block with
      // the PHI.
      UseBlock = UseInst->getOperand(I.getOperandNo()+1).getMBB();
    } else if (UseBlock == DefMBB) {
      LocalUse = true;
      return false;
    }

    // Check that it dominates.
    if (!DT->dominates(MBB, UseBlock))
      return false;
  }

  return true;
}

bool MachineSinking::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "******** Machine Sinking ********\n");

  const TargetMachine &TM = MF.getTarget();
  TII = TM.getInstrInfo();
  TRI = TM.getRegisterInfo();
  MRI = &MF.getRegInfo();
  DT = &getAnalysis<MachineDominatorTree>();
  LI = &getAnalysis<MachineLoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  AllocatableSet = TRI->getAllocatableSet(MF);

  bool EverMadeChange = false;

  while (1) {
    bool MadeChange = false;

    // Process all basic blocks.
    CEBCandidates.clear();
    for (MachineFunction::iterator I = MF.begin(), E = MF.end();
         I != E; ++I)
      MadeChange |= ProcessBlock(*I);

    // If this iteration over the code changed anything, keep iterating.
    if (!MadeChange) break;
    EverMadeChange = true;
  }
  return EverMadeChange;
}

bool MachineSinking::ProcessBlock(MachineBasicBlock &MBB) {
  // Can't sink anything out of a block that has fewer than two successors.
  if (MBB.succ_size() <= 1 || MBB.empty()) return false;

  // Don't bother sinking code out of unreachable blocks. In addition to being
  // unprofitable, it can also lead to infinite looping, because in an
  // unreachable loop there may be nowhere to stop.
  if (!DT->isReachableFromEntry(&MBB)) return false;

  bool MadeChange = false;

  // Walk the basic block bottom-up. Remember if we saw a store.
  MachineBasicBlock::iterator I = MBB.end();
  --I;
  bool ProcessedBegin, SawStore = false;
  do {
    MachineInstr *MI = I;  // The instruction to sink.

    // Predecrement I (if it's not begin) so that it isn't invalidated by
    // sinking.
    ProcessedBegin = I == MBB.begin();
    if (!ProcessedBegin)
      --I;

    if (MI->isDebugValue())
      continue;

    bool Joined = PerformTrivialForwardCoalescing(MI, &MBB);
    if (Joined) {
      MadeChange = true;
      continue;
    }

    if (SinkInstruction(MI, SawStore))
      ++NumSunk, MadeChange = true;

    // If we just processed the first instruction in the block, we're done.
  } while (!ProcessedBegin);

  return MadeChange;
}

bool MachineSinking::isWorthBreakingCriticalEdge(MachineInstr *MI,
                                                 MachineBasicBlock *From,
                                                 MachineBasicBlock *To) {
  // FIXME: Need much better heuristics.

  // If the pass has already considered breaking this edge (during this pass
  // through the function), then let's go ahead and break it. This means
  // sinking multiple "cheap" instructions into the same block.
  if (!CEBCandidates.insert(std::make_pair(From, To)))
    return true;

  if (!MI->isCopy() && !MI->isAsCheapAsAMove())
    return true;

  // MI is cheap, we probably don't want to break the critical edge for it.
  // However, if this would allow some definitions of its source operands
  // to be sunk then it's probably worth it.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (MRI->hasOneNonDBGUse(Reg))
      return true;
  }

  return false;
}

MachineBasicBlock *MachineSinking::SplitCriticalEdge(MachineInstr *MI,
                                                     MachineBasicBlock *FromBB,
                                                     MachineBasicBlock *ToBB,
                                                     bool BreakPHIEdge) {
  if (!isWorthBreakingCriticalEdge(MI, FromBB, ToBB))
    return 0;

  // Avoid breaking back edge. From == To means backedge for single BB loop.
  if (!SplitEdges || FromBB == ToBB)
    return 0;

  // Check for backedges of more "complex" loops.
  if (LI->getLoopFor(FromBB) == LI->getLoopFor(ToBB) &&
      LI->isLoopHeader(ToBB))
    return 0;

  // It's not always legal to break critical edges and sink the computation
  // to the edge.
  //
  // BB#1:
  //   v1024
  //   Beq BB#3
  //   <fallthrough>
  // BB#2:
  //   ... no uses of v1024
  //   <fallthrough>
  // BB#3:
  //   ...
  //   = v1024
  //
  // If BB#1 -> BB#3 edge is broken and computation of v1024 is inserted:
  //
  // BB#1:
  //   ...
  //   Bne BB#2
  // BB#4:
  //   v1024 =
  //   B BB#3
  // BB#2:
  //   ... no uses of v1024
  //   <fallthrough>
  // BB#3:
  //   ...
  //   = v1024
  //
  // This is incorrect since v1024 is not computed along the BB#1->BB#2->BB#3
  // flow. We need to ensure the new basic block where the computation is
  // sunk to dominates all the uses.
  // It's only legal to break critical edge and sink the computation to the
  // new block if all the predecessors of "To", except for "From", are
  // not dominated by "From". Given SSA property, this means these
  // predecessors are dominated by "To".
  //
  // There is no need to do this check if all the uses are PHI nodes. PHI
  // sources are only defined on the specific predecessor edges.
  if (!BreakPHIEdge) {
    for (MachineBasicBlock::pred_iterator PI = ToBB->pred_begin(),
           E = ToBB->pred_end(); PI != E; ++PI) {
      if (*PI == FromBB)
        continue;
      if (!DT->dominates(ToBB, *PI))
        return 0;
    }
  }

  return FromBB->SplitCriticalEdge(ToBB, this);
}

/// AvoidsSinking - Return true for instructions we never try to sink
/// (insert_subreg, subreg_to_reg, reg_sequence); these are meant to stay close
/// to their source values to make coalescing easier.
static bool AvoidsSinking(MachineInstr *MI, MachineRegisterInfo *MRI) {
  return MI->isInsertSubreg() || MI->isSubregToReg() || MI->isRegSequence();
}

/// collectDebugValues - Scan instructions following MI and collect any
/// matching DBG_VALUEs.
static void collectDebugValues(MachineInstr *MI,
                               SmallVector<MachineInstr *, 2> &DbgValues) {
  DbgValues.clear();
  if (!MI->getOperand(0).isReg())
    return;

  MachineBasicBlock::iterator DI = MI; ++DI;
  for (MachineBasicBlock::iterator DE = MI->getParent()->end();
       DI != DE; ++DI) {
    if (!DI->isDebugValue())
      return;
    if (DI->getOperand(0).isReg() &&
        DI->getOperand(0).getReg() == MI->getOperand(0).getReg())
      DbgValues.push_back(DI);
  }
}

/// FindSuccToSinkTo - Find a successor to sink this instruction to.
MachineBasicBlock *MachineSinking::FindSuccToSinkTo(MachineInstr *MI,
                                                    bool &BreakPHIEdge) {
  // Loop over all the operands of the specified instruction. If there is
  // anything we can't handle, bail out.
  MachineBasicBlock *ParentBlock = MI->getParent();

  // SuccToSinkTo - This is the successor to sink this instruction to, once we
  // decide.
  MachineBasicBlock *SuccToSinkTo = 0;

  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;  // Ignore non-register operands.

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->def_empty(Reg))
          return NULL;

        if (AllocatableSet.test(Reg))
          return NULL;

        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI->def_empty(AliasReg))
            return NULL;

          if (AllocatableSet.test(AliasReg))
            return NULL;
        }
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return NULL;
      }
    } else {
      // Virtual register uses are always safe to sink.
      if (MO.isUse()) continue;

      // If it's not safe to move defs of the register class, then abort.
      if (!TII->isSafeToMoveRegClassDefs(MRI->getRegClass(Reg)))
        return NULL;

      // FIXME: This picks a successor to sink into based on having one
      // successor that dominates all the uses. However, there are cases where
      // sinking can happen but where the sink point isn't a successor. For
      // example:
      //
      //   x = computation
      //   if () {} else {}
      //   use x
      //
      // the instruction could be sunk over the whole diamond for the
      // if/then/else (or loop, etc), allowing it to be sunk into other blocks
      // after that.

      // Virtual register defs can only be sunk if all their uses are in blocks
      // dominated by one of the successors.
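      // Note that AllUsesDominatedByBlock also sets BreakPHIEdge when every
      // use of Reg is a PHI operand flowing in from ParentBlock, in which case
      // the caller must split the edge before sinking.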
      if (SuccToSinkTo) {
        // If a previous operand picked a block to sink to, then this operand
        // must be sinkable to the same block.
        bool LocalUse = false;
        if (!AllUsesDominatedByBlock(Reg, SuccToSinkTo, ParentBlock,
                                     BreakPHIEdge, LocalUse))
          return NULL;

        continue;
      }

      // Otherwise, we should look at all the successors and decide which one
      // we should sink to.
      for (MachineBasicBlock::succ_iterator SI = ParentBlock->succ_begin(),
             E = ParentBlock->succ_end(); SI != E; ++SI) {
        MachineBasicBlock *SuccBlock = *SI;
        bool LocalUse = false;
        if (AllUsesDominatedByBlock(Reg, SuccBlock, ParentBlock,
                                    BreakPHIEdge, LocalUse)) {
          SuccToSinkTo = SuccBlock;
          break;
        }
        if (LocalUse)
          // Def is used locally, it's never safe to move this def.
          return NULL;
      }

      // If we couldn't find a block to sink to, ignore this instruction.
      if (SuccToSinkTo == 0)
        return NULL;
    }
  }

  // It is not possible to sink an instruction into its own block. This can
  // happen with loops.
  if (ParentBlock == SuccToSinkTo)
    return NULL;

  // It's not safe to sink instructions into an EH landing pad. Control flow
  // into a landing pad is implicitly defined.
  if (SuccToSinkTo && SuccToSinkTo->isLandingPad())
    return NULL;

  return SuccToSinkTo;
}

/// SinkInstruction - Determine whether it is safe to sink the specified
/// machine instruction out of its current block into a successor.
bool MachineSinking::SinkInstruction(MachineInstr *MI, bool &SawStore) {
  // Don't sink insert_subreg, subreg_to_reg, reg_sequence. These are meant to
  // be close to the source to make it easier to coalesce.
  if (AvoidsSinking(MI, MRI))
    return false;

  // Check if it's safe to move the instruction.
  if (!MI->isSafeToMove(TII, AA, SawStore))
    return false;

  // FIXME: This should include support for sinking instructions within the
  // block they are currently in to shorten the live ranges. We often get
  // instructions sunk into the top of a large block, but it would be better to
  // also sink them down before their first use in the block. This xform has to
  // be careful not to *increase* register pressure though, e.g. sinking
  // "x = y + z" down if it kills y and z would increase the live ranges of y
  // and z and only shrink the live range of x.

  bool BreakPHIEdge = false;
  MachineBasicBlock *SuccToSinkTo = FindSuccToSinkTo(MI, BreakPHIEdge);

  // If there are no outputs, it must have side-effects.
  if (SuccToSinkTo == 0)
    return false;

  // If the instruction to move defines a dead physical register which is live
  // when leaving the basic block, don't move it because it could turn into a
  // "zombie" define of that preg. E.g., EFLAGS. (<rdar://problem/8030636>)
  for (unsigned I = 0, E = MI->getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = MI->getOperand(I);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue;
    if (SuccToSinkTo->isLiveIn(Reg))
      return false;
  }

  DEBUG(dbgs() << "Sink instr " << *MI << "\tinto block " << *SuccToSinkTo);

  MachineBasicBlock *ParentBlock = MI->getParent();

  // If the block has multiple predecessors, this could introduce computation
  // on a path where it doesn't already exist. Either sink along the critical
  // edge when that is safe, or try to split the edge first; if neither is
  // possible, punt.
  if (SuccToSinkTo->pred_size() > 1) {
    // We cannot sink a load across a critical edge - there may be stores in
    // other code paths.
    bool TryBreak = false;
    bool store = true;
    if (!MI->isSafeToMove(TII, AA, store)) {
      DEBUG(dbgs() << " *** NOTE: Won't sink load along critical edge.\n");
      TryBreak = true;
    }

    // We don't want to sink across a critical edge if we don't dominate the
    // successor. We could be introducing calculations on new code paths.
    if (!TryBreak && !DT->dominates(ParentBlock, SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Critical edge found\n");
      TryBreak = true;
    }

    // Don't sink instructions into a loop.
    if (!TryBreak && LI->isLoopHeader(SuccToSinkTo)) {
      DEBUG(dbgs() << " *** NOTE: Loop header found\n");
      TryBreak = true;
    }

    // Otherwise we are OK with sinking along a critical edge.
    if (!TryBreak)
      DEBUG(dbgs() << "Sinking along critical edge.\n");
    else {
      MachineBasicBlock *NewSucc =
        SplitCriticalEdge(MI, ParentBlock, SuccToSinkTo, BreakPHIEdge);
      if (!NewSucc) {
        DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
              "break critical edge\n");
        return false;
      } else {
        DEBUG(dbgs() << " *** Splitting critical edge:"
              " BB#" << ParentBlock->getNumber()
              << " -- BB#" << NewSucc->getNumber()
              << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
        SuccToSinkTo = NewSucc;
        ++NumSplit;
        BreakPHIEdge = false;
      }
    }
  }

  if (BreakPHIEdge) {
    // BreakPHIEdge is true if all the uses are in the successor MBB being
    // sunk into and they are all PHI nodes. In this case, machine-sink must
    // break the critical edge first.
    MachineBasicBlock *NewSucc = SplitCriticalEdge(MI, ParentBlock,
                                                   SuccToSinkTo, BreakPHIEdge);
    if (!NewSucc) {
      DEBUG(dbgs() << " *** PUNTING: Not legal or profitable to "
            "break critical edge\n");
      return false;
    }

    DEBUG(dbgs() << " *** Splitting critical edge:"
          " BB#" << ParentBlock->getNumber()
          << " -- BB#" << NewSucc->getNumber()
          << " -- BB#" << SuccToSinkTo->getNumber() << '\n');
    SuccToSinkTo = NewSucc;
    ++NumSplit;
  }

  // Determine where to insert into. Skip phi nodes.
  MachineBasicBlock::iterator InsertPos = SuccToSinkTo->begin();
  while (InsertPos != SuccToSinkTo->end() && InsertPos->isPHI())
    ++InsertPos;

  // Collect matching debug values.
  SmallVector<MachineInstr *, 2> DbgValuesToSink;
  collectDebugValues(MI, DbgValuesToSink);

  // Move the instruction.
  SuccToSinkTo->splice(InsertPos, ParentBlock, MI,
                       ++MachineBasicBlock::iterator(MI));

  // Move debug values.
  for (SmallVector<MachineInstr *, 2>::iterator DBI = DbgValuesToSink.begin(),
         DBE = DbgValuesToSink.end(); DBI != DBE; ++DBI) {
    MachineInstr *DbgMI = *DBI;
    SuccToSinkTo->splice(InsertPos, ParentBlock, DbgMI,
                         ++MachineBasicBlock::iterator(DbgMI));
  }

  // Conservatively, clear any kill flags, since it's possible that they are no
  // longer correct.
  MI->clearKillInfo();

  return true;
}