//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the results.
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another example, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use the flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Loads:
//
//     Loads that can be folded into a later instruction. A load is foldable
//     if it defines a virtual register and that register has a single use.
//
// - Optimize Copies and Bitcasts:
//
//     Rewrite copies and bitcasts to avoid cross register bank copies
//     when possible.
//     E.g., consider the following example, where capital and lower-case
//     letters denote different register files:
//       b = copy A    <-- cross-bank copy
//       C = copy b    <-- cross-bank copy
//     =>
//       b = copy A    <-- cross-bank copy
//       C = copy A    <-- same-bank copy
//
//     E.g., for bitcasts:
//       b = bitcast A <-- cross-bank copy
//       C = bitcast b <-- cross-bank copy
//     =>
//       b = bitcast A <-- cross-bank copy
//       C = copy A    <-- same-bank copy
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse, "Number of extension results reused");
STATISTIC(NumCmps, "Number of compares eliminated");
STATISTIC(NumImmFold, "Number of move immediates folded");
STATISTIC(NumLoadFold, "Number of loads folded");
STATISTIC(NumSelects, "Number of selects optimized");
STATISTIC(NumCopiesBitcasts, "Number of copies/bitcasts optimized");

namespace {
class PeepholeOptimizer : public MachineFunctionPass {
  const TargetMachine *TM;
  const TargetInstrInfo *TII;
  MachineRegisterInfo *MRI;
  MachineDominatorTree *DT;   // Machine dominator tree

public:
  static char ID; // Pass identification
  PeepholeOptimizer() : MachineFunctionPass(ID) {
    initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
    if (Aggressive) {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
    }
  }

private:
  bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
  bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                        SmallPtrSet<MachineInstr*, 8> &LocalMIs);
  bool optimizeSelect(MachineInstr *MI);
  bool optimizeCopyOrBitcast(MachineInstr *MI);
  bool isMoveImmediate(MachineInstr *MI,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
  bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                     SmallSet<unsigned, 4> &ImmDefRegs,
                     DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
  bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
};
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)
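
// A concrete (purely illustrative) instance of the extension reuse performed
// by optimizeExtInstr below, assuming a target where <sext> leaves the source
// value in the low sub-register of its result; a later use of the
// pre-extension value can then read that sub-register instead:
//
//   %reg1025 = <sext> %reg1024
//   ...
//   use %reg1024
// =>
//   %reg1025 = <sext> %reg1024
//   ...
//   %reg1026 = COPY %reg1025:<subidx>
//   use %reg1026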

/// optimizeExtInstr - If instruction is a copy-like instruction, i.e. it reads
/// a single register and writes a single register and it does not modify the
/// source, and if the source value is preserved as a sub-register of the
/// result, then replace all reachable uses of the source with the subreg of
/// the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use
  // of the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
    ReachedBBs.insert(UI.getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineOperand &UseMO : MRI->use_nodbg_operands(SrcReg)) {
    MachineInstr *UseMI = UseMO.getParent();
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //     ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range
      // of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result; we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineInstr &UI : MRI->use_nodbg_instructions(DstReg))
      if (UI.isPHI())
        PHIBBs.insert(UI.getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}
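
// For optimizeSelect below, the shape of the rewrite is target-defined
// (described here only as an illustration): analyzeSelect extracts the
// condition and the true/false operands, and optimizeSelect lets the target
// replace the select with a cheaper sequence, e.g. a predicated or
// conditional-move form, before the original select is erased here.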

/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// \brief Check if the registers defined by the pair (RegisterClass, SubReg)
/// share the same register file.
static bool shareSameRegisterFile(const TargetRegisterInfo &TRI,
                                  const TargetRegisterClass *DefRC,
                                  unsigned DefSubReg,
                                  const TargetRegisterClass *SrcRC,
                                  unsigned SrcSubReg) {
  // Same register class.
  if (DefRC == SrcRC)
    return true;

  // Both operands are sub-registers. Check if they share a register class.
  unsigned SrcIdx, DefIdx;
  if (SrcSubReg && DefSubReg)
    return TRI.getCommonSuperRegClass(SrcRC, SrcSubReg, DefRC, DefSubReg,
                                      SrcIdx, DefIdx) != NULL;
  // At most one of the registers is a sub-register. Make it Src to avoid
  // duplicating the test.
  if (!SrcSubReg) {
    std::swap(DefSubReg, SrcSubReg);
    std::swap(DefRC, SrcRC);
  }

  // One of the registers is a sub-register. Check if we can get a superclass.
  if (SrcSubReg)
    return TRI.getMatchingSuperRegClass(SrcRC, DefRC, SrcSubReg) != NULL;
  // Plain copy.
  return TRI.getCommonSubClass(DefRC, SrcRC) != NULL;
}

/// \brief Get the index of the definition and source for \p Copy
/// instruction.
/// \pre Copy.isCopy() or Copy.isBitcast().
/// \return True if the Copy instruction has only one register source
/// and one register definition. Otherwise, \p DefIdx and \p SrcIdx
/// are invalid.
static bool getCopyOrBitcastDefUseIdx(const MachineInstr &Copy,
                                      unsigned &DefIdx, unsigned &SrcIdx) {
  assert((Copy.isCopy() || Copy.isBitcast()) && "Wrong operation type.");
  if (Copy.isCopy()) {
    // Copy instructions are supposed to be: Def = Src.
    if (Copy.getDesc().getNumOperands() != 2)
      return false;
    DefIdx = 0;
    SrcIdx = 1;
    assert(Copy.getOperand(DefIdx).isDef() && "Use comes before def!");
    return true;
  }
  // Bitcast case.
  // Bitcasts with more than one def are not supported.
  if (Copy.getDesc().getNumDefs() != 1)
    return false;
  // Initialize SrcIdx to an undefined operand.
  SrcIdx = Copy.getDesc().getNumOperands();
  for (unsigned OpIdx = 0, EndOpIdx = SrcIdx; OpIdx != EndOpIdx; ++OpIdx) {
    const MachineOperand &MO = Copy.getOperand(OpIdx);
    if (!MO.isReg() || !MO.getReg())
      continue;
    if (MO.isDef()) {
      DefIdx = OpIdx;
      continue;
    }
    // Only remember register sources, and reject multiple sources.
    if (SrcIdx != EndOpIdx)
      // Multiple sources?
      return false;
    SrcIdx = OpIdx;
  }
  return true;
}
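
// Illustrative chain for optimizeCopyOrBitcast below (capital and lower-case
// names denote different register banks, as in the file header):
//   b = copy A    <-- cross-bank copy
//   c = copy b    <-- same-bank copy
//   D = copy c    <-- cross-bank copy
// Walking up the chain from D reaches A, which lives in D's bank, so the last
// copy can be rewritten as "D = copy A" and no longer crosses banks.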

/// \brief Optimize a copy or bitcast instruction to avoid cross
/// register bank copies. The optimization looks through a chain of
/// copies and tries to find a source that has a compatible register
/// class.
/// Two register classes are considered to be compatible if they share
/// the same register bank.
/// New copies issued by this optimization are register allocator
/// friendly. This optimization does not remove any copy as it may
/// overconstrain the register allocator, but replaces some copies when
/// possible.
/// \pre \p MI is a Copy (MI->isCopy() is true)
/// \return True, when \p MI has been optimized. In that case, \p MI has
/// been removed from its parent.
bool PeepholeOptimizer::optimizeCopyOrBitcast(MachineInstr *MI) {
  unsigned DefIdx, SrcIdx;
  if (!MI || !getCopyOrBitcastDefUseIdx(*MI, DefIdx, SrcIdx))
    return false;

  const MachineOperand &MODef = MI->getOperand(DefIdx);
  assert(MODef.isReg() && "Copies must be between registers.");
  unsigned Def = MODef.getReg();

  if (TargetRegisterInfo::isPhysicalRegister(Def))
    return false;

  const TargetRegisterClass *DefRC = MRI->getRegClass(Def);
  unsigned DefSubReg = MODef.getSubReg();

  unsigned Src;
  unsigned SrcSubReg;
  bool ShouldRewrite = false;
  MachineInstr *Copy = MI;
  const TargetRegisterInfo &TRI = *TM->getRegisterInfo();

  // Follow the chain of copies until we reach the top or find a
  // more suitable source.
  do {
    unsigned CopyDefIdx, CopySrcIdx;
    if (!getCopyOrBitcastDefUseIdx(*Copy, CopyDefIdx, CopySrcIdx))
      break;
    const MachineOperand &MO = Copy->getOperand(CopySrcIdx);
    assert(MO.isReg() && "Copies must be between registers.");
    Src = MO.getReg();

    if (TargetRegisterInfo::isPhysicalRegister(Src))
      break;

    const TargetRegisterClass *SrcRC = MRI->getRegClass(Src);
    SrcSubReg = MO.getSubReg();

    // If this source does not incur a cross register bank copy, use it.
    ShouldRewrite = shareSameRegisterFile(TRI, DefRC, DefSubReg, SrcRC,
                                          SrcSubReg);
    // Follow the chain of copies: get the definition of Src.
    Copy = MRI->getVRegDef(Src);
  } while (!ShouldRewrite && Copy && (Copy->isCopy() || Copy->isBitcast()));

  // If we did not find a more suitable source, there is nothing to optimize.
  if (!ShouldRewrite || Src == MI->getOperand(SrcIdx).getReg())
    return false;

  // Rewrite the copy to avoid a cross register bank penalty.
  unsigned NewVR = TargetRegisterInfo::isPhysicalRegister(Def) ? Def :
    MRI->createVirtualRegister(DefRC);
  MachineInstr *NewCopy = BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
                                  TII->get(TargetOpcode::COPY), NewVR)
    .addReg(Src, 0, SrcSubReg);
  NewCopy->getOperand(0).setSubReg(DefSubReg);

  MRI->replaceRegWith(Def, NewVR);
  MRI->clearKillFlags(NewVR);
  MI->eraseFromParent();
  ++NumCopiesBitcasts;
  return true;
}
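
// For isLoadFoldable / optimizeLoadInstr below, the typical case (shown here
// for X86, purely as an illustration) is a single-use load feeding an
// instruction that also has a memory form:
//   %reg1 = MOV32rm <mem>
//   %reg2 = ADD32rr %reg0, %reg1
// =>
//   %reg2 = ADD32rm %reg0, <mem>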

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers, and only when the
/// defined virtual register has a single use.
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneNonDBGUse when inserting
  // loads. It should be checked again when processing uses of the load, since
  // uses can be removed during peephole.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneNonDBGUse(Reg)) {
    FoldAsLoadDefReg = Reg;
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}

/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  DEBUG(dbgs() << "********** PEEPHOLE OPTIMIZER **********\n");
  DEBUG(dbgs() << "********** Function: " << MF.getName() << '\n');

  if (DisablePeephole)
    return false;

  TM = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // Skip debug values. They should not affect this peephole optimization.
      if (MI->isDebugValue())
        continue;

      // If there exists an instruction which belongs to the following
      // categories, we will discard the load candidate.
      if (MI->isPosition() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if (((MI->isBitcast() || MI->isCopy()) && optimizeCopyOrBitcast(MI)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold load after optimizeCmpInstr, since optimizeCmpInstr
        // can enable folding by converting SUB to CMP.
        // Save FoldAsLoadDefReg because optimizeLoadInstr() resets it and we
        // need it for markUsesInDebugValueAsUndef().
        unsigned FoldedReg = FoldAsLoadDefReg;
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted
          // DefMI.
          DEBUG(dbgs() << "Replacing: " << *MI);
          DEBUG(dbgs() << "     With: " << *FoldMI);
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          MRI->markUsesInDebugValueAsUndef(FoldedReg);
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          continue;
        }
      }
    }
  }

  return Changed;
}