//===-- PeepholeOptimizer.cpp - Peephole Optimizations -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Perform peephole optimizations on the machine code:
//
// - Optimize Extensions
//
//     Optimization of sign / zero extension instructions. It may be extended
//     to handle other instructions with similar properties.
//
//     On some targets, some instructions, e.g. X86 sign / zero extension, may
//     leave the source value in the lower part of the result. This
//     optimization will replace some uses of the pre-extension value with
//     uses of the sub-register of the result.
//
// - Optimize Comparisons
//
//     Optimization of comparison instructions. For instance, in this code:
//
//       sub r1, 1
//       cmp r1, 0
//       bz  L1
//
//     If the "sub" instruction already sets (or could be modified to set) the
//     same flag that the "cmp" instruction sets and that "bz" uses, then we
//     can eliminate the "cmp" instruction.
//
//     Another instance, in this code:
//
//       sub r1, r3 | sub r1, imm
//       cmp r3, r1 or cmp r1, r3 | cmp r1, imm
//       bge L1
//
//     If the branch instruction can use a flag from "sub", then we can
//     replace "sub" with "subs" and eliminate the "cmp" instruction.
//
// - Optimize Bitcast pairs:
//
//     v1 = bitcast v0
//     v2 = bitcast v1
//        = v2
//   =>
//     v1 = bitcast v0
//        = v0
//
// - Optimize Loads:
//
//     Loads that can be folded into a later instruction. A load is foldable
//     if it loads to virtual registers and the virtual register defined has
//     a single use.
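//
//     A minimal illustrative sketch of the load fold, in X86-like pseudo
//     machine code (the opcodes and register names here are only an example;
//     whether a particular fold is legal is decided by the target):
//
//       %vreg1 = MOV32rm <mem>            ; single-use load
//       %vreg2 = ADD32rr %vreg0, %vreg1
//     =>
//       %vreg2 = ADD32rm %vreg0, <mem>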
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "peephole-opt"
#include "llvm/CodeGen/Passes.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

// Optimize Extensions
static cl::opt<bool>
Aggressive("aggressive-ext-opt", cl::Hidden,
           cl::desc("Aggressive extension optimization"));

static cl::opt<bool>
DisablePeephole("disable-peephole", cl::Hidden, cl::init(false),
                cl::desc("Disable the peephole optimizer"));

STATISTIC(NumReuse,    "Number of extension results reused");
STATISTIC(NumBitcasts, "Number of bitcasts eliminated");
STATISTIC(NumCmps,     "Number of compares eliminated");
STATISTIC(NumImmFold,  "Number of move immediate folded");
STATISTIC(NumLoadFold, "Number of loads folded");
STATISTIC(NumSelects,  "Number of selects optimized");

namespace {
  class PeepholeOptimizer : public MachineFunctionPass {
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineRegisterInfo   *MRI;
    MachineDominatorTree  *DT;  // Machine dominator tree

  public:
    static char ID; // Pass identification
    PeepholeOptimizer() : MachineFunctionPass(ID) {
      initializePeepholeOptimizerPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
      if (Aggressive) {
        AU.addRequired<MachineDominatorTree>();
        AU.addPreserved<MachineDominatorTree>();
      }
    }

  private:
    bool optimizeBitcastInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeCmpInstr(MachineInstr *MI, MachineBasicBlock *MBB);
    bool optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                          SmallPtrSet<MachineInstr*, 8> &LocalMIs);
    bool optimizeSelect(MachineInstr *MI);
    bool isMoveImmediate(MachineInstr *MI,
                         SmallSet<unsigned, 4> &ImmDefRegs,
                         DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                       SmallSet<unsigned, 4> &ImmDefRegs,
                       DenseMap<unsigned, MachineInstr*> &ImmDefMIs);
    bool isLoadFoldable(MachineInstr *MI, unsigned &FoldAsLoadDefReg);
  };
}

char PeepholeOptimizer::ID = 0;
char &llvm::PeepholeOptimizerID = PeepholeOptimizer::ID;
INITIALIZE_PASS_BEGIN(PeepholeOptimizer, "peephole-opts",
                      "Peephole Optimizations", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(PeepholeOptimizer, "peephole-opts",
                    "Peephole Optimizations", false, false)
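
// A minimal sketch of the extension optimization implemented below, in
// X86-like pseudo machine code. The opcode, sub-register index, and virtual
// register names are purely illustrative; the target decides, via
// isCoalescableExtInstr, whether the low bits of the result preserve the
// source value.
//
//   %vreg1 = MOVSX32rr8 %vreg0        ; sign-extend, low 8 bits == %vreg0
//   ...
//           = use %vreg0
// =>
//   %vreg1 = MOVSX32rr8 %vreg0
//   ...
//   %vreg2 = COPY %vreg1:sub_8bit
//           = use %vreg2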

/// optimizeExtInstr - If the instruction is a copy-like instruction, i.e. it
/// reads a single register and writes a single register and it does not
/// modify the source, and if the source value is preserved as a sub-register
/// of the result, then replace all reachable uses of the source with the
/// subreg of the result.
///
/// Do not generate an EXTRACT that is used only in a debug use, as this
/// changes the code. Since this code does not currently share EXTRACTs, just
/// ignore all debug uses.
bool PeepholeOptimizer::
optimizeExtInstr(MachineInstr *MI, MachineBasicBlock *MBB,
                 SmallPtrSet<MachineInstr*, 8> &LocalMIs) {
  unsigned SrcReg, DstReg, SubIdx;
  if (!TII->isCoalescableExtInstr(*MI, SrcReg, DstReg, SubIdx))
    return false;

  if (TargetRegisterInfo::isPhysicalRegister(DstReg) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg))
    return false;

  if (MRI->hasOneNonDBGUse(SrcReg))
    // No other uses.
    return false;

  // Ensure DstReg can get a register class that actually supports
  // sub-registers. Don't change the class until we commit.
  const TargetRegisterClass *DstRC = MRI->getRegClass(DstReg);
  DstRC = TM->getRegisterInfo()->getSubClassWithSubReg(DstRC, SubIdx);
  if (!DstRC)
    return false;

  // The ext instr may be operating on a sub-register of SrcReg as well.
  // PPC::EXTSW is a 32 -> 64-bit sign extension, but it reads a 64-bit
  // register.
  // If UseSrcSubIdx is set, SubIdx also applies to SrcReg, and only uses of
  // SrcReg:SubIdx should be replaced.
  bool UseSrcSubIdx = TM->getRegisterInfo()->
    getSubClassWithSubReg(MRI->getRegClass(SrcReg), SubIdx) != 0;

  // The source has other uses. See if we can replace the other uses with use
  // of the result of the extension.
  SmallPtrSet<MachineBasicBlock*, 4> ReachedBBs;
  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI)
    ReachedBBs.insert(UI->getParent());

  // Uses that are in the same BB as uses of the result of the instruction.
  SmallVector<MachineOperand*, 8> Uses;

  // Uses that the result of the instruction can reach.
  SmallVector<MachineOperand*, 8> ExtendedUses;

  bool ExtendLife = true;
  for (MachineRegisterInfo::use_nodbg_iterator
         UI = MRI->use_nodbg_begin(SrcReg), UE = MRI->use_nodbg_end();
       UI != UE; ++UI) {
    MachineOperand &UseMO = UI.getOperand();
    MachineInstr *UseMI = &*UI;
    if (UseMI == MI)
      continue;

    if (UseMI->isPHI()) {
      ExtendLife = false;
      continue;
    }

    // Only accept uses of SrcReg:SubIdx.
    if (UseSrcSubIdx && UseMO.getSubReg() != SubIdx)
      continue;

    // It's an error to translate this:
    //
    //    %reg1025 = <sext> %reg1024
    //    ...
    //    %reg1026 = SUBREG_TO_REG 0, %reg1024, 4
    //
    // into this:
    //
    //    %reg1025 = <sext> %reg1024
    //    ...
    //    %reg1027 = COPY %reg1025:4
    //    %reg1026 = SUBREG_TO_REG 0, %reg1027, 4
    //
    // The problem here is that SUBREG_TO_REG is there to assert that an
    // implicit zext occurs. It doesn't insert a zext instruction. If we allow
    // the COPY here, it will give us the value after the <sext>, not the
    // original value of %reg1024 before <sext>.
    if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
      continue;

    MachineBasicBlock *UseMBB = UseMI->getParent();
    if (UseMBB == MBB) {
      // Local uses that come after the extension.
      if (!LocalMIs.count(UseMI))
        Uses.push_back(&UseMO);
    } else if (ReachedBBs.count(UseMBB)) {
      // Non-local uses where the result of the extension is used. Always
      // replace these unless it's a PHI.
      Uses.push_back(&UseMO);
    } else if (Aggressive && DT->dominates(MBB, UseMBB)) {
      // We may want to extend the live range of the extension result in order
      // to replace these uses.
      ExtendedUses.push_back(&UseMO);
    } else {
      // Both will be live out of the def MBB anyway. Don't extend live range
      // of the extension result.
      ExtendLife = false;
      break;
    }
  }

  if (ExtendLife && !ExtendedUses.empty())
    // Extend the liveness of the extension result.
    std::copy(ExtendedUses.begin(), ExtendedUses.end(),
              std::back_inserter(Uses));

  // Now replace all uses.
  bool Changed = false;
  if (!Uses.empty()) {
    SmallPtrSet<MachineBasicBlock*, 4> PHIBBs;

    // Look for PHI uses of the extended result, we don't want to extend the
    // liveness of a PHI input. It breaks all kinds of assumptions downstream.
    // A PHI use is expected to be the kill of its source values.
    for (MachineRegisterInfo::use_nodbg_iterator
           UI = MRI->use_nodbg_begin(DstReg), UE = MRI->use_nodbg_end();
         UI != UE; ++UI)
      if (UI->isPHI())
        PHIBBs.insert(UI->getParent());

    const TargetRegisterClass *RC = MRI->getRegClass(SrcReg);
    for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
      MachineOperand *UseMO = Uses[i];
      MachineInstr *UseMI = UseMO->getParent();
      MachineBasicBlock *UseMBB = UseMI->getParent();
      if (PHIBBs.count(UseMBB))
        continue;

      // About to add uses of DstReg, clear DstReg's kill flags.
      if (!Changed) {
        MRI->clearKillFlags(DstReg);
        MRI->constrainRegClass(DstReg, DstRC);
      }

      unsigned NewVR = MRI->createVirtualRegister(RC);
      MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI->getDebugLoc(),
                                   TII->get(TargetOpcode::COPY), NewVR)
        .addReg(DstReg, 0, SubIdx);
      // SubIdx applies to both SrcReg and DstReg when UseSrcSubIdx is set.
      if (UseSrcSubIdx) {
        Copy->getOperand(0).setSubReg(SubIdx);
        Copy->getOperand(0).setIsUndef();
      }
      UseMO->setReg(NewVR);
      ++NumReuse;
      Changed = true;
    }
  }

  return Changed;
}

/// optimizeBitcastInstr - If the instruction is a bitcast instruction A that
/// cannot be optimized away during isel (e.g. ARM::VMOVSR, which bitcasts a
/// value across register classes), and the source is defined by another
/// bitcast instruction B, and if the register class of the source of B
/// matches the register class of the def of instruction A, then it is legal
/// to replace all uses of the def of A with the source of B. e.g.
///
///   %vreg0<def> = VMOVSR %vreg1
///   %vreg3<def> = VMOVRS %vreg0
///
/// Replace all uses of vreg3 with vreg1.
bool PeepholeOptimizer::optimizeBitcastInstr(MachineInstr *MI,
                                             MachineBasicBlock *MBB) {
  unsigned NumDefs = MI->getDesc().getNumDefs();
  unsigned NumSrcs = MI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;

  unsigned Def = 0;
  unsigned Src = 0;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (MO.isDef())
      Def = Reg;
    else if (Src)
      // Multiple sources?
      return false;
    else
      Src = Reg;
  }

  assert(Def && Src && "Malformed bitcast instruction!");

  MachineInstr *DefMI = MRI->getVRegDef(Src);
  if (!DefMI || !DefMI->isBitcast())
    return false;

  unsigned SrcSrc = 0;
  NumDefs = DefMI->getDesc().getNumDefs();
  NumSrcs = DefMI->getDesc().getNumOperands() - NumDefs;
  if (NumDefs != 1)
    return false;
  for (unsigned i = 0, e = NumDefs + NumSrcs; i != e; ++i) {
    const MachineOperand &MO = DefMI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    if (!MO.isDef()) {
      if (SrcSrc)
        // Multiple sources?
        return false;
      else
        SrcSrc = Reg;
    }
  }

  if (MRI->getRegClass(SrcSrc) != MRI->getRegClass(Def))
    return false;

  MRI->replaceRegWith(Def, SrcSrc);
  MRI->clearKillFlags(SrcSrc);
  MI->eraseFromParent();
  ++NumBitcasts;
  return true;
}

/// optimizeCmpInstr - If the instruction is a compare and the previous
/// instruction it's comparing against already sets (or could be modified to
/// set) the same flag as the compare, then we can remove the comparison and
/// use the flag from the previous instruction.
bool PeepholeOptimizer::optimizeCmpInstr(MachineInstr *MI,
                                         MachineBasicBlock *MBB) {
  // If this instruction is a comparison against zero and isn't comparing a
  // physical register, we can try to optimize it.
  unsigned SrcReg, SrcReg2;
  int CmpMask, CmpValue;
  if (!TII->analyzeCompare(MI, SrcReg, SrcReg2, CmpMask, CmpValue) ||
      TargetRegisterInfo::isPhysicalRegister(SrcReg) ||
      (SrcReg2 != 0 && TargetRegisterInfo::isPhysicalRegister(SrcReg2)))
    return false;

  // Attempt to optimize the comparison instruction.
  if (TII->optimizeCompareInstr(MI, SrcReg, SrcReg2, CmpMask, CmpValue, MRI)) {
    ++NumCmps;
    return true;
  }

  return false;
}
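
// Note: what "optimizing" a select means is defined entirely by the target
// via the analyzeSelect/optimizeSelect hooks used below. As one illustration
// (not a statement about every target), on a machine with predicated
// execution such as ARM, a select pseudo-instruction like MOVCC may be
// rewritten into a predicated form of the instruction that defines one of
// its operands.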
/// Optimize a select instruction.
bool PeepholeOptimizer::optimizeSelect(MachineInstr *MI) {
  unsigned TrueOp = 0;
  unsigned FalseOp = 0;
  bool Optimizable = false;
  SmallVector<MachineOperand, 4> Cond;
  if (TII->analyzeSelect(MI, Cond, TrueOp, FalseOp, Optimizable))
    return false;
  if (!Optimizable)
    return false;
  if (!TII->optimizeSelect(MI))
    return false;
  MI->eraseFromParent();
  ++NumSelects;
  return true;
}

/// isLoadFoldable - Check whether MI is a candidate for folding into a later
/// instruction. We only fold loads to virtual registers where the virtual
/// register defined has a single use.
bool PeepholeOptimizer::isLoadFoldable(MachineInstr *MI,
                                       unsigned &FoldAsLoadDefReg) {
  if (!MI->canFoldAsLoad() || !MI->mayLoad())
    return false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.getNumDefs() != 1)
    return false;

  unsigned Reg = MI->getOperand(0).getReg();
  // To reduce compilation time, we check MRI->hasOneUse when inserting
  // loads. It should be checked again when processing the uses of the load,
  // since uses can be removed during peephole.
  if (!MI->getOperand(0).getSubReg() &&
      TargetRegisterInfo::isVirtualRegister(Reg) &&
      MRI->hasOneUse(Reg)) {
    FoldAsLoadDefReg = Reg;
    return true;
  }
  return false;
}

bool PeepholeOptimizer::isMoveImmediate(MachineInstr *MI,
                                        SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isMoveImmediate())
    return false;
  if (MCID.getNumDefs() != 1)
    return false;
  unsigned Reg = MI->getOperand(0).getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg)) {
    ImmDefMIs.insert(std::make_pair(Reg, MI));
    ImmDefRegs.insert(Reg);
    return true;
  }

  return false;
}
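
// A minimal sketch of the immediate fold performed below, in X86-like pseudo
// machine code. The opcodes and register names are illustrative only; whether
// a given fold is legal is decided by the target's FoldImmediate hook.
//
//   %vreg1 = MOV32ri 42
//   %vreg2 = ADD32rr %vreg0, %vreg1
// =>
//   %vreg2 = ADD32ri %vreg0, 42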
/// foldImmediate - Try folding register operands that are defined by move
/// immediate instructions, i.e. a trivial constant folding optimization, if
/// and only if the def and use are in the same BB.
bool PeepholeOptimizer::foldImmediate(MachineInstr *MI, MachineBasicBlock *MBB,
                                      SmallSet<unsigned, 4> &ImmDefRegs,
                                 DenseMap<unsigned, MachineInstr*> &ImmDefMIs) {
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (ImmDefRegs.count(Reg) == 0)
      continue;
    DenseMap<unsigned, MachineInstr*>::iterator II = ImmDefMIs.find(Reg);
    assert(II != ImmDefMIs.end());
    if (TII->FoldImmediate(MI, II->second, Reg, MRI)) {
      ++NumImmFold;
      return true;
    }
  }
  return false;
}

bool PeepholeOptimizer::runOnMachineFunction(MachineFunction &MF) {
  if (DisablePeephole)
    return false;

  TM  = &MF.getTarget();
  TII = TM->getInstrInfo();
  MRI = &MF.getRegInfo();
  DT  = Aggressive ? &getAnalysis<MachineDominatorTree>() : 0;

  bool Changed = false;

  SmallPtrSet<MachineInstr*, 8> LocalMIs;
  SmallSet<unsigned, 4> ImmDefRegs;
  DenseMap<unsigned, MachineInstr*> ImmDefMIs;
  unsigned FoldAsLoadDefReg;
  for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) {
    MachineBasicBlock *MBB = &*I;

    bool SeenMoveImm = false;
    LocalMIs.clear();
    ImmDefRegs.clear();
    ImmDefMIs.clear();
    FoldAsLoadDefReg = 0;

    for (MachineBasicBlock::iterator
           MII = I->begin(), MIE = I->end(); MII != MIE; ) {
      MachineInstr *MI = &*MII;
      // We may be erasing MI below, increment MII now.
      ++MII;
      LocalMIs.insert(MI);

      // If there exists an instruction which belongs to the following
      // categories, we will discard the load candidate.
      if (MI->isLabel() || MI->isPHI() || MI->isImplicitDef() ||
          MI->isKill() || MI->isInlineAsm() || MI->isDebugValue() ||
          MI->hasUnmodeledSideEffects()) {
        FoldAsLoadDefReg = 0;
        continue;
      }
      if (MI->mayStore() || MI->isCall())
        FoldAsLoadDefReg = 0;

      if ((MI->isBitcast() && optimizeBitcastInstr(MI, MBB)) ||
          (MI->isCompare() && optimizeCmpInstr(MI, MBB)) ||
          (MI->isSelect() && optimizeSelect(MI))) {
        // MI is deleted.
        LocalMIs.erase(MI);
        Changed = true;
        continue;
      }

      if (isMoveImmediate(MI, ImmDefRegs, ImmDefMIs)) {
        SeenMoveImm = true;
      } else {
        Changed |= optimizeExtInstr(MI, MBB, LocalMIs);
        // optimizeExtInstr might have created new instructions after MI
        // and before the already incremented MII. Adjust MII so that the
        // next iteration sees the new instructions.
        MII = MI;
        ++MII;
        if (SeenMoveImm)
          Changed |= foldImmediate(MI, MBB, ImmDefRegs, ImmDefMIs);
      }

      // Check whether MI is a load candidate for folding into a later
      // instruction. If MI is not a candidate, check whether we can fold an
      // earlier load into MI.
      if (!isLoadFoldable(MI, FoldAsLoadDefReg) && FoldAsLoadDefReg) {
        // We need to fold the load after optimizeCmpInstr, since
        // optimizeCmpInstr can enable folding by converting SUB to CMP.
        MachineInstr *DefMI = 0;
        MachineInstr *FoldMI = TII->optimizeLoadInstr(MI, MRI,
                                                      FoldAsLoadDefReg, DefMI);
        if (FoldMI) {
          // Update LocalMIs since we replaced MI with FoldMI and deleted
          // DefMI.
          LocalMIs.erase(MI);
          LocalMIs.erase(DefMI);
          LocalMIs.insert(FoldMI);
          MI->eraseFromParent();
          DefMI->eraseFromParent();
          ++NumLoadFold;

          // MI is replaced with FoldMI.
          Changed = true;
          continue;
        }
      }
    }
  }

  return Changed;
}