//===-- PPCInstrInfo.cpp - PowerPC Instruction Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the PowerPC implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "PPCInstrInfo.h"
#include "MCTargetDesc/PPCPredicates.h"
#include "PPC.h"
#include "PPCHazardRecognizers.h"
#include "PPCInstrBuilder.h"
#include "PPCMachineFunctionInfo.h"
#include "PPCTargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR
#include "PPCGenInstrInfo.inc"

using namespace llvm;

static cl::opt<bool>
DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden,
                   cl::desc("Disable analysis for CTR loops"));

static cl::opt<bool>
DisableCmpOpt("disable-ppc-cmp-opt",
              cl::desc("Disable compare instruction optimization"), cl::Hidden);

PPCInstrInfo::PPCInstrInfo(PPCTargetMachine &tm)
  : PPCGenInstrInfo(PPC::ADJCALLSTACKDOWN, PPC::ADJCALLSTACKUP),
    TM(tm), RI(*TM.getSubtargetImpl(), *this) {}

/// CreateTargetHazardRecognizer - Return the hazard recognizer to use for
/// this target when scheduling the DAG.
ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetHazardRecognizer(
  const TargetMachine *TM,
  const ScheduleDAG *DAG) const {
  unsigned Directive = TM->getSubtarget<PPCSubtarget>().getDarwinDirective();
  if (Directive == PPC::DIR_440 || Directive == PPC::DIR_A2 ||
      Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500) {
    const InstrItineraryData *II = TM->getInstrItineraryData();
    return new PPCScoreboardHazardRecognizer(II, DAG);
  }

  return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
}

/// CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer
/// to use for this target when scheduling the DAG.
ScheduleHazardRecognizer *PPCInstrInfo::CreateTargetPostRAHazardRecognizer(
  const InstrItineraryData *II,
  const ScheduleDAG *DAG) const {
  unsigned Directive = TM.getSubtarget<PPCSubtarget>().getDarwinDirective();

  // Most subtargets use a PPC970 recognizer.
  if (Directive != PPC::DIR_440 && Directive != PPC::DIR_A2 &&
      Directive != PPC::DIR_E500mc && Directive != PPC::DIR_E5500) {
    const TargetInstrInfo *TII = TM.getInstrInfo();
    assert(TII && "No InstrInfo?");

    return new PPCHazardRecognizer970(*TII);
  }

  return new PPCScoreboardHazardRecognizer(II, DAG);
}

// Detect 32 -> 64-bit extensions where we may reuse the low sub-register.
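// For example, after an EXTSW_32_64 the low 32 bits of the 64-bit destination
// hold exactly the 32-bit source value, so the coalescer may rewrite uses of
// the source as the sub_32 sub-register of the destination.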
bool PPCInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                         unsigned &SrcReg, unsigned &DstReg,
                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: return false;
  case PPC::EXTSW:
  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;
    return true;
  }
}

unsigned PPCInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  // Note: This list must be kept consistent with LoadRegFromStackSlot.
  switch (MI->getOpcode()) {
  default: break;
  case PPC::LD:
  case PPC::LWZ:
  case PPC::LFS:
  case PPC::LFD:
  case PPC::RESTORE_CR:
  case PPC::LVX:
  case PPC::RESTORE_VRSAVE:
    // Check for the operands added by addFrameReference (the immediate is the
    // offset which defaults to 0).
    if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
        MI->getOperand(2).isFI()) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned PPCInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  // Note: This list must be kept consistent with StoreRegToStackSlot.
  switch (MI->getOpcode()) {
  default: break;
  case PPC::STD:
  case PPC::STW:
  case PPC::STFS:
  case PPC::STFD:
  case PPC::SPILL_CR:
  case PPC::STVX:
  case PPC::SPILL_VRSAVE:
    // Check for the operands added by addFrameReference (the immediate is the
    // offset which defaults to 0).
    if (MI->getOperand(1).isImm() && !MI->getOperand(1).getImm() &&
        MI->getOperand(2).isFI()) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

// commuteInstruction - We can commute rlwimi instructions, but only if the
// rotate amt is zero. We also have to munge the immediates a bit.
MachineInstr *
PPCInstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
  MachineFunction &MF = *MI->getParent()->getParent();

  // Normal instructions can be commuted the obvious way.
  if (MI->getOpcode() != PPC::RLWIMI &&
      MI->getOpcode() != PPC::RLWIMIo)
    return TargetInstrInfo::commuteInstruction(MI, NewMI);

  // Cannot commute if it has a non-zero rotate count.
  if (MI->getOperand(3).getImm() != 0)
    return 0;

  // If we have a zero rotate count, we have:
  //   M = mask(MB,ME)
  //   Op0 = (Op1 & ~M) | (Op2 & M)
  // Change this to:
  //   M = mask((ME+1)&31, (MB-1)&31)
  //   Op0 = (Op2 & ~M) | (Op1 & M)
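  // For example, with MB=16 and ME=31 the original mask is 0x0000FFFF; the
  // commuted form uses MB=(31+1)&31=0 and ME=(16-1)&31=15, i.e. the
  // complementary mask 0xFFFF0000, with the two source operands swapped.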

  // Swap op1/op2
  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  unsigned Reg2 = MI->getOperand(2).getReg();
  bool Reg1IsKill = MI->getOperand(1).isKill();
  bool Reg2IsKill = MI->getOperand(2).isKill();
  bool ChangeReg0 = false;
  // If machine instrs are no longer in two-address forms, update
  // destination register as well.
  if (Reg0 == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  // Masks.
  unsigned MB = MI->getOperand(4).getImm();
  unsigned ME = MI->getOperand(5).getImm();

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = ChangeReg0 ? Reg2 : MI->getOperand(0).getReg();
    bool Reg0IsDead = MI->getOperand(0).isDead();
    return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
      .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
      .addReg(Reg2, getKillRegState(Reg2IsKill))
      .addReg(Reg1, getKillRegState(Reg1IsKill))
      .addImm((ME+1) & 31)
      .addImm((MB-1) & 31);
  }

  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(2).setReg(Reg1);
  MI->getOperand(1).setReg(Reg2);
  MI->getOperand(2).setIsKill(Reg1IsKill);
  MI->getOperand(1).setIsKill(Reg2IsKill);

  // Swap the mask around.
  MI->getOperand(4).setImm((ME+1) & 31);
  MI->getOperand(5).setImm((MB-1) & 31);
  return MI;
}

void PPCInstrInfo::insertNoop(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI) const {
  DebugLoc DL;
  BuildMI(MBB, MI, DL, get(PPC::NOP));
}


// Branch analysis.
// Note: If the condition register is set to CTR or CTR8 then this is a
// BDNZ (imm == 1) or BDZ (imm == 0) branch.
bool PPCInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 SmallVectorImpl<MachineOperand> &Cond,
                                 bool AllowModify) const {
  bool isPPC64 = TM.getSubtargetImpl()->isPPC64();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin())
    return false;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return false;
    --I;
  }
  if (!isUnpredicatedTerminator(I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (LastInst->getOpcode() == PPC::B) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    } else if (LastInst->getOpcode() == PPC::BCC) {
      if (!LastInst->getOperand(2).isMBB())
        return true;
      // Block ends with fall-through condbranch.
      TBB = LastInst->getOperand(2).getMBB();
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
      return false;
    } else if (LastInst->getOpcode() == PPC::BDNZ8 ||
               LastInst->getOpcode() == PPC::BDNZ) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(1));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    } else if (LastInst->getOpcode() == PPC::BDZ8 ||
               LastInst->getOpcode() == PPC::BDZ) {
      if (!LastInst->getOperand(0).isMBB())
        return true;
      if (DisableCTRLoopAnal)
        return true;
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(0));
      Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                               true));
      return false;
    }

    // Otherwise, don't know what this is.
    return true;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with PPC::B and PPC::BCC, handle it.
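  // For example, a block ending in "BCC pred, CRx, %taken ; B %fallthru"
  // produces TBB = %taken, FBB = %fallthru, and Cond = { pred, CRx }.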
  if (SecondLastInst->getOpcode() == PPC::BCC &&
      LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(2).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(2).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst->getOpcode() == PPC::BDNZ8 ||
              SecondLastInst->getOpcode() == PPC::BDNZ) &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(1));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  } else if ((SecondLastInst->getOpcode() == PPC::BDZ8 ||
              SecondLastInst->getOpcode() == PPC::BDZ) &&
             LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB() ||
        !LastInst->getOperand(0).isMBB())
      return true;
    if (DisableCTRLoopAnal)
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(0));
    Cond.push_back(MachineOperand::CreateReg(isPPC64 ? PPC::CTR8 : PPC::CTR,
                                             true));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two PPC::Bs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == PPC::B &&
      LastInst->getOpcode() == PPC::B) {
    if (!SecondLastInst->getOperand(0).isMBB())
      return true;
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    if (AllowModify)
      I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned PPCInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  while (I->isDebugValue()) {
    if (I == MBB.begin())
      return 0;
    --I;
  }
  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

unsigned
PPCInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const SmallVectorImpl<MachineOperand> &Cond,
                           DebugLoc DL) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "PPC branch conditions have two components!");

  bool isPPC64 = TM.getSubtargetImpl()->isPPC64();

  // One-way branch.
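  // Cond mirrors what AnalyzeBranch produced: Cond[0] is a PPC::Predicate
  // (or 1/0 for bdnz/bdz) and Cond[1] is the CR field (or CTR/CTR8).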
  if (FBB == 0) {
    if (Cond.empty())   // Unconditional branch
      BuildMI(&MBB, DL, get(PPC::B)).addMBB(TBB);
    else if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
      BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                              (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                              (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
    else                // Conditional branch
      BuildMI(&MBB, DL, get(PPC::BCC))
        .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
    return 1;
  }

  // Two-way Conditional Branch.
  if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
    BuildMI(&MBB, DL, get(Cond[0].getImm() ?
                            (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                            (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
  else
    BuildMI(&MBB, DL, get(PPC::BCC))
      .addImm(Cond[0].getImm()).addReg(Cond[1].getReg()).addMBB(TBB);
  BuildMI(&MBB, DL, get(PPC::B)).addMBB(FBB);
  return 2;
}

// Select analysis.
bool PPCInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                const SmallVectorImpl<MachineOperand> &Cond,
                unsigned TrueReg, unsigned FalseReg,
                int &CondCycles, int &TrueCycles, int &FalseCycles) const {
  if (!TM.getSubtargetImpl()->hasISEL())
    return false;

  if (Cond.size() != 2)
    return false;

  // If this is really a bdnz-like condition, then it cannot be turned into a
  // select.
  if (Cond[1].getReg() == PPC::CTR || Cond[1].getReg() == PPC::CTR8)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // isel is for regular integer GPRs only.
  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC))
    return false;

  // FIXME: These numbers are for the A2, how well they work for other cores is
  // an open question. On the A2, the isel instruction has a 2-cycle latency
  // but single-cycle throughput. These numbers are used in combination with
  // the MispredictPenalty setting from the active SchedMachineModel.
  CondCycles = 1;
  TrueCycles = 1;
  FalseCycles = 1;

  return true;
}

void PPCInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI, DebugLoc dl,
                                unsigned DestReg,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                unsigned TrueReg, unsigned FalseReg) const {
  assert(Cond.size() == 2 &&
         "PPC branch conditions have two components!");

  assert(TM.getSubtargetImpl()->hasISEL() &&
         "Cannot insert select on target without ISEL support");

  // Get the register classes.
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  assert(RC && "TrueReg and FalseReg must have overlapping register classes");
  assert((PPC::GPRCRegClass.hasSubClassEq(RC) ||
          PPC::G8RCRegClass.hasSubClassEq(RC)) &&
         "isel is for regular integer GPRs only");

  unsigned OpCode =
    PPC::GPRCRegClass.hasSubClassEq(RC) ? PPC::ISEL : PPC::ISEL8;
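  // isel tests a single CR bit, so map the two-component branch predicate
  // onto a (CR sub-register, operand order) pair: the negated predicates test
  // the same bit but swap the true/false inputs.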
  unsigned SelectPred = Cond[0].getImm();

  unsigned SubIdx;
  bool SwapOps;
  switch (SelectPred) {
  default: llvm_unreachable("invalid predicate for isel");
  case PPC::PRED_EQ: SubIdx = PPC::sub_eq; SwapOps = false; break;
  case PPC::PRED_NE: SubIdx = PPC::sub_eq; SwapOps = true; break;
  case PPC::PRED_LT: SubIdx = PPC::sub_lt; SwapOps = false; break;
  case PPC::PRED_GE: SubIdx = PPC::sub_lt; SwapOps = true; break;
  case PPC::PRED_GT: SubIdx = PPC::sub_gt; SwapOps = false; break;
  case PPC::PRED_LE: SubIdx = PPC::sub_gt; SwapOps = true; break;
  case PPC::PRED_UN: SubIdx = PPC::sub_un; SwapOps = false; break;
  case PPC::PRED_NU: SubIdx = PPC::sub_un; SwapOps = true; break;
  }

  unsigned FirstReg =  SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg  : FalseReg;

  // The first input register of isel cannot be r0. If it is a member
  // of a register class that can be r0, then copy it first (the
  // register allocator should eliminate the copy).
  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
    const TargetRegisterClass *FirstRC =
      MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
        &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
    unsigned OldFirstReg = FirstReg;
    FirstReg = MRI.createVirtualRegister(FirstRC);
    BuildMI(MBB, MI, dl, get(TargetOpcode::COPY), FirstReg)
      .addReg(OldFirstReg);
  }

  BuildMI(MBB, MI, dl, get(OpCode), DestReg)
    .addReg(FirstReg).addReg(SecondReg)
    .addReg(Cond[1].getReg(), 0, SubIdx);
}

void PPCInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, DebugLoc DL,
                               unsigned DestReg, unsigned SrcReg,
                               bool KillSrc) const {
  unsigned Opc;
  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR;
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::OR8;
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::FMR;
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::MCRF;
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::VOR;
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
    Opc = PPC::CROR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  const MCInstrDesc &MCID = get(Opc);
  if (MCID.getNumOperands() == 3)
    BuildMI(MBB, I, DL, MCID, DestReg)
      .addReg(SrcReg).addReg(SrcReg, getKillRegState(KillSrc));
  else
    BuildMI(MBB, I, DL, MCID, DestReg).addReg(SrcReg, getKillRegState(KillSrc));
}

// This function returns true if a CR spill is necessary and false otherwise.
bool
PPCInstrInfo::StoreRegToStackSlot(MachineFunction &MF,
                                  unsigned SrcReg, bool isKill,
                                  int FrameIdx,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs,
                                  bool &NonRI, bool &SpillsVRS) const {
  // Note: If additional store instructions are added here,
  // update isStoreToStackSlot.
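  // NonRI flags spills that only have a reg+reg addressing form (e.g. stvx),
  // and SpillsVRS flags VRSAVE spills; the caller records both on
  // PPCFunctionInfo.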

  DebugLoc DL;
  if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STW))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STD))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFD))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STFS))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_CR))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
    return true;
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
    // FIXME: We use CRi here because there is no mtcrf on a bit. Since the
    // backend currently only uses CR1EQ as an individual bit, this should
    // not cause any bug. If we need other uses of CR bits, the following
    // code may be invalid.
    unsigned Reg = 0;
    if (SrcReg == PPC::CR0LT || SrcReg == PPC::CR0GT ||
        SrcReg == PPC::CR0EQ || SrcReg == PPC::CR0UN)
      Reg = PPC::CR0;
    else if (SrcReg == PPC::CR1LT || SrcReg == PPC::CR1GT ||
             SrcReg == PPC::CR1EQ || SrcReg == PPC::CR1UN)
      Reg = PPC::CR1;
    else if (SrcReg == PPC::CR2LT || SrcReg == PPC::CR2GT ||
             SrcReg == PPC::CR2EQ || SrcReg == PPC::CR2UN)
      Reg = PPC::CR2;
    else if (SrcReg == PPC::CR3LT || SrcReg == PPC::CR3GT ||
             SrcReg == PPC::CR3EQ || SrcReg == PPC::CR3UN)
      Reg = PPC::CR3;
    else if (SrcReg == PPC::CR4LT || SrcReg == PPC::CR4GT ||
             SrcReg == PPC::CR4EQ || SrcReg == PPC::CR4UN)
      Reg = PPC::CR4;
    else if (SrcReg == PPC::CR5LT || SrcReg == PPC::CR5GT ||
             SrcReg == PPC::CR5EQ || SrcReg == PPC::CR5UN)
      Reg = PPC::CR5;
    else if (SrcReg == PPC::CR6LT || SrcReg == PPC::CR6GT ||
             SrcReg == PPC::CR6EQ || SrcReg == PPC::CR6UN)
      Reg = PPC::CR6;
    else if (SrcReg == PPC::CR7LT || SrcReg == PPC::CR7GT ||
             SrcReg == PPC::CR7EQ || SrcReg == PPC::CR7UN)
      Reg = PPC::CR7;

    return StoreRegToStackSlot(MF, Reg, isKill, FrameIdx,
                               &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);

  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::STVX))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
    assert(TM.getSubtargetImpl()->isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::SPILL_VRSAVE))
                                       .addReg(SrcReg,
                                               getKillRegState(isKill)),
                                       FrameIdx));
    SpillsVRS = true;
  } else {
    llvm_unreachable("Unknown regclass!");
  }

  return false;
}

void
PPCInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned SrcReg, bool isKill, int FrameIdx,
                                  const TargetRegisterClass *RC,
                                  const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  SmallVector<MachineInstr*, 4> NewMIs;

  PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>();
  FuncInfo->setHasSpills();

  bool NonRI = false, SpillsVRS = false;
  if (StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs,
                          NonRI, SpillsVRS))
    FuncInfo->setSpillsCR();

  if (SpillsVRS)
    FuncInfo->setSpillsVRSAVE();

  if (NonRI)
    FuncInfo->setHasNonRISpills();

  for (unsigned i = 0, e = NewMIs.size(); i != e; ++i)
    MBB.insert(MI, NewMIs[i]);

  const MachineFrameInfo &MFI = *MF.getFrameInfo();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx),
                            MachineMemOperand::MOStore,
                            MFI.getObjectSize(FrameIdx),
                            MFI.getObjectAlignment(FrameIdx));
  NewMIs.back()->addMemOperand(MF, MMO);
}

bool
PPCInstrInfo::LoadRegFromStackSlot(MachineFunction &MF, DebugLoc DL,
                                   unsigned DestReg, int FrameIdx,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs,
                                   bool &NonRI, bool &SpillsVRS) const {
  // Note: If additional load instructions are added here,
  // update isLoadFromStackSlot.

  if (PPC::GPRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LWZ),
                                               DestReg), FrameIdx));
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LD), DestReg),
                                       FrameIdx));
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFD), DestReg),
                                       FrameIdx));
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LFS), DestReg),
                                       FrameIdx));
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
                                               get(PPC::RESTORE_CR), DestReg),
                                       FrameIdx));
    return true;
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {

    unsigned Reg = 0;
    if (DestReg == PPC::CR0LT || DestReg == PPC::CR0GT ||
        DestReg == PPC::CR0EQ || DestReg == PPC::CR0UN)
      Reg = PPC::CR0;
    else if (DestReg == PPC::CR1LT || DestReg == PPC::CR1GT ||
             DestReg == PPC::CR1EQ || DestReg == PPC::CR1UN)
      Reg = PPC::CR1;
    else if (DestReg == PPC::CR2LT || DestReg == PPC::CR2GT ||
             DestReg == PPC::CR2EQ || DestReg == PPC::CR2UN)
      Reg = PPC::CR2;
    else if (DestReg == PPC::CR3LT || DestReg == PPC::CR3GT ||
             DestReg == PPC::CR3EQ || DestReg == PPC::CR3UN)
      Reg = PPC::CR3;
    else if (DestReg == PPC::CR4LT || DestReg == PPC::CR4GT ||
             DestReg == PPC::CR4EQ || DestReg == PPC::CR4UN)
      Reg = PPC::CR4;
    else if (DestReg == PPC::CR5LT || DestReg == PPC::CR5GT ||
             DestReg == PPC::CR5EQ || DestReg == PPC::CR5UN)
      Reg = PPC::CR5;
    else if (DestReg == PPC::CR6LT || DestReg == PPC::CR6GT ||
             DestReg == PPC::CR6EQ || DestReg == PPC::CR6UN)
      Reg = PPC::CR6;
    else if (DestReg == PPC::CR7LT || DestReg == PPC::CR7GT ||
             DestReg == PPC::CR7EQ || DestReg == PPC::CR7UN)
      Reg = PPC::CR7;

    return LoadRegFromStackSlot(MF, DL, Reg, FrameIdx,
                                &PPC::CRRCRegClass, NewMIs, NonRI, SpillsVRS);

  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL, get(PPC::LVX), DestReg),
                                       FrameIdx));
    NonRI = true;
  } else if (PPC::VRSAVERCRegClass.hasSubClassEq(RC)) {
    assert(TM.getSubtargetImpl()->isDarwin() &&
           "VRSAVE only needs spill/restore on Darwin");
    NewMIs.push_back(addFrameReference(BuildMI(MF, DL,
                                               get(PPC::RESTORE_VRSAVE),
                                               DestReg),
                                       FrameIdx));
    SpillsVRS = true;
  } else {
    llvm_unreachable("Unknown regclass!");
regclass!"); 753 } 754 755 return false; 756 } 757 758 void 759 PPCInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 760 MachineBasicBlock::iterator MI, 761 unsigned DestReg, int FrameIdx, 762 const TargetRegisterClass *RC, 763 const TargetRegisterInfo *TRI) const { 764 MachineFunction &MF = *MBB.getParent(); 765 SmallVector<MachineInstr*, 4> NewMIs; 766 DebugLoc DL; 767 if (MI != MBB.end()) DL = MI->getDebugLoc(); 768 769 PPCFunctionInfo *FuncInfo = MF.getInfo<PPCFunctionInfo>(); 770 FuncInfo->setHasSpills(); 771 772 bool NonRI = false, SpillsVRS = false; 773 if (LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs, 774 NonRI, SpillsVRS)) 775 FuncInfo->setSpillsCR(); 776 777 if (SpillsVRS) 778 FuncInfo->setSpillsVRSAVE(); 779 780 if (NonRI) 781 FuncInfo->setHasNonRISpills(); 782 783 for (unsigned i = 0, e = NewMIs.size(); i != e; ++i) 784 MBB.insert(MI, NewMIs[i]); 785 786 const MachineFrameInfo &MFI = *MF.getFrameInfo(); 787 MachineMemOperand *MMO = 788 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FrameIdx), 789 MachineMemOperand::MOLoad, 790 MFI.getObjectSize(FrameIdx), 791 MFI.getObjectAlignment(FrameIdx)); 792 NewMIs.back()->addMemOperand(MF, MMO); 793 } 794 795 MachineInstr* 796 PPCInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF, 797 int FrameIx, uint64_t Offset, 798 const MDNode *MDPtr, 799 DebugLoc DL) const { 800 MachineInstrBuilder MIB = BuildMI(MF, DL, get(PPC::DBG_VALUE)); 801 addFrameReference(MIB, FrameIx, 0, false).addImm(Offset).addMetadata(MDPtr); 802 return &*MIB; 803 } 804 805 bool PPCInstrInfo:: 806 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const { 807 assert(Cond.size() == 2 && "Invalid PPC branch opcode!"); 808 if (Cond[1].getReg() == PPC::CTR8 || Cond[1].getReg() == PPC::CTR) 809 Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0); 810 else 811 // Leave the CR# the same, but invert the condition. 812 Cond[0].setImm(PPC::InvertPredicate((PPC::Predicate)Cond[0].getImm())); 813 return false; 814 } 815 816 bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI, 817 unsigned Reg, MachineRegisterInfo *MRI) const { 818 // For some instructions, it is legal to fold ZERO into the RA register field. 819 // A zero immediate should always be loaded with a single li. 820 unsigned DefOpc = DefMI->getOpcode(); 821 if (DefOpc != PPC::LI && DefOpc != PPC::LI8) 822 return false; 823 if (!DefMI->getOperand(1).isImm()) 824 return false; 825 if (DefMI->getOperand(1).getImm() != 0) 826 return false; 827 828 // Note that we cannot here invert the arguments of an isel in order to fold 829 // a ZERO into what is presented as the second argument. All we have here 830 // is the condition bit, and that might come from a CR-logical bit operation. 831 832 const MCInstrDesc &UseMCID = UseMI->getDesc(); 833 834 // Only fold into real machine instructions. 835 if (UseMCID.isPseudo()) 836 return false; 837 838 unsigned UseIdx; 839 for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx) 840 if (UseMI->getOperand(UseIdx).isReg() && 841 UseMI->getOperand(UseIdx).getReg() == Reg) 842 break; 843 844 assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI"); 845 assert(UseIdx < UseMCID.getNumOperands() && "No operand description for Reg"); 846 847 const MCOperandInfo *UseInfo = &UseMCID.OpInfo[UseIdx]; 848 849 // We can fold the zero if this register requires a GPRC_NOR0/G8RC_NOX0 850 // register (which might also be specified as a pointer class kind). 
  if (UseInfo->isLookupPtrRegClass()) {
    if (UseInfo->RegClass /* Kind */ != 1)
      return false;
  } else {
    if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
        UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)
      return false;
  }

  // Make sure this is not tied to an output register (or otherwise
  // constrained). This is true for ST?UX registers, for example, which
  // are tied to their output registers.
  if (UseInfo->Constraints != 0)
    return false;

  unsigned ZeroReg;
  if (UseInfo->isLookupPtrRegClass()) {
    bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
  } else {
    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;
  }

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI->getOperand(UseIdx).setReg(ZeroReg);

  if (DeleteDef)
    DefMI->eraseFromParent();

  return true;
}

static bool MBBDefinesCTR(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::iterator I = MBB.begin(), IE = MBB.end();
       I != IE; ++I)
    if (I->definesRegister(PPC::CTR) || I->definesRegister(PPC::CTR8))
      return true;
  return false;
}

// We should make sure that, if we're going to predicate both sides of a
// condition (a diamond), that both sides don't define the counter register. We
// can predicate counter-decrement-based branches, but while that predicates
// the branching, it does not predicate the counter decrement. If we tried to
// merge the diamond into one predicated block, we'd decrement the counter
// twice.
bool PPCInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                     unsigned NumT, unsigned ExtraT,
                     MachineBasicBlock &FMBB,
                     unsigned NumF, unsigned ExtraF,
                     const BranchProbability &Probability) const {
  return !(MBBDefinesCTR(TMBB) && MBBDefinesCTR(FMBB));
}


bool PPCInstrInfo::isPredicated(const MachineInstr *MI) const {
  // The predicated branches are identified by their type, not really by the
  // explicit presence of a predicate. Furthermore, some of them can be
  // predicated more than once. Because if conversion won't try to predicate
  // any instruction which already claims to be predicated (by returning true
  // here), always return false. In doing so, we let isPredicable() be the
  // final word on whether or not the instruction can be (further) predicated.

  return false;
}

bool PPCInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator())
    return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;

  return !isPredicated(MI);
}

bool PPCInstrInfo::PredicateInstruction(
                     MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned OpC = MI->getOpcode();
  if (OpC == PPC::BLR) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
      MI->setDesc(get(Pred[0].getImm() ?
                      (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR) :
                      (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
    } else {
      MI->setDesc(get(PPC::BCLR));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg());
    }

    return true;
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
      MI->setDesc(get(Pred[0].getImm() ?
                      (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                      (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
    } else {
      MachineBasicBlock *MBB = MI->getOperand(0).getMBB();
      MI->RemoveOperand(0);

      MI->setDesc(get(PPC::BCC));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI)
        .addImm(Pred[0].getImm())
        .addReg(Pred[1].getReg())
        .addMBB(MBB);
    }

    return true;
  } else if (OpC == PPC::BCTR  || OpC == PPC::BCTR8 ||
             OpC == PPC::BCTRL || OpC == PPC::BCTRL8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
      llvm_unreachable("Cannot predicate bctr[l] on the ctr register");

    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8;
    bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
    MI->setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8) :
                              (setLR ? PPC::BCCTRL  : PPC::BCCTR)));
    MachineInstrBuilder(*MI->getParent()->getParent(), MI)
      .addImm(Pred[0].getImm())
      .addReg(Pred[1].getReg());
    return true;
  }

  return false;
}

bool PPCInstrInfo::SubsumesPredicate(
                     const SmallVectorImpl<MachineOperand> &Pred1,
                     const SmallVectorImpl<MachineOperand> &Pred2) const {
  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
    return false;
  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
    return false;

  PPC::Predicate P1 = (PPC::Predicate) Pred1[0].getImm();
  PPC::Predicate P2 = (PPC::Predicate) Pred2[0].getImm();

  if (P1 == P2)
    return true;

  // Does P1 subsume P2, e.g. GE subsumes GT.
  if (P1 == PPC::PRED_LE &&
      (P2 == PPC::PRED_LT || P2 == PPC::PRED_EQ))
    return true;
  if (P1 == PPC::PRED_GE &&
      (P2 == PPC::PRED_GT || P2 == PPC::PRED_EQ))
    return true;

  return false;
}

bool PPCInstrInfo::DefinesPredicate(MachineInstr *MI,
                                    std::vector<MachineOperand> &Pred) const {
  // Note: At the present time, the contents of Pred from this function are
  // unused by IfConversion. This implementation follows ARM by pushing the
  // CR-defining operand. Because the 'DZ' and 'DNZ' count as types of
  // predicate, instructions defining CTR or CTR8 are also included as
  // predicate-defining instructions.
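  // For example, both a cmpw (which defines a CR field) and an mtctr (which
  // defines CTR) are reported as predicate-defining here.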

  const TargetRegisterClass *RCs[] =
    { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
      &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  bool Found = false;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    for (unsigned c = 0; c < array_lengthof(RCs) && !Found; ++c) {
      const TargetRegisterClass *RC = RCs[c];
      if (MO.isReg()) {
        if (MO.isDef() && RC->contains(MO.getReg())) {
          Pred.push_back(MO);
          Found = true;
        }
      } else if (MO.isRegMask()) {
        for (TargetRegisterClass::iterator I = RC->begin(),
             IE = RC->end(); I != IE; ++I)
          if (MO.clobbersPhysReg(*I)) {
            Pred.push_back(MO);
            Found = true;
          }
      }
    }
  }

  return Found;
}

bool PPCInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned OpC = MI->getOpcode();
  switch (OpC) {
  default:
    return false;
  case PPC::B:
  case PPC::BLR:
  case PPC::BCTR:
  case PPC::BCTR8:
  case PPC::BCTRL:
  case PPC::BCTRL8:
    return true;
  }
}

bool PPCInstrInfo::analyzeCompare(const MachineInstr *MI,
                                  unsigned &SrcReg, unsigned &SrcReg2,
                                  int &Mask, int &Value) const {
  unsigned Opc = MI->getOpcode();

  switch (Opc) {
  default: return false;
  case PPC::CMPWI:
  case PPC::CMPLWI:
  case PPC::CMPDI:
  case PPC::CMPLDI:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = 0;
    Value = MI->getOperand(2).getImm();
    Mask = 0xFFFF;
    return true;
  case PPC::CMPW:
  case PPC::CMPLW:
  case PPC::CMPD:
  case PPC::CMPLD:
  case PPC::FCMPUS:
  case PPC::FCMPUD:
    SrcReg = MI->getOperand(1).getReg();
    SrcReg2 = MI->getOperand(2).getReg();
    return true;
  }
}

bool PPCInstrInfo::optimizeCompareInstr(MachineInstr *CmpInstr,
                                        unsigned SrcReg, unsigned SrcReg2,
                                        int Mask, int Value,
                                        const MachineRegisterInfo *MRI) const {
  if (DisableCmpOpt)
    return false;

  int OpC = CmpInstr->getOpcode();
  unsigned CRReg = CmpInstr->getOperand(0).getReg();
  bool isFP = OpC == PPC::FCMPUS || OpC == PPC::FCMPUD;
  unsigned CRRecReg = isFP ? PPC::CR1 : PPC::CR0;

  // The record forms set the condition register based on a signed comparison
  // with zero (so says the ISA manual). This is not as straightforward as it
  // seems, however, because this is always a 64-bit comparison on PPC64, even
  // for instructions that are 32-bit in nature (like slw for example).
  // So, on PPC32, for unsigned comparisons, we can use the record forms only
  // for equality checks (as those don't depend on the sign). On PPC64,
  // we are restricted to equality for unsigned 64-bit comparisons and for
  // signed 32-bit comparisons the applicability is more restricted.
  bool isPPC64 = TM.getSubtargetImpl()->isPPC64();
  bool is32BitSignedCompare   = OpC == PPC::CMPWI || OpC == PPC::CMPW;
  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;

  // Get the unique definition of SrcReg.
  MachineInstr *MI = MRI->getUniqueVRegDef(SrcReg);
  if (!MI) return false;
  int MIOpC = MI->getOpcode();

  bool equalityOnly = false;
  bool noSub = false;
  if (isPPC64) {
    if (is32BitSignedCompare) {
      // We can perform this optimization only if MI is sign-extending.
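      // (A record form compares the full 64-bit register against zero, so the
      // result matches a 32-bit signed compare only when the value is known
      // to be sign-extended to 64 bits.)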
      if (MIOpC == PPC::SRAW  || MIOpC == PPC::SRAWo ||
          MIOpC == PPC::SRAWI || MIOpC == PPC::SRAWIo ||
          MIOpC == PPC::EXTSB || MIOpC == PPC::EXTSBo ||
          MIOpC == PPC::EXTSH || MIOpC == PPC::EXTSHo ||
          MIOpC == PPC::EXTSW || MIOpC == PPC::EXTSWo) {
        noSub = true;
      } else
        return false;
    } else if (is32BitUnsignedCompare) {
      // We can perform this optimization, equality only, if MI is
      // zero-extending.
      if (MIOpC == PPC::CNTLZW || MIOpC == PPC::CNTLZWo ||
          MIOpC == PPC::SLW    || MIOpC == PPC::SLWo ||
          MIOpC == PPC::SRW    || MIOpC == PPC::SRWo) {
        noSub = true;
        equalityOnly = true;
      } else
        return false;
    } else if (!isFP)
      equalityOnly = is64BitUnsignedCompare;
  } else if (!isFP)
    equalityOnly = is32BitUnsignedCompare;

  if (equalityOnly) {
    // We need to check the uses of the condition register in order to reject
    // non-equality comparisons.
    for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg),
         IE = MRI->use_end(); I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        unsigned Pred = UseMI->getOperand(0).getImm();
        if (Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE)
          continue;

        return false;
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned SubIdx = UseMI->getOperand(3).getSubReg();
        if (SubIdx == PPC::sub_eq)
          continue;

        return false;
      } else
        return false;
    }
  }

  // Get ready to iterate backward from CmpInstr.
  MachineBasicBlock::iterator I = CmpInstr, E = MI,
                              B = CmpInstr->getParent()->begin();

  // Scan forward to find the first use of the compare.
  for (MachineBasicBlock::iterator EL = CmpInstr->getParent()->end();
       I != EL; ++I) {
    bool FoundUse = false;
    for (MachineRegisterInfo::use_iterator J = MRI->use_begin(CRReg),
         JE = MRI->use_end(); J != JE; ++J)
      if (&*J == &*I) {
        FoundUse = true;
        break;
      }

    if (FoundUse)
      break;
  }

  // Early exit if we're at the beginning of the BB.
  if (I == B) return false;

  // There are two possible candidates which can be changed to set CR[01].
  // One is MI, the other is a SUB instruction.
  // For CMPrr(r1,r2), we are looking for SUB(r1,r2) or SUB(r2,r1).
  MachineInstr *Sub = NULL;
  if (SrcReg2 != 0)
    // MI is not a candidate for CMPrr.
    MI = NULL;
  // FIXME: Conservatively refuse to convert an instruction which isn't in the
  // same BB as the comparison. This is to allow the check below to avoid calls
  // (and other explicit clobbers); instead we should really check for these
  // more explicitly (in at least a few predecessors).
  else if (MI->getParent() != CmpInstr->getParent() || Value != 0) {
    // PPC does not have a record-form SUBri.
    return false;
  }

  // Search for Sub.
  const TargetRegisterInfo *TRI = &getRegisterInfo();
  --I;
  for (; I != E && !noSub; --I) {
    const MachineInstr &Instr = *I;
    unsigned IOpC = Instr.getOpcode();

    if (&*I != CmpInstr && (
        Instr.modifiesRegister(CRRecReg, TRI) ||
        Instr.readsRegister(CRRecReg, TRI)))
      // This instruction modifies or uses the record condition register after
      // the one we want to change. While we could do this transformation, it
      // would likely not be profitable. This transformation removes one
      // instruction, and so even forcing RA to generate one move probably
      // makes it unprofitable.
      return false;

    // Check whether CmpInstr can be made redundant by the current instruction.
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
      Sub = &*I;
      break;
    }

    if (isFP && (IOpC == PPC::FSUB || IOpC == PPC::FSUBS) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
      Sub = &*I;
      break;
    }

    if (I == B)
      // We have reached the start of the block without finding a candidate.
      return false;
  }

  // Return false if no candidates exist.
  if (!MI && !Sub)
    return false;

  // The single candidate is called MI.
  if (!MI) MI = Sub;

  int NewOpC = -1;
  MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDIo || MIOpC == PPC::ANDIo8)
    NewOpC = MIOpC;
  else {
    NewOpC = PPC::getRecordFormOpcode(MIOpC);
    if (NewOpC == -1 && PPC::getNonRecordFormOpcode(MIOpC) != -1)
      NewOpC = MIOpC;
  }

  // FIXME: On the non-embedded POWER architectures, only some of the record
  // forms are fast, and we should use only the fast ones.

  // The defining instruction has a record form (or is already a record
  // form). It is possible, however, that we'll need to reverse the condition
  // code of the users.
  if (NewOpC == -1)
    return false;

  SmallVector<std::pair<MachineOperand*, PPC::Predicate>, 4> PredsToUpdate;
  SmallVector<std::pair<MachineOperand*, unsigned>, 4> SubRegsToUpdate;

  // If we have SUB(r1, r2) and CMP(r2, r1), the condition code based on CMP
  // needs to be updated to be based on SUB. Push the condition code
  // operands to OperandsToUpdate. If it is safe to remove CmpInstr, the
  // condition code of these operands will be modified.
  bool ShouldSwap = false;
  if (Sub) {
    ShouldSwap = SrcReg2 != 0 && Sub->getOperand(1).getReg() == SrcReg2 &&
      Sub->getOperand(2).getReg() == SrcReg;

    // The operands to subf are the opposite of sub, so only in the fixed-point
    // case, invert the order.
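    // (subf rD, rA, rB computes rB - rA, while fsub frD, frA, frB computes
    // frA - frB, so the sense of "matching operand order" differs between the
    // integer and floating-point candidates.)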
    if (!isFP)
      ShouldSwap = !ShouldSwap;
  }

  if (ShouldSwap)
    for (MachineRegisterInfo::use_iterator I = MRI->use_begin(CRReg),
         IE = MRI->use_end(); I != IE; ++I) {
      MachineInstr *UseMI = &*I;
      if (UseMI->getOpcode() == PPC::BCC) {
        PPC::Predicate Pred = (PPC::Predicate) UseMI->getOperand(0).getImm();
        assert((!equalityOnly ||
                Pred == PPC::PRED_EQ || Pred == PPC::PRED_NE) &&
               "Invalid predicate for equality-only optimization");
        PredsToUpdate.push_back(std::make_pair(&((*I).getOperand(0)),
                                PPC::getSwappedPredicate(Pred)));
      } else if (UseMI->getOpcode() == PPC::ISEL ||
                 UseMI->getOpcode() == PPC::ISEL8) {
        unsigned NewSubReg = UseMI->getOperand(3).getSubReg();
        assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
               "Invalid CR bit for equality-only optimization");

        if (NewSubReg == PPC::sub_lt)
          NewSubReg = PPC::sub_gt;
        else if (NewSubReg == PPC::sub_gt)
          NewSubReg = PPC::sub_lt;

        SubRegsToUpdate.push_back(std::make_pair(&((*I).getOperand(3)),
                                  NewSubReg));
      } else // We need to abort on a user we don't understand.
        return false;
    }

  // Replace the compare's CR result with a copy of the CR field set by the
  // record-form instruction. If the defining instruction was not previously
  // in record form, then set the kill flag on the CR.
  CmpInstr->eraseFromParent();

  MachineBasicBlock::iterator MII = MI;
  BuildMI(*MI->getParent(), llvm::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)
    .addReg(CRRecReg, MIOpC != NewOpC ? RegState::Kill : 0);

  if (MIOpC != NewOpC) {
    // We need to be careful here: we're replacing one instruction with
    // another, and we need to make sure that we get all of the right
    // implicit uses and defs. On the other hand, the caller may be holding
    // an iterator to this instruction, and so we can't delete it (this is
    // specifically the case if this is the instruction directly after the
    // compare).

    const MCInstrDesc &NewDesc = get(NewOpC);
    MI->setDesc(NewDesc);

    if (NewDesc.ImplicitDefs)
      for (const uint16_t *ImpDefs = NewDesc.getImplicitDefs();
           *ImpDefs; ++ImpDefs)
        if (!MI->definesRegister(*ImpDefs))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpDefs, true, true));
    if (NewDesc.ImplicitUses)
      for (const uint16_t *ImpUses = NewDesc.getImplicitUses();
           *ImpUses; ++ImpUses)
        if (!MI->readsRegister(*ImpUses))
          MI->addOperand(*MI->getParent()->getParent(),
                         MachineOperand::CreateReg(*ImpUses, false, true));
  }

  // Modify the condition code of operands in OperandsToUpdate.
  // Since we have SUB(r1, r2) and CMP(r2, r1), the condition code needs to
  // be changed from r2 > r1 to r1 < r2, from r2 < r1 to r1 > r2, etc.
  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);

  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);

  return true;
}

/// GetInstSize - Return the number of bytes of code the specified
/// instruction may be. This returns the maximum number of bytes.
///
unsigned PPCInstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case PPC::INLINEASM: {       // Inline Asm: Variable size.
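    // getInlineAsmLength conservatively over-estimates by counting statements
    // in the asm string and multiplying by the target's maximum instruction
    // length, which is all callers need here.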
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  case PPC::PROLOG_LABEL:
  case PPC::EH_LABEL:
  case PPC::GC_LABEL:
  case PPC::DBG_VALUE:
    return 0;
  case PPC::BL8_NOP:
  case PPC::BLA8_NOP:
    return 8;
  default:
    return 4; // PowerPC instructions are all 4 bytes
  }
}

#undef DEBUG_TYPE
#define DEBUG_TYPE "ppc-early-ret"
STATISTIC(NumBCLR, "Number of early conditional returns");
STATISTIC(NumBLR,  "Number of early returns");

namespace llvm {
  void initializePPCEarlyReturnPass(PassRegistry&);
}

namespace {
  // PPCEarlyReturn pass - For simple functions without epilogue code, move
  // returns up, and create conditional returns, to avoid unnecessary
  // branch-to-blr sequences.
  struct PPCEarlyReturn : public MachineFunctionPass {
    static char ID;
    PPCEarlyReturn() : MachineFunctionPass(ID) {
      initializePPCEarlyReturnPass(*PassRegistry::getPassRegistry());
    }

    const PPCTargetMachine *TM;
    const PPCInstrInfo *TII;

protected:
    bool processBlock(MachineBasicBlock &ReturnMBB) {
      bool Changed = false;

      MachineBasicBlock::iterator I = ReturnMBB.begin();
      I = ReturnMBB.SkipPHIsAndLabels(I);

      // The block must be essentially empty except for the blr.
      if (I == ReturnMBB.end() || I->getOpcode() != PPC::BLR ||
          I != ReturnMBB.getLastNonDebugInstr())
        return Changed;

      SmallVector<MachineBasicBlock*, 8> PredToRemove;
      for (MachineBasicBlock::pred_iterator PI = ReturnMBB.pred_begin(),
           PIE = ReturnMBB.pred_end(); PI != PIE; ++PI) {
        bool OtherReference = false, BlockChanged = false;
        for (MachineBasicBlock::iterator J = (*PI)->getLastNonDebugInstr();;) {
          if (J->getOpcode() == PPC::B) {
            if (J->getOperand(0).getMBB() == &ReturnMBB) {
              // This is an unconditional branch to the return. Replace the
              // branch with a blr.
              BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BLR));
              MachineBasicBlock::iterator K = J--;
              K->eraseFromParent();
              BlockChanged = true;
              ++NumBLR;
              continue;
            }
          } else if (J->getOpcode() == PPC::BCC) {
            if (J->getOperand(2).getMBB() == &ReturnMBB) {
              // This is a conditional branch to the return. Replace the branch
              // with a bclr.
              BuildMI(**PI, J, J->getDebugLoc(), TII->get(PPC::BCLR))
                .addImm(J->getOperand(0).getImm())
                .addReg(J->getOperand(1).getReg());
              MachineBasicBlock::iterator K = J--;
              K->eraseFromParent();
              BlockChanged = true;
              ++NumBCLR;
              continue;
            }
          } else if (J->isBranch()) {
            if (J->isIndirectBranch()) {
              if (ReturnMBB.hasAddressTaken())
                OtherReference = true;
            } else
              for (unsigned i = 0; i < J->getNumOperands(); ++i)
                if (J->getOperand(i).isMBB() &&
                    J->getOperand(i).getMBB() == &ReturnMBB)
                  OtherReference = true;
          } else if (!J->isTerminator() && !J->isDebugValue())
            break;

          if (J == (*PI)->begin())
            break;

          --J;
        }

        if ((*PI)->canFallThrough() && (*PI)->isLayoutSuccessor(&ReturnMBB))
          OtherReference = true;

        // Predecessors are stored in a vector and can't be removed here.
        if (!OtherReference && BlockChanged) {
          PredToRemove.push_back(*PI);
        }

        if (BlockChanged)
          Changed = true;
      }

      for (unsigned i = 0, ie = PredToRemove.size(); i != ie; ++i)
        PredToRemove[i]->removeSuccessor(&ReturnMBB);

      if (Changed && !ReturnMBB.hasAddressTaken()) {
        // We now might be able to merge this blr-only block into its
        // by-layout predecessor.
        if (ReturnMBB.pred_size() == 1 &&
            (*ReturnMBB.pred_begin())->isLayoutSuccessor(&ReturnMBB)) {
          // Move the blr into the preceding block.
          MachineBasicBlock &PrevMBB = **ReturnMBB.pred_begin();
          PrevMBB.splice(PrevMBB.end(), &ReturnMBB, I);
          PrevMBB.removeSuccessor(&ReturnMBB);
        }

        if (ReturnMBB.pred_empty())
          ReturnMBB.eraseFromParent();
      }

      return Changed;
    }

public:
    virtual bool runOnMachineFunction(MachineFunction &MF) {
      TM = static_cast<const PPCTargetMachine *>(&MF.getTarget());
      TII = TM->getInstrInfo();

      bool Changed = false;

      // If the function does not have at least two blocks, then there is
      // nothing to do.
      if (MF.size() < 2)
        return Changed;

      for (MachineFunction::iterator I = MF.begin(); I != MF.end();) {
        MachineBasicBlock &B = *I++;
        if (processBlock(B))
          Changed = true;
      }

      return Changed;
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };
}

INITIALIZE_PASS(PPCEarlyReturn, DEBUG_TYPE,
                "PowerPC Early-Return Creation", false, false)

char PPCEarlyReturn::ID = 0;
FunctionPass*
llvm::createPPCEarlyReturnPass() { return new PPCEarlyReturn(); }