//===-- SystemZInstrInfo.cpp - SystemZ instruction information -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "SystemZTargetMachine.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}

// Reg should be a 32-bit GPR.  Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
  if (SystemZ::GRH32BitRegClass.contains(Reg))
    return true;
  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
  return false;
}

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(), STI(sti) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags for the base and index registers in the first
  // instruction.
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
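// The offset operand is biased by the size of the outgoing argument area
// (getMaxCallFrameSize() plus the ABI call frame size, as computed below),
// and the pseudo is then rewritten as an LA, or LAY for displacements that
// need 20 bits.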
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand.  In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  unsigned Reg = MI.getOperand(0).getReg();
  bool IsHigh = isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}

// MI is a three-operand RIE-style pseudo instruction.  Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcodeK));
  else {
    emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                  SystemZ::LR, 32, MI.getOperand(1).isKill());
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.getOperand(1).setReg(DestReg);
    MI.tieOperands(0, 1);
  }
}

// MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
                                       MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another.  Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
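// For example, LLCRMux becomes a plain LLCR when both registers are low;
// otherwise emitGRX32Move emits a RISB[LH]G-style rotate-and-insert that
// copies the low Size bits of the source into the destination.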
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
                Size, MI.getOperand(1).isKill());
  MI.eraseFromParent();
}

void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const unsigned Reg = MI->getOperand(0).getReg();

  // Conveniently, all 4 instructions are cloned from LOAD_STACK_GUARD,
  // so they already have operand 0 set to reg.

  // ear <reg>, %a0
  MachineInstr *Ear1MI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, Ear1MI);
  Ear1MI->setDesc(get(SystemZ::EAR));
  MachineInstrBuilder(MF, Ear1MI).addImm(0);

  // sllg <reg>, <reg>, 32
  MachineInstr *SllgMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, SllgMI);
  SllgMI->setDesc(get(SystemZ::SLLG));
  MachineInstrBuilder(MF, SllgMI).addReg(Reg).addReg(0).addImm(32);

  // ear <reg>, %a1
  MachineInstr *Ear2MI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, Ear2MI);
  Ear2MI->setDesc(get(SystemZ::EAR));
  MachineInstrBuilder(MF, Ear2MI).addImm(1);

  // lg <reg>, 40(<reg>)
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg).addImm(40).addReg(0);
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G.  Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
void SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     const DebugLoc &DL, unsigned DestReg,
                                     unsigned SrcReg, unsigned LowLowOpcode,
                                     unsigned Size, bool KillSrc) const {
  unsigned Opcode;
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
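// The operands of such a move are: 0 = the loaded or stored register,
// 1 = the base address (required here to be a frame index), 2 = the
// displacement and 3 = the index register.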
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
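    // Cond[0] holds the branch's CCValid mask and Cond[1] its CCMask.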
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}

unsigned SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &Mask,
                                      int &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

// If Reg is a virtual register, return its definition, otherwise return null.
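// Physical registers can have multiple definitions, so only virtual
// registers are traced back to a unique defining instruction.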
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return nullptr;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
static bool isShift(MachineInstr *MI, unsigned Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}

// Compare compares SrcReg against zero.  Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant.  Delete it and return
// true if so.
static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = nullptr;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare.getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr *MI = MBBI;
    if (MI->modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare.eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);

  return true;
}

bool SystemZInstrInfo::optimizeCompareInstr(
    MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
    int Value, const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
  return Value == 0 && !IsLogical &&
         removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
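// For example, LR has the load-on-condition form LOCR and LGR has LOCGR.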
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR:  return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default:           return 0;
  }
}

bool SystemZInstrInfo::isPredicable(MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  if (STI.hasLoadStoreOnCond() && getConditionalMove(Opcode))
    return true;
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer).  This doesn't apply for low-probability
  // loops (e.g. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                          BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (STI.hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI.setDesc(get(CondOpcode));
      MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
      return true;
    }
  }
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return) {
    MI.setDesc(get(SystemZ::CondReturn));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    const GlobalValue *Global = MI.getOperand(0).getGlobal();
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.RemoveOperand(1);
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addGlobalAddress(Global)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    const uint32_t *RegMask = MI.getOperand(0).getRegMask();
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}

void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SystemZInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                      .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}

void SystemZInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
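// Such accesses can be folded directly into an MVC operand when spilling
// (see foldMemoryOperandImpl below).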
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {
struct LogicOp {
  LogicOp() : RegSize(0), ImmLSB(0), ImmSize(0) {}
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize, ImmLSB, ImmSize;
};
} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32, 0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64, 0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32, 0, 32);
  case SystemZ::NILF64: return LogicOp(64, 0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default:              return LogicOp();
  }
}

static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
  if (OldMI->registerDefIsDead(SystemZ::CC)) {
    MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
    if (CCDef != nullptr)
      CCDef->setIsDead(true);
  }
}

// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), OldMI, NewMI);
    }
  }
  transferDeadCC(OldMI, NewMI);
  return NewMI;
}

MachineInstr *SystemZInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Opcode = MI.getOpcode();
  unsigned NumOps = MI.getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (STI.hasDistinctOps()) {
    MachineOperand &Dest = MI.getOperand(0);
    MachineOperand &Src = MI.getOperand(1);
    unsigned DestReg = Dest.getReg();
    unsigned SrcReg = Src.getReg();
    // AHIMux is only really a three-operand instruction when both operands
    // are low registers.  Try to constrain both operands to be low if
    // possible.
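    // (R1L is used only as a representative low GR32 to test whether the
    // operands' register classes include the low registers at all.)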
    if (Opcode == SystemZ::AHIMux &&
        TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg) &&
        MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
        MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
      MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
      MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
    }
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      // Create three address instruction without adding the implicit
      // operands.  Those will instead be copied over from the original
      // instruction by the loop below.
      MachineInstrBuilder MIB(
          *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
                                      /*NoImplicit=*/true));
      MIB.addOperand(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI.getOperand(I));
      MBB->insert(MI, MIB);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      MachineInstrBuilder MIB =
          BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
              .addOperand(Dest)
              .addReg(0)
              .addReg(Src.getReg(), getKillRegState(Src.isKill()),
                      Src.getSubReg())
              .addImm(Start)
              .addImm(End + 128)
              .addImm(0);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }
  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);
  unsigned Opcode = MI.getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {

      // Check CC liveness, since new instruction introduces a dead
      // def of CC.
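      // (LA and LAY do not touch CC, but the AGSI created below defines it,
      // albeit as a dead def.)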
      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
      ++CCUnit;
      assert(!CCUnit.isValid() && "CC only has one reg unit.");
      SlotIndex MISlot =
          LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
      if (!CCLiveRange.liveAt(MISlot)) {
        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
        MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(SystemZ::AGSI))
                                    .addFrameIndex(FrameIndex)
                                    .addImm(0)
                                    .addImm(MI.getOperand(2).getImm());
        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
        return BuiltMI;
      }
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size ==
             MF.getRegInfo()
                 .getRegClass(MI.getOperand(OpNum).getReg())
                 ->getSize() &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .addOperand(MI.getOperand(1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI.getOperand(0).getReg();
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode), Dest)
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal.  We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
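      // The reloaded register is the operand being spilled, so the load can
      // become an MVC from the original address straight into the slot.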
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .addOperand(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addOperand(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI.getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI.getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      transferDeadCC(&MI, MIB);
      return MIB;
    }
  }

  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  return nullptr;
}

bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  case SystemZ::RISBMux: {
    bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
    bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(&MI);
    return true;

  default:
    return false;
  }
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI.getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI.getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
                             MI.getOperand(1).getImm(), &MI.getOperand(2));

  case SystemZ::BRCT:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else if (RC == &SystemZ::VR32BitRegClass) {
    LoadOpcode = SystemZ::VL32;
    StoreOpcode = SystemZ::VST32;
  } else if (RC == &SystemZ::VR64BitRegClass) {
    LoadOpcode = SystemZ::VL64;
    StoreOpcode = SystemZ::VST64;
  } else if (RC == &SystemZ::VF128BitRegClass ||
             RC == &SystemZ::VR128BitRegClass) {
    LoadOpcode = SystemZ::VL;
    StoreOpcode = SystemZ::VST;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L:        return SystemZ::LT;
  case SystemZ::LY:       return SystemZ::LT;
  case SystemZ::LG:       return SystemZ::LTG;
  case SystemZ::LGF:      return SystemZ::LTGF;
  case SystemZ::LR:       return SystemZ::LTR;
  case SystemZ::LGFR:     return SystemZ::LTGFR;
  case SystemZ::LGR:      return SystemZ::LTGR;
  case SystemZ::LER:      return SystemZ::LTEBR;
  case SystemZ::LDR:      return SystemZ::LTDBR;
  case SystemZ::LXR:      return SystemZ::LTXBR;
  case SystemZ::LCDFR:    return SystemZ::LCDBR;
  case SystemZ::LPDFR:    return SystemZ::LPDBR;
  case SystemZ::LNDFR:    return SystemZ::LNDBR;
  case SystemZ::LCDFR_32: return SystemZ::LCEBR;
  case SystemZ::LPDFR_32: return SystemZ::LPEBR;
  case SystemZ::LNDFR_32: return SystemZ::LNEBR;
  // On zEC12 we prefer to use RISBGN.  But if there is a chance to
  // actually use the condition code, we may turn it back into RISBG.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN:   return SystemZ::RISBG;
  default:                return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out.  Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  Mask &= allOnes(BitSize);
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
                                           SystemZII::FusedCompareType Type,
                                           const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CHI:
  case SystemZ::CGHI:
    if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CLFI:
  case SystemZ::CLGFI:
    if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
      return 0;
  }
  switch (Type) {
  case SystemZII::CompareAndBranch:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRJ;
    case SystemZ::CGR:
      return SystemZ::CGRJ;
    case SystemZ::CHI:
      return SystemZ::CIJ;
    case SystemZ::CGHI:
      return SystemZ::CGIJ;
    case SystemZ::CLR:
      return SystemZ::CLRJ;
    case SystemZ::CLGR:
      return SystemZ::CLGRJ;
    case SystemZ::CLFI:
      return SystemZ::CLIJ;
    case SystemZ::CLGFI:
      return SystemZ::CLGIJ;
    default:
      return 0;
    }
  case SystemZII::CompareAndReturn:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBReturn;
    case SystemZ::CGR:
      return SystemZ::CGRBReturn;
    case SystemZ::CHI:
      return SystemZ::CIBReturn;
    case SystemZ::CGHI:
      return SystemZ::CGIBReturn;
    case SystemZ::CLR:
      return SystemZ::CLRBReturn;
    case SystemZ::CLGR:
      return SystemZ::CLGRBReturn;
    case SystemZ::CLFI:
      return SystemZ::CLIBReturn;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBReturn;
    default:
      return 0;
    }
  case SystemZII::CompareAndSibcall:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBCall;
    case SystemZ::CGR:
      return SystemZ::CGRBCall;
    case SystemZ::CHI:
      return SystemZ::CIBCall;
    case SystemZ::CGHI:
      return SystemZ::CGIBCall;
    case SystemZ::CLR:
      return SystemZ::CLRBCall;
    case SystemZ::CLGR:
      return SystemZ::CLGRBCall;
    case SystemZ::CLFI:
      return SystemZ::CLIBCall;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBCall;
    default:
      return 0;
    }
  case SystemZII::CompareAndTrap:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRT;
    case SystemZ::CGR:
      return SystemZ::CGRT;
    case SystemZ::CHI:
      return SystemZ::CIT;
    case SystemZ::CGHI:
      return SystemZ::CGIT;
    case SystemZ::CLR:
      return SystemZ::CLRT;
    case SystemZ::CLGR:
      return SystemZ::CLGRT;
    case SystemZ::CLFI:
      return SystemZ::CLFIT;
    case SystemZ::CLGFI:
      return SystemZ::CLGIT;
    default:
      return 0;
    }
  }
  return 0;
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
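  // For example, a 32-bit mask of 0x00ff0000 covers bits 16 to 23 counting
  // from the LSB, giving Start == 40 and End == 47 in the left-to-right bit
  // numbering used by the R*SBG instructions (bit 0 is the 64-bit MSB).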