//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm), TM(tm) {
}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
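//
// These instructions have the operand form (register, base, displacement,
// index), so a frame-index base with a zero displacement and no index
// register is what identifies a plain spill or reload of a frame object.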
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
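    // Any unconditional branch already recorded below it becomes the false
    // destination (FBB), and this branch's target becomes TBB.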
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCond = Cond[0].getImm();
    if (OldCond == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CC = Cond[0].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC)).addImm(CC).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (TM.getSubtargetImpl()->hasDistinctOps()) {
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      unsigned DestReg = MI->getOperand(0).getReg();
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB = BuildMI(*MBB, MBBI, MI->getDebugLoc(),
                                        get(ThreeOperandOpcode), DestReg);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()));
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      MachineInstr *NewMI = MIB;

      // Transfer killing information to the new instruction.
      if (LV) {
        for (unsigned I = 1; I < NumOps; ++I) {
          MachineOperand &Op = MI->getOperand(I);
          if (Op.isReg() && Op.isKill())
            LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
        }
      }
      return MIB;
    }
  }
  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about.
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  unsigned Opcode = MI->getOpcode();
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
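  // For example, a register-register arithmetic instruction whose last
  // operand has been spilled can become the register-memory form that
  // reads that operand directly from the stack slot; the exact pairing is
  // whatever SystemZ::getMemOpcode returns.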
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  return 0;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid branch condition!");
  Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY);
  return false;
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(), &MI->getOperand(1));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST32;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}