//===-- SystemZInstrInfo.cpp - SystemZ instruction information -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

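// Illustrative numbers for splitAdjDynAlloc (assumed, not taken from any
// real function): with a maximum call frame of 160 bytes, a
// SystemZMC::CallFrameSize of 160 bytes and an extra offset of 0, the
// ADJDYNALLOC pseudo is rewritten into an LA-form instruction with a
// displacement of 320.  If the sum ever fell outside the displacement
// ranges handled by getOpcodeForOffset, the "huge argument lists"
// assertion above would fire instead.
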
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction,
    // we're done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

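    // Note on the representation (informal): the condition is kept in Cond
    // as a single CC-mask immediate.  Each of the four bits selects one
    // condition-code value (8 tests CC 0, 4 tests CC 1, 2 tests CC 2 and
    // 1 tests CC 3), and CCMASK_ANY has all four bits set, which is why it
    // identifies an unconditional branch above.
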
    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCond = Cond[0].getImm();
    if (OldCond == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CC = Cond[0].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC)).addImm(CC).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

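  // Illustrative example (register choice assumed): copying one GR128 value
  // to another, say %R0Q to %R2Q, therefore recurses into two LGRs, one for
  // each 64-bit half of the even/odd register pair.
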
  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for
// stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about.
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  unsigned Opcode = MI->getOpcode();
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
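    // (Background, for readers unfamiliar with these opcodes: LDGR moves a
    //  GPR value into an FPR and LGDR moves an FPR value into a GPR, both
    //  without changing the bits, so storing the source register directly
    //  writes the same bytes to the slot as spilling the destination would.)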
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  return 0;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid branch condition!");
  Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY);
  return false;
}

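// Example of the reversal above (the numbers follow directly from the mask
// encoding): a condition mask of 8 ("branch if CC is 0", i.e. equal for a
// comparison) becomes 8 ^ 15 = 7 ("branch if CC is 1, 2 or 3", i.e. not
// equal), so the branch sense is inverted without needing a lookup table.
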
uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(), &MI->getOperand(1));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST32;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

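// Worked example for getOpcodeForOffset (the specific opcode mappings are
// assumptions about the TableGen-generated tables, not guarantees): a load
// such as L with a displacement of 2000 fits in an unsigned 12 bits and is
// left alone; with a displacement of 5000 it only fits in a signed 20 bits,
// so getDisp20Opcode should pick the long-displacement form (LY); and a
// displacement outside roughly +/-512K makes the function return 0, leaving
// the caller to materialize the address some other way.
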
unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}