//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "SystemZTargetMachine.h"
#include "SystemZInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

using namespace llvm;

SystemZInstrInfo::SystemZInstrInfo(SystemZTargetMachine &tm)
  : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
    RI(tm), TM(tm) {
}

// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_high));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_low));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo *MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame->getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
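//
// The checks below assume the standard BDX operand layout for such moves:
//   0: the loaded or stored register
//   1: the base (a frame index in the cases where we return nonzero)
//   2: the displacement
//   3: the index register (0 when absent)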
static int isSimpleMove(const MachineInstr *MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  if ((MCID.TSFlags & Flag) &&
      MI->getOperand(1).isFI() &&
      MI->getOperand(2).getImm() == 0 &&
      MI->getOperand(3).getReg() == 0) {
    FrameIndex = MI->getOperand(1).getIndex();
    return MI->getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr *MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2).
  const MachineFrameInfo *MFI = MI->getParent()->getParent()->getFrameInfo();
  if (MI->getOpcode() != SystemZ::MVC ||
      !MI->getOperand(0).isFI() ||
      MI->getOperand(1).getImm() != 0 ||
      !MI->getOperand(3).isFI() ||
      MI->getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI->getOperand(2).getImm();
  unsigned FI1 = MI->getOperand(0).getIndex();
  unsigned FI2 = MI->getOperand(3).getIndex();
  if (MFI->getObjectSize(FI1) != Length ||
      MFI->getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (llvm::next(I) != MBB.end())
        llvm::next(I)->eraseFromParent();

      Cond.clear();
      FBB = 0;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = 0;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
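    // Any unconditional branch already seen below it has recorded the
    // fall-back destination in TBB; that becomes FBB, and this branch's
    // target becomes the new TBB.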
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 1);
    assert(TBB);

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCond = Cond[0].getImm();
    if (OldCond == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
  }

  return false;
}

unsigned SystemZInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugValue())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

unsigned
SystemZInstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
                               DebugLoc DL) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CC = Cond[0].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC)).addImm(CC).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  assert(MI->isCompare() && "Caller should check that this is a compare");

  // Ignore comparisons involving memory for now.
  if (MI->getNumExplicitOperands() != 2)
    return false;

  SrcReg = MI->getOperand(0).getReg();
  if (MI->getOperand(1).isReg()) {
    SrcReg2 = MI->getOperand(1).getReg();
    Value = 0;
    Mask = ~0;
    return true;
  } else if (MI->getOperand(1).isImm()) {
    SrcReg2 = 0;
    Value = MI->getOperand(1).getImm();
    Mask = ~0;
    return true;
  }
  return false;
}

// Return true if CC is live after MBBI.  We can't rely on kill information
// because of the way InsertBranch is used.
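// Instead, conservatively treat CC as live unless MBBI itself kills it:
// live if any later instruction in the block reads it, or if any successor
// block has it live-in.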
static bool isCCLiveAfter(MachineBasicBlock::iterator MBBI,
                          const TargetRegisterInfo *TRI) {
  if (MBBI->killsRegister(SystemZ::CC, TRI))
    return false;

  MachineBasicBlock *MBB = MBBI->getParent();
  MachineBasicBlock::iterator MBBE = MBB->end();
  for (++MBBI; MBBI != MBBE; ++MBBI)
    if (MBBI->readsRegister(SystemZ::CC, TRI))
      return true;

  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI)
    if ((*SI)->isLiveIn(SystemZ::CC))
      return true;

  return false;
}

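// For example, with illustrative virtual registers, the fusion performed
// below rewrites the sequence
//   CR %r2, %r3
//   BRC ccmask, label
// into the single compare-and-branch
//   CRJ %r2, %r3, ccmask, label
// using the opcode mapping provided by getCompareAndBranch.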
bool
SystemZInstrInfo::optimizeCompareInstr(MachineInstr *Compare,
                                       unsigned SrcReg, unsigned SrcReg2,
                                       int Mask, int Value,
                                       const MachineRegisterInfo *MRI) const {
  MachineBasicBlock *MBB = Compare->getParent();
  const TargetRegisterInfo *TRI = &getRegisterInfo();

  // Try to fold a comparison into a following branch, if it is only used once.
  if (unsigned FusedOpcode = getCompareAndBranch(Compare->getOpcode(),
                                                 Compare)) {
    MachineBasicBlock::iterator MBBI = Compare, MBBE = MBB->end();
    for (++MBBI; MBBI != MBBE; ++MBBI) {
      if (MBBI->getOpcode() == SystemZ::BRC && !isCCLiveAfter(MBBI, TRI)) {
        // Read the branch mask and target.
        MachineOperand CCMask(MBBI->getOperand(0));
        MachineOperand Target(MBBI->getOperand(1));

        // Clear out all current operands.
        int CCUse = MBBI->findRegisterUseOperandIdx(SystemZ::CC, false, TRI);
        assert(CCUse >= 0 && "BRC must use CC");
        MBBI->RemoveOperand(CCUse);
        MBBI->RemoveOperand(1);
        MBBI->RemoveOperand(0);

        // Rebuild MBBI as a fused compare and branch.
        MBBI->setDesc(get(FusedOpcode));
        MachineInstrBuilder(*MBB->getParent(), MBBI)
          .addOperand(Compare->getOperand(0))
          .addOperand(Compare->getOperand(1))
          .addOperand(CCMask)
          .addOperand(Target);

        // Clear any intervening kills of SrcReg and SrcReg2.
        MBBI = Compare;
        for (++MBBI; MBBI != MBBE; ++MBBI) {
          MBBI->clearRegisterKills(SrcReg, TRI);
          if (SrcReg2)
            MBBI->clearRegisterKills(SrcReg2, TRI);
        }
        Compare->removeFromParent();
        return true;
      }

      // Stop if we find another reference to CC before a branch.
      if (MBBI->readsRegister(SystemZ::CC, TRI) ||
          MBBI->modifiesRegister(SystemZ::CC, TRI))
        break;

      // Stop if we find another assignment to the registers before the branch.
      if (MBBI->modifiesRegister(SrcReg, TRI) ||
          (SrcReg2 && MBBI->modifiesRegister(SrcReg2, TRI)))
        break;
    }
  }
  return false;
}

// If Opcode is a move that has a conditional variant, return that variant,
// otherwise return 0.
static unsigned getConditionalMove(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::LR:  return SystemZ::LOCR;
  case SystemZ::LGR: return SystemZ::LOCGR;
  default:           return 0;
  }
}

bool SystemZInstrInfo::isPredicable(MachineInstr *MI) const {
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond() &&
      getConditionalMove(Opcode))
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    const BranchProbability &Probability) const {
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    const BranchProbability &Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
PredicateInstruction(MachineInstr *MI,
                     const SmallVectorImpl<MachineOperand> &Pred) const {
  unsigned CCMask = Pred[0].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI->getOpcode();
  if (TM.getSubtargetImpl()->hasLoadStoreOnCond()) {
    if (unsigned CondOpcode = getConditionalMove(Opcode)) {
      MI->setDesc(get(CondOpcode));
      MachineInstrBuilder(*MI->getParent()->getParent(), MI).addImm(CCMask);
      return true;
    }
  }
  return false;
}

void
SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MBBI, DebugLoc DL,
                              unsigned DestReg, unsigned SrcReg,
                              bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves.  This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_high),
                RI.getSubReg(SrcReg, SystemZ::subreg_high), KillSrc);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_low),
                RI.getSubReg(SrcReg, SystemZ::subreg_low), KillSrc);
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LR;
  else if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void
SystemZInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MBBI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIdx,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                    .addReg(SrcReg, getKillRegState(isKill)), FrameIdx);
}

void
SystemZInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       unsigned DestReg, int FrameIdx,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

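// Convert the two-address instruction at MBBI into an equivalent
// three-address instruction where the subtarget provides one.  As a rough
// sketch with an illustrative shift:
//   %dst = SLL %src, 3   (%dst tied to %src)
// can become
//   %dst = SLLK %src, 3  (no tie; distinct-operands form, z196 and later)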
MachineInstr *
SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                        MachineBasicBlock::iterator &MBBI,
                                        LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  MachineBasicBlock *MBB = MI->getParent();

  unsigned Opcode = MI->getOpcode();
  unsigned NumOps = MI->getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (TM.getSubtargetImpl()->hasDistinctOps()) {
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      unsigned DestReg = MI->getOperand(0).getReg();
      MachineOperand &Src = MI->getOperand(1);
      MachineInstrBuilder MIB = BuildMI(*MBB, MBBI, MI->getDebugLoc(),
                                        get(ThreeOperandOpcode), DestReg);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()));
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.addOperand(MI->getOperand(I));
      MachineInstr *NewMI = MIB;

      // Transfer killing information to the new instruction.
      if (LV) {
        for (unsigned I = 1; I < NumOps; ++I) {
          MachineOperand &Op = MI->getOperand(I);
          if (Op.isReg() && Op.isKill())
            LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
        }
      }
      return MIB;
    }
  }
  return 0;
}

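// Try to fold the use of a register being spilled to FrameIndex into MI
// itself, returning a replacement instruction that operates on memory, or
// 0 if no folding is possible.  Ops lists the operands of MI that are
// being spilled.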
MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                        MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        int FrameIndex) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  unsigned Size = MFI->getObjectSize(FrameIndex);

  // Early exit for cases we don't care about.
  if (Ops.size() != 1)
    return 0;

  unsigned OpNum = Ops[0];
  assert(Size == MF.getRegInfo()
         .getRegClass(MI->getOperand(OpNum).getReg())->getSize() &&
         "Invalid size combination");

  unsigned Opcode = MI->getOpcode();
  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode))
        .addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex)
        .addImm(0).addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      unsigned Dest = MI->getOperand(0).getReg();
      return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest)
        .addFrameIndex(FrameIndex).addImm(0).addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  It also means that the transformation is
  // not valid in cases where the two memories partially overlap; however,
  // that is not a problem here, because we know that one of the memories
  // is a full frame index.
  if (OpNum == 0 && MI->hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI->memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addFrameIndex(FrameIndex).addImm(0).addImm(Size)
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC))
          .addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm())
          .addImm(Size).addFrameIndex(FrameIndex).addImm(0)
          .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI->getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.addOperand(MI->getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      return MIB;
    }
  }

  return 0;
}

MachineInstr *
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
                                        const SmallVectorImpl<unsigned> &Ops,
                                        MachineInstr *LoadMI) const {
  return 0;
}

bool
SystemZInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  default:
    return false;
  }
}

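// Invert a branch condition by flipping all four bits of the CC mask.
// For example, a mask of 8 (branch if CC is 0) becomes 7 (branch if CC
// is 1, 2 or 3).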
bool SystemZInstrInfo::
ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid branch condition!");
  Cond[0].setImm(Cond[0].getImm() ^ SystemZ::CCMASK_ANY);
  return false;
}

uint64_t SystemZInstrInfo::getInstSizeInBytes(const MachineInstr *MI) const {
  if (MI->getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI->getParent()->getParent();
    const char *AsmStr = MI->getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI->getDesc().getSize();
}

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case SystemZ::BR:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             &MI->getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal,
                             MI->getOperand(0).getImm(), &MI->getOperand(1));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, MI->getOperand(2).getImm(),
                             &MI->getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST32;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}

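// Return the fused compare-and-branch form of compare opcode Opcode, or 0
// if there is none.  For the compare-immediate opcodes this also requires
// the compared value to fit the branch's signed 8-bit immediate, which is
// checked against MI when MI is nonnull.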
unsigned SystemZInstrInfo::getCompareAndBranch(unsigned Opcode,
                                               const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CR:
    return SystemZ::CRJ;
  case SystemZ::CGR:
    return SystemZ::CGRJ;
  case SystemZ::CHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CIJ : 0;
  case SystemZ::CGHI:
    return MI && isInt<8>(MI->getOperand(1).getImm()) ? SystemZ::CGIJ : 0;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}