//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/Function.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
  : AMDGPUInstrInfo(ST), RI(), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
                                        int64_t &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = getOpRegClass(LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    const MachineOperand *AddrReg =
        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg =
        getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    BaseReg = AddrReg->getReg();
    Offset = 0;
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                      MachineInstr &SecondLdSt,
                                      unsigned NumLoads) const {
  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  }

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt))) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * DstRC->getSize()) <= LoadClusterThreshold;
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
        .addImm(-1)
        .addImm(0);
      return;
    }

    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
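        // The source VGPR holds a per-lane boolean here; comparing it against
        // zero writes the corresponding lane mask into VCC implicitly.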
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RC->getSize() > 4) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);

  // Copy the sub-registers in an order that does not clobber the source when
  // the source and destination register tuples overlap.
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == SubIndices.size() - 1)
      Builder.addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    Builder.addReg(SrcReg, RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ?
           AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(RC->getSize()));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
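      // Since m0 holds the offset in that case, record it as clobbered by the
      // spill pseudo.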
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg())  // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(RC->getSize()));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && RC->getSize() == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getScratchWaveOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)              // vaddr
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addImm(0)                              // offset
    .addMemOperand(MMO);
}

/// \param @Offset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
  MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
  unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
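      // For now, scavenge two independent 32-bit SGPRs to hold the loaded
      // group counts.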
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;
  }
  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    unsigned VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
      BuildMI(MBB, MI, DL, MovRelDesc)
        .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
        .addOperand(MI.getOperand(2))
        .addReg(VecReg, RegState::ImplicitDefine)
        .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
      MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                   .addReg(RegLo)
                   .addOperand(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                              .addReg(RegHi);
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
      MIB.addOperand(MI.getOperand(2));

    Bundler.append(MIB);
    llvm::finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  unsigned Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!MI.isCommutable())
    return false;

  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal
    // so there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately
  // after s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  } else {
    // Backwards branch.
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
    .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short
  // branch into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
                                       MachineBasicBlock::iterator(GetPC), 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);

  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  BranchPredicate Pred = getBranchPredicate(I->getOpcode());
  if (Pred == INVALID_BR)
    return true;

  MachineBasicBlock *CondBB = I->getOperand(0).getMBB();
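  // Cond[0] carries the predicate as an immediate and Cond[1] the condition
  // register; insertBranch uses them to rebuild the branch.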
  Cond.push_back(MachineOperand::CreateImm(Pred));
  Cond.push_back(I->getOperand(1)); // Save the branch register.

  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  if (I == MBB.end())
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {

  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    MachineOperand &CondReg = CondBr->getOperand(1);
    CondReg.setIsUndef(Cond[1].isUndef());
    CondReg.setIsKill(Cond[1].isKill());

    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  assert(TBB && FBB);

  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
    .addMBB(TBB);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  MachineOperand &CondReg = CondBr->getOperand(1);
  CondReg.setIsUndef(Cond[1].isUndef());
  CondReg.setIsKill(Cond[1].isKill());

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

bool SIInstrInfo::reverseBranchCondition(
  SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2);
  Cond[0].setImm(-Cond[0].getImm());
  return false;
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned Opc = UseMI.getOpcode();
  if (Opc == AMDGPU::COPY) {
    bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
    switch (DefMI.getOpcode()) {
    default:
      return false;
    case AMDGPU::S_MOV_B64:
      // TODO: We could fold 64-bit immediates, but this gets complicated
      // when there are sub-registers.
      return false;

    case AMDGPU::V_MOV_B32_e32:
    case AMDGPU::S_MOV_B32:
      break;
    }
    unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
    const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
    assert(ImmOp);
    // FIXME: We could handle FrameIndex values here.
    if (!ImmOp->isImm()) {
      return false;
    }
    UseMI.setDesc(get(NewOpc));
    UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
    UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
    return true;
  }

  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) {
    bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64;

    // Don't fold if we are using source modifiers. The new VOP2 instructions
    // don't have them.
    if (hasModifiersSet(UseMI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(UseMI, AMDGPU::OpName::src1_modifiers) ||
        hasModifiersSet(UseMI, AMDGPU::OpName::src2_modifiers)) {
      return false;
    }

    const MachineOperand &ImmOp = DefMI.getOperand(1);

    // If this is a free constant, there's no reason to do this.
    // TODO: We could fold this here instead of letting SIFoldOperands do it
    // later.
    if (isInlineConstant(ImmOp, 4))
      return false;

    MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_{f16, f32}.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
        return false;

      if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
        return false;

      // We need to swap operands 0 and 1 since madmk constant is at operand 1.

      const int64_t Imm = DefMI.getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      Src1->ChangeToImmediate(Imm);

      removeModOperands(UseMI);
      UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_{f16, f32}.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      if (!Src0->isImm() &&
          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
        return false;

      if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
        return false;

      const int64_t Imm = DefMI.getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      // ChangingToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(UseMI);
      UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ?
                 WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa,
                                               MachineInstr &MIb) const {
  unsigned BaseReg0, BaseReg1;
  int64_t Offset0, Offset1;

  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {

    if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
      // FIXME: Handle ds_read2 / ds_write2.
      return false;
    }
    unsigned Width0 = (*MIa.memoperands_begin())->getSize();
    unsigned Width1 = (*MIb.memoperands_begin())->getSize();
    if (BaseReg0 == BaseReg1 &&
        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa,
                                                  MachineInstr &MIb,
                                                  AliasAnalysis *AA) const {
  assert((MIa.mayLoad() || MIa.mayStore()) &&
         "MIa must load from or modify a memory location");
  assert((MIb.mayLoad() || MIb.mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) {
    const MachineMemOperand *MMOa = *MIa.memoperands_begin();
    const MachineMemOperand *MMOb = *MIb.memoperands_begin();
    if (MMOa->getValue() && MMOb->getValue()) {
      MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo());
      MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo());
      if (!AA->alias(LocA, LocB))
        return true;
    }
  }

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
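  // Fall back to checks based on the instruction encoding: same-kind accesses
  // are compared by base register and offset, while FLAT accesses are
  // conservatively assumed to alias everything.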
  if (isDS(MIa)) {
    if (isDS(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb);
  }

  if (isMUBUF(MIa) || isMTBUF(MIa)) {
    if (isMUBUF(MIb) || isMTBUF(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isSMRD(MIb);
  }

  if (isSMRD(MIa)) {
    if (isSMRD(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isMUBUF(MIa) && !isMTBUF(MIa);
  }

  if (isFLAT(MIa)) {
    if (isFLAT(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineInstr &MI,
                                                 LiveVariables *LV) const {
  bool IsF16 = false;

  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case AMDGPU::V_MAC_F16_e64:
    IsF16 = true;
  case AMDGPU::V_MAC_F32_e64:
    break;
  case AMDGPU::V_MAC_F16_e32:
    IsF16 = true;
  case AMDGPU::V_MAC_F32_e32: {
    const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
    if (Src0->isImm() && !isInlineConstant(*Src0, 4))
      return nullptr;
    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
  const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);

  return BuildMI(*MBB, MI, MI.getDebugLoc(),
                 get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32))
      .addOperand(*Dst)
      .addImm(0) // Src0 mods
      .addOperand(*Src0)
      .addImm(0) // Src1 mods
      .addOperand(*Src1)
      .addImm(0) // Src2 mods
      .addOperand(*Src2)
      .addImm(0) // clamp
      .addImm(0); // omod
}

// It's not generally safe to move VALU instructions across these since it will
// start using the register as a base index rather than directly.
// XXX - Why isn't hasSideEffects sufficient for these?
static bool changesVGPRIndexingMode(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_SET_GPR_IDX_ON:
  case AMDGPU::S_SET_GPR_IDX_MODE:
  case AMDGPU::S_SET_GPR_IDX_OFF:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                       const MachineBasicBlock *MBB,
                                       const MachineFunction &MF) const {
  // XXX - Do we want the SP check in the base implementation?

  // Target-independent instructions do not have an implicit-use of EXEC, even
  // when they operate on VGPRs. Treating EXEC modifications as scheduling
  // boundaries prevents incorrect movements of such instructions.
  return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) ||
         MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
         changesVGPRIndexingMode(MI);
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int64_t SVal = Imm.getSExtValue();
  if (SVal >= -16 && SVal <= 64)
    return true;

  if (Imm.getBitWidth() == 64) {
    uint64_t Val = Imm.getZExtValue();
    return (DoubleToBits(0.0) == Val) ||
           (DoubleToBits(1.0) == Val) ||
           (DoubleToBits(-1.0) == Val) ||
           (DoubleToBits(0.5) == Val) ||
           (DoubleToBits(-0.5) == Val) ||
           (DoubleToBits(2.0) == Val) ||
           (DoubleToBits(-2.0) == Val) ||
           (DoubleToBits(4.0) == Val) ||
           (DoubleToBits(-4.0) == Val) ||
           (ST.hasInv2PiInlineImm() && Val == 0x3fc45f306dc9c882);
  }

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.
  uint32_t Val = Imm.getZExtValue();

  return (FloatToBits(0.0f) == Val) ||
         (FloatToBits(1.0f) == Val) ||
         (FloatToBits(-1.0f) == Val) ||
         (FloatToBits(0.5f) == Val) ||
         (FloatToBits(-0.5f) == Val) ||
         (FloatToBits(2.0f) == Val) ||
         (FloatToBits(-2.0f) == Val) ||
         (FloatToBits(4.0f) == Val) ||
         (FloatToBits(-4.0f) == Val) ||
         (ST.hasInv2PiInlineImm() && Val == 0x3e22f983);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   unsigned OpSize) const {
  if (MO.isImm()) {
    // MachineOperand provides no way to tell the true operand size, since it
    // only records a 64-bit value. We need to know the size to determine if a
    // 32-bit floating point immediate bit pattern is legal for an integer
    // immediate. It would be for any 32-bit integer operand, but would not be
    // for a 64-bit one.
1728 1729 unsigned BitSize = 8 * OpSize; 1730 return isInlineConstant(APInt(BitSize, MO.getImm(), true)); 1731 } 1732 1733 return false; 1734 } 1735 1736 bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO, 1737 unsigned OpSize) const { 1738 return MO.isImm() && !isInlineConstant(MO, OpSize); 1739 } 1740 1741 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 1742 unsigned OpSize) const { 1743 switch (MO.getType()) { 1744 case MachineOperand::MO_Register: 1745 return false; 1746 case MachineOperand::MO_Immediate: 1747 return !isInlineConstant(MO, OpSize); 1748 case MachineOperand::MO_FrameIndex: 1749 case MachineOperand::MO_MachineBasicBlock: 1750 case MachineOperand::MO_ExternalSymbol: 1751 case MachineOperand::MO_GlobalAddress: 1752 case MachineOperand::MO_MCSymbol: 1753 return true; 1754 default: 1755 llvm_unreachable("unexpected operand type"); 1756 } 1757 } 1758 1759 static bool compareMachineOp(const MachineOperand &Op0, 1760 const MachineOperand &Op1) { 1761 if (Op0.getType() != Op1.getType()) 1762 return false; 1763 1764 switch (Op0.getType()) { 1765 case MachineOperand::MO_Register: 1766 return Op0.getReg() == Op1.getReg(); 1767 case MachineOperand::MO_Immediate: 1768 return Op0.getImm() == Op1.getImm(); 1769 default: 1770 llvm_unreachable("Didn't expect to be comparing these operand types"); 1771 } 1772 } 1773 1774 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 1775 const MachineOperand &MO) const { 1776 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; 1777 1778 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 1779 1780 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 1781 return true; 1782 1783 if (OpInfo.RegClass < 0) 1784 return false; 1785 1786 unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize(); 1787 if (isLiteralConstant(MO, OpSize)) 1788 return RI.opCanUseLiteralConstant(OpInfo.OperandType); 1789 1790 return RI.opCanUseInlineConstant(OpInfo.OperandType); 1791 } 1792 1793 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 1794 int Op32 = AMDGPU::getVOPe32(Opcode); 1795 if (Op32 == -1) 1796 return false; 1797 1798 return pseudoToMCOpcode(Op32) != -1; 1799 } 1800 1801 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 1802 // The src0_modifier operand is present on all instructions 1803 // that have modifiers. 1804 1805 return AMDGPU::getNamedOperandIdx(Opcode, 1806 AMDGPU::OpName::src0_modifiers) != -1; 1807 } 1808 1809 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 1810 unsigned OpName) const { 1811 const MachineOperand *Mods = getNamedOperand(MI, OpName); 1812 return Mods && Mods->getImm(); 1813 } 1814 1815 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 1816 const MachineOperand &MO, 1817 unsigned OpSize) const { 1818 // Literal constants use the constant bus. 1819 if (isLiteralConstant(MO, OpSize)) 1820 return true; 1821 1822 if (!MO.isReg() || !MO.isUse()) 1823 return false; 1824 1825 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 1826 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 1827 1828 // FLAT_SCR is just an SGPR pair. 1829 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) 1830 return true; 1831 1832 // EXEC register uses the constant bus. 
1833 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) 1834 return true; 1835 1836 // SGPRs use the constant bus 1837 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || 1838 (!MO.isImplicit() && 1839 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || 1840 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); 1841 } 1842 1843 static unsigned findImplicitSGPRRead(const MachineInstr &MI) { 1844 for (const MachineOperand &MO : MI.implicit_operands()) { 1845 // We only care about reads. 1846 if (MO.isDef()) 1847 continue; 1848 1849 switch (MO.getReg()) { 1850 case AMDGPU::VCC: 1851 case AMDGPU::M0: 1852 case AMDGPU::FLAT_SCR: 1853 return MO.getReg(); 1854 1855 default: 1856 break; 1857 } 1858 } 1859 1860 return AMDGPU::NoRegister; 1861 } 1862 1863 static bool shouldReadExec(const MachineInstr &MI) { 1864 if (SIInstrInfo::isVALU(MI)) { 1865 switch (MI.getOpcode()) { 1866 case AMDGPU::V_READLANE_B32: 1867 case AMDGPU::V_READLANE_B32_si: 1868 case AMDGPU::V_READLANE_B32_vi: 1869 case AMDGPU::V_WRITELANE_B32: 1870 case AMDGPU::V_WRITELANE_B32_si: 1871 case AMDGPU::V_WRITELANE_B32_vi: 1872 return false; 1873 } 1874 1875 return true; 1876 } 1877 1878 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 1879 SIInstrInfo::isSALU(MI) || 1880 SIInstrInfo::isSMRD(MI)) 1881 return false; 1882 1883 return true; 1884 } 1885 1886 static bool isSubRegOf(const SIRegisterInfo &TRI, 1887 const MachineOperand &SuperVec, 1888 const MachineOperand &SubReg) { 1889 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) 1890 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 1891 1892 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 1893 SubReg.getReg() == SuperVec.getReg(); 1894 } 1895 1896 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 1897 StringRef &ErrInfo) const { 1898 uint16_t Opcode = MI.getOpcode(); 1899 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 1900 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 1901 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 1902 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 1903 1904 // Make sure the number of operands is correct. 1905 const MCInstrDesc &Desc = get(Opcode); 1906 if (!Desc.isVariadic() && 1907 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 1908 ErrInfo = "Instruction has wrong number of operands."; 1909 return false; 1910 } 1911 1912 if (MI.isInlineAsm()) { 1913 // Verify register classes for inlineasm constraints. 1914 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 1915 I != E; ++I) { 1916 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 1917 if (!RC) 1918 continue; 1919 1920 const MachineOperand &Op = MI.getOperand(I); 1921 if (!Op.isReg()) 1922 continue; 1923 1924 unsigned Reg = Op.getReg(); 1925 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) { 1926 ErrInfo = "inlineasm operand has incorrect register class."; 1927 return false; 1928 } 1929 } 1930 1931 return true; 1932 } 1933 1934 // Make sure the register classes are correct. 1935 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 1936 if (MI.getOperand(i).isFPImm()) { 1937 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 1938 "all fp values to integers."; 1939 return false; 1940 } 1941 1942 int RegClass = Desc.OpInfo[i].RegClass; 1943 1944 switch (Desc.OpInfo[i].OperandType) { 1945 case MCOI::OPERAND_REGISTER: 1946 if (MI.getOperand(i).isImm()) { 1947 ErrInfo = "Illegal immediate value for operand."; 1948 return false; 1949 } 1950 break; 1951 case AMDGPU::OPERAND_REG_IMM32_INT: 1952 case AMDGPU::OPERAND_REG_IMM32_FP: 1953 break; 1954 case AMDGPU::OPERAND_REG_INLINE_C_INT: 1955 case AMDGPU::OPERAND_REG_INLINE_C_FP: 1956 if (isLiteralConstant(MI.getOperand(i), 1957 RI.getRegClass(RegClass)->getSize())) { 1958 ErrInfo = "Illegal immediate value for operand."; 1959 return false; 1960 } 1961 break; 1962 case MCOI::OPERAND_IMMEDIATE: 1963 case AMDGPU::OPERAND_KIMM32: 1964 // Check if this operand is an immediate. 1965 // FrameIndex operands will be replaced by immediates, so they are 1966 // allowed. 1967 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 1968 ErrInfo = "Expected immediate, but got non-immediate"; 1969 return false; 1970 } 1971 LLVM_FALLTHROUGH; 1972 default: 1973 continue; 1974 } 1975 1976 if (!MI.getOperand(i).isReg()) 1977 continue; 1978 1979 if (RegClass != -1) { 1980 unsigned Reg = MI.getOperand(i).getReg(); 1981 if (Reg == AMDGPU::NoRegister || 1982 TargetRegisterInfo::isVirtualRegister(Reg)) 1983 continue; 1984 1985 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 1986 if (!RC->contains(Reg)) { 1987 ErrInfo = "Operand has incorrect register class."; 1988 return false; 1989 } 1990 } 1991 } 1992 1993 // Verify VOP* 1994 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) { 1995 // Only look at the true operands. Only a real operand can use the constant 1996 // bus, and we don't want to check pseudo-operands like the source modifier 1997 // flags. 1998 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 1999 2000 unsigned ConstantBusCount = 0; 2001 2002 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 2003 ++ConstantBusCount; 2004 2005 unsigned SGPRUsed = findImplicitSGPRRead(MI); 2006 if (SGPRUsed != AMDGPU::NoRegister) 2007 ++ConstantBusCount; 2008 2009 for (int OpIdx : OpIndices) { 2010 if (OpIdx == -1) 2011 break; 2012 const MachineOperand &MO = MI.getOperand(OpIdx); 2013 if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) { 2014 if (MO.isReg()) { 2015 if (MO.getReg() != SGPRUsed) 2016 ++ConstantBusCount; 2017 SGPRUsed = MO.getReg(); 2018 } else { 2019 ++ConstantBusCount; 2020 } 2021 } 2022 } 2023 if (ConstantBusCount > 1) { 2024 ErrInfo = "VOP* instruction uses the constant bus more than once"; 2025 return false; 2026 } 2027 } 2028 2029 // Verify misc. restrictions on specific instructions. 
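  // (e.g. the V_DIV_SCALE_{F32,F64} source-repetition rule and the 16-bit
  // range of SOPK immediates checked below.)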
2030 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 2031 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 2032 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 2033 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 2034 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 2035 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 2036 if (!compareMachineOp(Src0, Src1) && 2037 !compareMachineOp(Src0, Src2)) { 2038 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 2039 return false; 2040 } 2041 } 2042 } 2043 2044 if (isSOPK(MI)) { 2045 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm(); 2046 if (sopkIsZext(MI)) { 2047 if (!isUInt<16>(Imm)) { 2048 ErrInfo = "invalid immediate for SOPK instruction"; 2049 return false; 2050 } 2051 } else { 2052 if (!isInt<16>(Imm)) { 2053 ErrInfo = "invalid immediate for SOPK instruction"; 2054 return false; 2055 } 2056 } 2057 } 2058 2059 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 2060 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 2061 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 2062 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 2063 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 2064 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 2065 2066 const unsigned StaticNumOps = Desc.getNumOperands() + 2067 Desc.getNumImplicitUses(); 2068 const unsigned NumImplicitOps = IsDst ? 2 : 1; 2069 2070 // Allow additional implicit operands. This allows a fixup done by the post 2071 // RA scheduler where the main implicit operand is killed and implicit-defs 2072 // are added for sub-registers that remain live after this instruction. 2073 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 2074 ErrInfo = "missing implicit register operands"; 2075 return false; 2076 } 2077 2078 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2079 if (IsDst) { 2080 if (!Dst->isUse()) { 2081 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 2082 return false; 2083 } 2084 2085 unsigned UseOpIdx; 2086 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 2087 UseOpIdx != StaticNumOps + 1) { 2088 ErrInfo = "movrel implicit operands should be tied"; 2089 return false; 2090 } 2091 } 2092 2093 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 2094 const MachineOperand &ImpUse 2095 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 2096 if (!ImpUse.isReg() || !ImpUse.isUse() || 2097 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 2098 ErrInfo = "src0 should be subreg of implicit vector use"; 2099 return false; 2100 } 2101 } 2102 2103 // Make sure we aren't losing exec uses in the td files. This mostly requires 2104 // being careful when using let Uses to try to add other use registers. 2105 if (shouldReadExec(MI)) { 2106 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 2107 ErrInfo = "VALU instruction does not implicitly read exec mask"; 2108 return false; 2109 } 2110 } 2111 2112 if (isSMRD(MI)) { 2113 if (MI.mayStore()) { 2114 // The register offset form of scalar stores may only use m0 as the 2115 // soffset register. 
2116 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 2117 if (Soff && Soff->getReg() != AMDGPU::M0) { 2118 ErrInfo = "scalar stores must use m0 as offset register"; 2119 return false; 2120 } 2121 } 2122 } 2123 2124 return true; 2125 } 2126 2127 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) { 2128 switch (MI.getOpcode()) { 2129 default: return AMDGPU::INSTRUCTION_LIST_END; 2130 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 2131 case AMDGPU::COPY: return AMDGPU::COPY; 2132 case AMDGPU::PHI: return AMDGPU::PHI; 2133 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 2134 case AMDGPU::S_MOV_B32: 2135 return MI.getOperand(1).isReg() ? 2136 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 2137 case AMDGPU::S_ADD_I32: 2138 case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32; 2139 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32; 2140 case AMDGPU::S_SUB_I32: 2141 case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32; 2142 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 2143 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; 2144 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 2145 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 2146 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 2147 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 2148 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 2149 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 2150 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 2151 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 2152 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 2153 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 2154 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 2155 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 2156 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 2157 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 2158 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 2159 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 2160 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 2161 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 2162 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 2163 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 2164 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 2165 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 2166 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 2167 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 2168 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 2169 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 2170 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 2171 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 2172 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 2173 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 2174 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 2175 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 2176 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 2177 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 2178 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 2179 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 2180 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 2181 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 2182 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 2183 case AMDGPU::S_CBRANCH_SCC0: return 
AMDGPU::S_CBRANCH_VCCZ; 2184 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 2185 } 2186 } 2187 2188 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const { 2189 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END; 2190 } 2191 2192 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 2193 unsigned OpNo) const { 2194 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 2195 const MCInstrDesc &Desc = get(MI.getOpcode()); 2196 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 2197 Desc.OpInfo[OpNo].RegClass == -1) { 2198 unsigned Reg = MI.getOperand(OpNo).getReg(); 2199 2200 if (TargetRegisterInfo::isVirtualRegister(Reg)) 2201 return MRI.getRegClass(Reg); 2202 return RI.getPhysRegClass(Reg); 2203 } 2204 2205 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 2206 return RI.getRegClass(RCID); 2207 } 2208 2209 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { 2210 switch (MI.getOpcode()) { 2211 case AMDGPU::COPY: 2212 case AMDGPU::REG_SEQUENCE: 2213 case AMDGPU::PHI: 2214 case AMDGPU::INSERT_SUBREG: 2215 return RI.hasVGPRs(getOpRegClass(MI, 0)); 2216 default: 2217 return RI.hasVGPRs(getOpRegClass(MI, OpNo)); 2218 } 2219 } 2220 2221 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 2222 MachineBasicBlock::iterator I = MI; 2223 MachineBasicBlock *MBB = MI.getParent(); 2224 MachineOperand &MO = MI.getOperand(OpIdx); 2225 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 2226 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 2227 const TargetRegisterClass *RC = RI.getRegClass(RCID); 2228 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 2229 if (MO.isReg()) 2230 Opcode = AMDGPU::COPY; 2231 else if (RI.isSGPRClass(RC)) 2232 Opcode = AMDGPU::S_MOV_B32; 2233 2234 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 2235 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 2236 VRC = &AMDGPU::VReg_64RegClass; 2237 else 2238 VRC = &AMDGPU::VGPR_32RegClass; 2239 2240 unsigned Reg = MRI.createVirtualRegister(VRC); 2241 DebugLoc DL = MBB->findDebugLoc(I); 2242 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).addOperand(MO); 2243 MO.ChangeToRegister(Reg, false); 2244 } 2245 2246 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 2247 MachineRegisterInfo &MRI, 2248 MachineOperand &SuperReg, 2249 const TargetRegisterClass *SuperRC, 2250 unsigned SubIdx, 2251 const TargetRegisterClass *SubRC) 2252 const { 2253 MachineBasicBlock *MBB = MI->getParent(); 2254 DebugLoc DL = MI->getDebugLoc(); 2255 unsigned SubReg = MRI.createVirtualRegister(SubRC); 2256 2257 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 2258 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 2259 .addReg(SuperReg.getReg(), 0, SubIdx); 2260 return SubReg; 2261 } 2262 2263 // Just in case the super register is itself a sub-register, copy it to a new 2264 // value so we don't need to worry about merging its subreg index with the 2265 // SubIdx passed to this function. The register coalescer should be able to 2266 // eliminate this extra copy. 
2267 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); 2268 2269 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 2270 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 2271 2272 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 2273 .addReg(NewSuperReg, 0, SubIdx); 2274 2275 return SubReg; 2276 } 2277 2278 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 2279 MachineBasicBlock::iterator MII, 2280 MachineRegisterInfo &MRI, 2281 MachineOperand &Op, 2282 const TargetRegisterClass *SuperRC, 2283 unsigned SubIdx, 2284 const TargetRegisterClass *SubRC) const { 2285 if (Op.isImm()) { 2286 if (SubIdx == AMDGPU::sub0) 2287 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 2288 if (SubIdx == AMDGPU::sub1) 2289 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 2290 2291 llvm_unreachable("Unhandled register index for immediate"); 2292 } 2293 2294 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 2295 SubIdx, SubRC); 2296 return MachineOperand::CreateReg(SubReg, false); 2297 } 2298 2299 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 2300 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 2301 assert(Inst.getNumExplicitOperands() == 3); 2302 MachineOperand Op1 = Inst.getOperand(1); 2303 Inst.RemoveOperand(1); 2304 Inst.addOperand(Op1); 2305 } 2306 2307 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 2308 const MCOperandInfo &OpInfo, 2309 const MachineOperand &MO) const { 2310 if (!MO.isReg()) 2311 return false; 2312 2313 unsigned Reg = MO.getReg(); 2314 const TargetRegisterClass *RC = 2315 TargetRegisterInfo::isVirtualRegister(Reg) ? 2316 MRI.getRegClass(Reg) : 2317 RI.getPhysRegClass(Reg); 2318 2319 const SIRegisterInfo *TRI = 2320 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 2321 RC = TRI->getSubRegClass(RC, MO.getSubReg()); 2322 2323 // In order to be legal, the common sub-class must be equal to the 2324 // class of the current operand. For example: 2325 // 2326 // v_mov_b32 s0 ; Operand defined as vsrc_b32 2327 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL 2328 // 2329 // s_sendmsg 0, s0 ; Operand defined as m0reg 2330 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL 2331 2332 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; 2333 } 2334 2335 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 2336 const MCOperandInfo &OpInfo, 2337 const MachineOperand &MO) const { 2338 if (MO.isReg()) 2339 return isLegalRegOperand(MRI, OpInfo, MO); 2340 2341 // Handle non-register types that are treated like immediates. 2342 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 2343 return true; 2344 } 2345 2346 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 2347 const MachineOperand *MO) const { 2348 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 2349 const MCInstrDesc &InstDesc = MI.getDesc(); 2350 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 2351 const TargetRegisterClass *DefinedRC = 2352 OpInfo.RegClass != -1 ? 
RI.getRegClass(OpInfo.RegClass) : nullptr; 2353 if (!MO) 2354 MO = &MI.getOperand(OpIdx); 2355 2356 if (isVALU(MI) && usesConstantBus(MRI, *MO, DefinedRC->getSize())) { 2357 2358 RegSubRegPair SGPRUsed; 2359 if (MO->isReg()) 2360 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg()); 2361 2362 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 2363 if (i == OpIdx) 2364 continue; 2365 const MachineOperand &Op = MI.getOperand(i); 2366 if (Op.isReg()) { 2367 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && 2368 usesConstantBus(MRI, Op, getOpSize(MI, i))) { 2369 return false; 2370 } 2371 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 2372 return false; 2373 } 2374 } 2375 } 2376 2377 if (MO->isReg()) { 2378 assert(DefinedRC); 2379 return isLegalRegOperand(MRI, OpInfo, *MO); 2380 } 2381 2382 // Handle non-register types that are treated like immediates. 2383 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); 2384 2385 if (!DefinedRC) { 2386 // This operand expects an immediate. 2387 return true; 2388 } 2389 2390 return isImmOperandLegal(MI, OpIdx, *MO); 2391 } 2392 2393 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 2394 MachineInstr &MI) const { 2395 unsigned Opc = MI.getOpcode(); 2396 const MCInstrDesc &InstrDesc = get(Opc); 2397 2398 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2399 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2400 2401 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 2402 // we need to only have one constant bus use. 2403 // 2404 // Note we do not need to worry about literal constants here. They are 2405 // disabled for the operand type for instructions because they will always 2406 // violate the one constant bus use rule. 2407 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 2408 if (HasImplicitSGPR) { 2409 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2410 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2411 2412 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) 2413 legalizeOpWithMove(MI, Src0Idx); 2414 } 2415 2416 // VOP2 src0 instructions support all operand types, so we don't need to check 2417 // their legality. If src1 is already legal, we don't need to do anything. 2418 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 2419 return; 2420 2421 // We do not use commuteInstruction here because it is too aggressive and will 2422 // commute if it is possible. We only want to commute here if it improves 2423 // legality. This can be called a fairly large number of times so don't waste 2424 // compile time pointlessly swapping and checking legality again. 2425 if (HasImplicitSGPR || !MI.isCommutable()) { 2426 legalizeOpWithMove(MI, Src1Idx); 2427 return; 2428 } 2429 2430 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2431 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2432 2433 // If src0 can be used as src1, commuting will make the operands legal. 2434 // Otherwise we have to give up and insert a move. 2435 // 2436 // TODO: Other immediate-like operand kinds could be commuted if there was a 2437 // MachineOperand::ChangeTo* for them. 
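  //
  // Illustrative example: for "v_add_i32 %dst, %vgpr0, %sgpr1" the SGPR in
  // src1 cannot be encoded by VOP2, but src0 (a VGPR) is legal as src1, so
  // swapping the two operands and switching to the commuted opcode avoids
  // inserting a v_mov_b32.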
2438 if ((!Src1.isImm() && !Src1.isReg()) || 2439 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 2440 legalizeOpWithMove(MI, Src1Idx); 2441 return; 2442 } 2443 2444 int CommutedOpc = commuteOpcode(MI); 2445 if (CommutedOpc == -1) { 2446 legalizeOpWithMove(MI, Src1Idx); 2447 return; 2448 } 2449 2450 MI.setDesc(get(CommutedOpc)); 2451 2452 unsigned Src0Reg = Src0.getReg(); 2453 unsigned Src0SubReg = Src0.getSubReg(); 2454 bool Src0Kill = Src0.isKill(); 2455 2456 if (Src1.isImm()) 2457 Src0.ChangeToImmediate(Src1.getImm()); 2458 else if (Src1.isReg()) { 2459 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 2460 Src0.setSubReg(Src1.getSubReg()); 2461 } else 2462 llvm_unreachable("Should only have register or immediate operands"); 2463 2464 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 2465 Src1.setSubReg(Src0SubReg); 2466 } 2467 2468 // Legalize VOP3 operands. Because all operand types are supported for any 2469 // operand, and since literal constants are not allowed and should never be 2470 // seen, we only need to worry about inserting copies if we use multiple SGPR 2471 // operands. 2472 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 2473 MachineInstr &MI) const { 2474 unsigned Opc = MI.getOpcode(); 2475 2476 int VOP3Idx[3] = { 2477 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 2478 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 2479 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 2480 }; 2481 2482 // Find the one SGPR operand we are allowed to use. 2483 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 2484 2485 for (unsigned i = 0; i < 3; ++i) { 2486 int Idx = VOP3Idx[i]; 2487 if (Idx == -1) 2488 break; 2489 MachineOperand &MO = MI.getOperand(Idx); 2490 2491 // We should never see a VOP3 instruction with an illegal immediate operand. 2492 if (!MO.isReg()) 2493 continue; 2494 2495 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 2496 continue; // VGPRs are legal 2497 2498 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { 2499 SGPRReg = MO.getReg(); 2500 // We can use one SGPR in each VOP3 instruction. 2501 continue; 2502 } 2503 2504 // If we make it this far, then the operand is not legal and we must 2505 // legalize it. 2506 legalizeOpWithMove(MI, Idx); 2507 } 2508 } 2509 2510 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, 2511 MachineRegisterInfo &MRI) const { 2512 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 2513 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 2514 unsigned DstReg = MRI.createVirtualRegister(SRC); 2515 unsigned SubRegs = VRC->getSize() / 4; 2516 2517 SmallVector<unsigned, 8> SRegs; 2518 for (unsigned i = 0; i < SubRegs; ++i) { 2519 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2520 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 2521 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 2522 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 2523 SRegs.push_back(SGPR); 2524 } 2525 2526 MachineInstrBuilder MIB = 2527 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 2528 get(AMDGPU::REG_SEQUENCE), DstReg); 2529 for (unsigned i = 0; i < SubRegs; ++i) { 2530 MIB.addReg(SRegs[i]); 2531 MIB.addImm(RI.getSubRegFromChannel(i)); 2532 } 2533 return DstReg; 2534 } 2535 2536 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 2537 MachineInstr &MI) const { 2538 2539 // If the pointer is store in VGPRs, then we need to move them to 2540 // SGPRs using v_readfirstlane. 
This is safe because we only select
2541   // loads with uniform pointers to SMRD instructions, so we know the
2542   // pointer value is uniform.
2543   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
2544   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
2545     unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
2546     SBase->setReg(SGPR);
2547   }
2548 }
2549
2550 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
2551                                          MachineBasicBlock::iterator I,
2552                                          const TargetRegisterClass *DstRC,
2553                                          MachineOperand &Op,
2554                                          MachineRegisterInfo &MRI,
2555                                          const DebugLoc &DL) const {
2556
2557   unsigned OpReg = Op.getReg();
2558   unsigned OpSubReg = Op.getSubReg();
2559
2560   const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
2561       RI.getRegClassForReg(MRI, OpReg), OpSubReg);
2562
2563   // Check if operand is already the correct register class.
2564   if (DstRC == OpRC)
2565     return;
2566
2567   unsigned DstReg = MRI.createVirtualRegister(DstRC);
2568   MachineInstr *Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg)
2569       .addOperand(Op);
2570
2571   Op.setReg(DstReg);
2572   Op.setSubReg(0);
2573
2574   MachineInstr *Def = MRI.getVRegDef(OpReg);
2575   if (!Def)
2576     return;
2577
2578   // Try to eliminate the copy if it is copying an immediate value.
2579   if (Def->isMoveImmediate())
2580     FoldImmediate(*Copy, *Def, OpReg, &MRI);
2581 }
2582
2583 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const {
2584   MachineFunction &MF = *MI.getParent()->getParent();
2585   MachineRegisterInfo &MRI = MF.getRegInfo();
2586
2587   // Legalize VOP2
2588   if (isVOP2(MI) || isVOPC(MI)) {
2589     legalizeOperandsVOP2(MRI, MI);
2590     return;
2591   }
2592
2593   // Legalize VOP3
2594   if (isVOP3(MI)) {
2595     legalizeOperandsVOP3(MRI, MI);
2596     return;
2597   }
2598
2599   // Legalize SMRD
2600   if (isSMRD(MI)) {
2601     legalizeOperandsSMRD(MRI, MI);
2602     return;
2603   }
2604
2605   // Legalize REG_SEQUENCE and PHI
2606   // The register class of the operands must be the same type as the register
2607   // class of the output.
2608   if (MI.getOpcode() == AMDGPU::PHI) {
2609     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
2610     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2611       if (!MI.getOperand(i).isReg() ||
2612           !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
2613         continue;
2614       const TargetRegisterClass *OpRC =
2615           MRI.getRegClass(MI.getOperand(i).getReg());
2616       if (RI.hasVGPRs(OpRC)) {
2617         VRC = OpRC;
2618       } else {
2619         SRC = OpRC;
2620       }
2621     }
2622
2623     // If any of the operands are VGPR registers, then they must all be VGPRs;
2624     // otherwise we will create illegal VGPR->SGPR copies when legalizing
2625     // them.
2626     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
2627       if (!VRC) {
2628         assert(SRC);
2629         VRC = RI.getEquivalentVGPRClass(SRC);
2630       }
2631       RC = VRC;
2632     } else {
2633       RC = SRC;
2634     }
2635
2636     // Update all the operands so they have the same type.
2637     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
2638       MachineOperand &Op = MI.getOperand(I);
2639       if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
2640         continue;
2641
2642       // MI is a PHI instruction.
2643       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
2644       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
2645
2646       // Avoid creating no-op copies with the same src and dst reg class. These
2647       // confuse some of the machine passes.
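      // legalizeGenericOperand only inserts a COPY when the operand's current
      // register class differs from RC, so operands that already match are
      // left untouched.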
2648 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 2649 } 2650 } 2651 2652 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 2653 // VGPR dest type and SGPR sources, insert copies so all operands are 2654 // VGPRs. This seems to help operand folding / the register coalescer. 2655 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 2656 MachineBasicBlock *MBB = MI.getParent(); 2657 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 2658 if (RI.hasVGPRs(DstRC)) { 2659 // Update all the operands so they are VGPR register classes. These may 2660 // not be the same register class because REG_SEQUENCE supports mixing 2661 // subregister index types e.g. sub0_sub1 + sub2 + sub3 2662 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 2663 MachineOperand &Op = MI.getOperand(I); 2664 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 2665 continue; 2666 2667 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 2668 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 2669 if (VRC == OpRC) 2670 continue; 2671 2672 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 2673 Op.setIsKill(); 2674 } 2675 } 2676 2677 return; 2678 } 2679 2680 // Legalize INSERT_SUBREG 2681 // src0 must have the same register class as dst 2682 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 2683 unsigned Dst = MI.getOperand(0).getReg(); 2684 unsigned Src0 = MI.getOperand(1).getReg(); 2685 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 2686 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 2687 if (DstRC != Src0RC) { 2688 MachineBasicBlock *MBB = MI.getParent(); 2689 MachineOperand &Op = MI.getOperand(1); 2690 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 2691 } 2692 return; 2693 } 2694 2695 // Legalize MIMG and MUBUF/MTBUF for shaders. 2696 // 2697 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 2698 // scratch memory access. In both cases, the legalization never involves 2699 // conversion to the addr64 form. 2700 if (isMIMG(MI) || 2701 (AMDGPU::isShader(MF.getFunction()->getCallingConv()) && 2702 (isMUBUF(MI) || isMTBUF(MI)))) { 2703 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 2704 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { 2705 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); 2706 SRsrc->setReg(SGPR); 2707 } 2708 2709 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 2710 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { 2711 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); 2712 SSamp->setReg(SGPR); 2713 } 2714 return; 2715 } 2716 2717 // Legalize MUBUF* instructions by converting to addr64 form. 2718 // FIXME: If we start using the non-addr64 instructions for compute, we 2719 // may need to legalize them as above. This especially applies to the 2720 // buffer_load_format_* variants and variants with idxen (or bothen). 2721 int SRsrcIdx = 2722 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 2723 if (SRsrcIdx != -1) { 2724 // We have an MUBUF instruction 2725 MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx); 2726 unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass; 2727 if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()), 2728 RI.getRegClass(SRsrcRC))) { 2729 // The operands are legal. 2730 // FIXME: We may need to legalize operands besided srsrc. 
2731       return;
2732     }
2733
2734     MachineBasicBlock &MBB = *MI.getParent();
2735
2736     // Extract the ptr from the resource descriptor.
2737     unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
2738       &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
2739
2740     // Create an empty resource descriptor
2741     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2742     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2743     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2744     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2745     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
2746
2747     // Zero64 = 0
2748     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64)
2749         .addImm(0);
2750
2751     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
2752     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
2753         .addImm(RsrcDataFormat & 0xFFFFFFFF);
2754
2755     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
2756     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
2757         .addImm(RsrcDataFormat >> 32);
2758
2759     // NewSRsrc = {Zero64, SRsrcFormat}
2760     BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
2761         .addReg(Zero64)
2762         .addImm(AMDGPU::sub0_sub1)
2763         .addReg(SRsrcFormatLo)
2764         .addImm(AMDGPU::sub2)
2765         .addReg(SRsrcFormatHi)
2766         .addImm(AMDGPU::sub3);
2767
2768     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
2769     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2770     if (VAddr) {
2771       // This is already an ADDR64 instruction so we need to add the pointer
2772       // extracted from the resource descriptor to the current value of VAddr.
2773       unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2774       unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2775
2776       // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
2777       DebugLoc DL = MI.getDebugLoc();
2778       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
2779         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2780         .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
2781
2782       // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
2783       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
2784         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2785         .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
2786
2787       // NewVaddr = {NewVaddrHi, NewVaddrLo}
2788       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2789           .addReg(NewVAddrLo)
2790           .addImm(AMDGPU::sub0)
2791           .addReg(NewVAddrHi)
2792           .addImm(AMDGPU::sub1);
2793     } else {
2794       // This instruction is the _OFFSET variant, so we need to convert it to
2795       // ADDR64.
2796       assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration()
2797              < SISubtarget::VOLCANIC_ISLANDS &&
2798              "FIXME: Need to emit flat atomics here");
2799
2800       MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
2801       MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
2802       MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
2803       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
2804
2805       // Atomics with return have an additional tied operand and are
2806       // missing some of the special bits.
2807       MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
2808       MachineInstr *Addr64;
2809
2810       if (!VDataIn) {
2811         // Regular buffer load / store.
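        // The _ADDR64 form is built with a placeholder vaddr below; the real
        // NewVAddr register is written into the instruction once the
        // REG_SEQUENCE that defines it has been emitted further down.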
2812 MachineInstrBuilder MIB = 2813 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 2814 .addOperand(*VData) 2815 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. 2816 // This will be replaced later 2817 // with the new value of vaddr. 2818 .addOperand(*SRsrc) 2819 .addOperand(*SOffset) 2820 .addOperand(*Offset); 2821 2822 // Atomics do not have this operand. 2823 if (const MachineOperand *GLC = 2824 getNamedOperand(MI, AMDGPU::OpName::glc)) { 2825 MIB.addImm(GLC->getImm()); 2826 } 2827 2828 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); 2829 2830 if (const MachineOperand *TFE = 2831 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 2832 MIB.addImm(TFE->getImm()); 2833 } 2834 2835 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 2836 Addr64 = MIB; 2837 } else { 2838 // Atomics with return. 2839 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 2840 .addOperand(*VData) 2841 .addOperand(*VDataIn) 2842 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. 2843 // This will be replaced later 2844 // with the new value of vaddr. 2845 .addOperand(*SRsrc) 2846 .addOperand(*SOffset) 2847 .addOperand(*Offset) 2848 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 2849 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 2850 } 2851 2852 MI.removeFromParent(); 2853 2854 // NewVaddr = {NewVaddrHi, NewVaddrLo} 2855 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 2856 NewVAddr) 2857 .addReg(SRsrcPtr, 0, AMDGPU::sub0) 2858 .addImm(AMDGPU::sub0) 2859 .addReg(SRsrcPtr, 0, AMDGPU::sub1) 2860 .addImm(AMDGPU::sub1); 2861 2862 VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr); 2863 SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc); 2864 } 2865 2866 // Update the instruction to use NewVaddr 2867 VAddr->setReg(NewVAddr); 2868 // Update the instruction to use NewSRsrc 2869 SRsrc->setReg(NewSRsrc); 2870 } 2871 } 2872 2873 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const { 2874 SmallVector<MachineInstr *, 128> Worklist; 2875 Worklist.push_back(&TopInst); 2876 2877 while (!Worklist.empty()) { 2878 MachineInstr &Inst = *Worklist.pop_back_val(); 2879 MachineBasicBlock *MBB = Inst.getParent(); 2880 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 2881 2882 unsigned Opcode = Inst.getOpcode(); 2883 unsigned NewOpcode = getVALUOp(Inst); 2884 2885 // Handle some special cases 2886 switch (Opcode) { 2887 default: 2888 break; 2889 case AMDGPU::S_AND_B64: 2890 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64); 2891 Inst.eraseFromParent(); 2892 continue; 2893 2894 case AMDGPU::S_OR_B64: 2895 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64); 2896 Inst.eraseFromParent(); 2897 continue; 2898 2899 case AMDGPU::S_XOR_B64: 2900 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64); 2901 Inst.eraseFromParent(); 2902 continue; 2903 2904 case AMDGPU::S_NOT_B64: 2905 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32); 2906 Inst.eraseFromParent(); 2907 continue; 2908 2909 case AMDGPU::S_BCNT1_I32_B64: 2910 splitScalar64BitBCNT(Worklist, Inst); 2911 Inst.eraseFromParent(); 2912 continue; 2913 2914 case AMDGPU::S_BFE_I64: { 2915 splitScalar64BitBFE(Worklist, Inst); 2916 Inst.eraseFromParent(); 2917 continue; 2918 } 2919 2920 case AMDGPU::S_LSHL_B32: 2921 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 2922 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 2923 swapOperands(Inst); 2924 } 2925 break; 2926 case AMDGPU::S_ASHR_I32: 2927 if (ST.getGeneration() >= 
SISubtarget::VOLCANIC_ISLANDS) { 2928 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 2929 swapOperands(Inst); 2930 } 2931 break; 2932 case AMDGPU::S_LSHR_B32: 2933 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 2934 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 2935 swapOperands(Inst); 2936 } 2937 break; 2938 case AMDGPU::S_LSHL_B64: 2939 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 2940 NewOpcode = AMDGPU::V_LSHLREV_B64; 2941 swapOperands(Inst); 2942 } 2943 break; 2944 case AMDGPU::S_ASHR_I64: 2945 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 2946 NewOpcode = AMDGPU::V_ASHRREV_I64; 2947 swapOperands(Inst); 2948 } 2949 break; 2950 case AMDGPU::S_LSHR_B64: 2951 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 2952 NewOpcode = AMDGPU::V_LSHRREV_B64; 2953 swapOperands(Inst); 2954 } 2955 break; 2956 2957 case AMDGPU::S_ABS_I32: 2958 lowerScalarAbs(Worklist, Inst); 2959 Inst.eraseFromParent(); 2960 continue; 2961 2962 case AMDGPU::S_CBRANCH_SCC0: 2963 case AMDGPU::S_CBRANCH_SCC1: 2964 // Clear unused bits of vcc 2965 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 2966 AMDGPU::VCC) 2967 .addReg(AMDGPU::EXEC) 2968 .addReg(AMDGPU::VCC); 2969 break; 2970 2971 case AMDGPU::S_BFE_U64: 2972 case AMDGPU::S_BFM_B64: 2973 llvm_unreachable("Moving this op to VALU not implemented"); 2974 } 2975 2976 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 2977 // We cannot move this instruction to the VALU, so we should try to 2978 // legalize its operands instead. 2979 legalizeOperands(Inst); 2980 continue; 2981 } 2982 2983 // Use the new VALU Opcode. 2984 const MCInstrDesc &NewDesc = get(NewOpcode); 2985 Inst.setDesc(NewDesc); 2986 2987 // Remove any references to SCC. Vector instructions can't read from it, and 2988 // We're just about to add the implicit use / defs of VCC, and we don't want 2989 // both. 2990 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 2991 MachineOperand &Op = Inst.getOperand(i); 2992 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 2993 Inst.RemoveOperand(i); 2994 addSCCDefUsersToVALUWorklist(Inst, Worklist); 2995 } 2996 } 2997 2998 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 2999 // We are converting these to a BFE, so we need to add the missing 3000 // operands for the size and offset. 3001 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; 3002 Inst.addOperand(MachineOperand::CreateImm(0)); 3003 Inst.addOperand(MachineOperand::CreateImm(Size)); 3004 3005 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 3006 // The VALU version adds the second operand to the result, so insert an 3007 // extra 0 operand. 3008 Inst.addOperand(MachineOperand::CreateImm(0)); 3009 } 3010 3011 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 3012 3013 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 3014 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 3015 // If we need to move this to VGPRs, we need to unpack the second operand 3016 // back into the 2 separate ones for bit offset and width. 3017 assert(OffsetWidthOp.isImm() && 3018 "Scalar BFE is only implemented for constant width and offset"); 3019 uint32_t Imm = OffsetWidthOp.getImm(); 3020 3021 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 3022 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 3023 Inst.RemoveOperand(2); // Remove old immediate. 
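      // For example, an offset/width immediate of 0x00100008 unpacks to
      // Offset = 8 and BitWidth = 16.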
3024 Inst.addOperand(MachineOperand::CreateImm(Offset)); 3025 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 3026 } 3027 3028 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 3029 unsigned NewDstReg = AMDGPU::NoRegister; 3030 if (HasDst) { 3031 // Update the destination register class. 3032 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 3033 if (!NewDstRC) 3034 continue; 3035 3036 unsigned DstReg = Inst.getOperand(0).getReg(); 3037 if (Inst.isCopy() && 3038 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) && 3039 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 3040 // Instead of creating a copy where src and dst are the same register 3041 // class, we just replace all uses of dst with src. These kinds of 3042 // copies interfere with the heuristics MachineSink uses to decide 3043 // whether or not to split a critical edge. Since the pass assumes 3044 // that copies will end up as machine instructions and not be 3045 // eliminated. 3046 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 3047 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 3048 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 3049 Inst.getOperand(0).setReg(DstReg); 3050 continue; 3051 } 3052 3053 NewDstReg = MRI.createVirtualRegister(NewDstRC); 3054 MRI.replaceRegWith(DstReg, NewDstReg); 3055 } 3056 3057 // Legalize the operands 3058 legalizeOperands(Inst); 3059 3060 if (HasDst) 3061 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 3062 } 3063 } 3064 3065 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist, 3066 MachineInstr &Inst) const { 3067 MachineBasicBlock &MBB = *Inst.getParent(); 3068 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3069 MachineBasicBlock::iterator MII = Inst; 3070 DebugLoc DL = Inst.getDebugLoc(); 3071 3072 MachineOperand &Dest = Inst.getOperand(0); 3073 MachineOperand &Src = Inst.getOperand(1); 3074 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3075 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3076 3077 BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg) 3078 .addImm(0) 3079 .addReg(Src.getReg()); 3080 3081 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 3082 .addReg(Src.getReg()) 3083 .addReg(TmpReg); 3084 3085 MRI.replaceRegWith(Dest.getReg(), ResultReg); 3086 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 3087 } 3088 3089 void SIInstrInfo::splitScalar64BitUnaryOp( 3090 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst, 3091 unsigned Opcode) const { 3092 MachineBasicBlock &MBB = *Inst.getParent(); 3093 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3094 3095 MachineOperand &Dest = Inst.getOperand(0); 3096 MachineOperand &Src0 = Inst.getOperand(1); 3097 DebugLoc DL = Inst.getDebugLoc(); 3098 3099 MachineBasicBlock::iterator MII = Inst; 3100 3101 const MCInstrDesc &InstDesc = get(Opcode); 3102 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
3103 MRI.getRegClass(Src0.getReg()) : 3104 &AMDGPU::SGPR_32RegClass; 3105 3106 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 3107 3108 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 3109 AMDGPU::sub0, Src0SubRC); 3110 3111 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 3112 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 3113 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 3114 3115 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 3116 BuildMI(MBB, MII, DL, InstDesc, DestSub0) 3117 .addOperand(SrcReg0Sub0); 3118 3119 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 3120 AMDGPU::sub1, Src0SubRC); 3121 3122 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 3123 BuildMI(MBB, MII, DL, InstDesc, DestSub1) 3124 .addOperand(SrcReg0Sub1); 3125 3126 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 3127 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 3128 .addReg(DestSub0) 3129 .addImm(AMDGPU::sub0) 3130 .addReg(DestSub1) 3131 .addImm(AMDGPU::sub1); 3132 3133 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 3134 3135 // We don't need to legalizeOperands here because for a single operand, src0 3136 // will support any kind of input. 3137 3138 // Move all users of this moved value. 3139 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 3140 } 3141 3142 void SIInstrInfo::splitScalar64BitBinaryOp( 3143 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst, 3144 unsigned Opcode) const { 3145 MachineBasicBlock &MBB = *Inst.getParent(); 3146 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3147 3148 MachineOperand &Dest = Inst.getOperand(0); 3149 MachineOperand &Src0 = Inst.getOperand(1); 3150 MachineOperand &Src1 = Inst.getOperand(2); 3151 DebugLoc DL = Inst.getDebugLoc(); 3152 3153 MachineBasicBlock::iterator MII = Inst; 3154 3155 const MCInstrDesc &InstDesc = get(Opcode); 3156 const TargetRegisterClass *Src0RC = Src0.isReg() ? 3157 MRI.getRegClass(Src0.getReg()) : 3158 &AMDGPU::SGPR_32RegClass; 3159 3160 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 3161 const TargetRegisterClass *Src1RC = Src1.isReg() ? 
3162     MRI.getRegClass(Src1.getReg()) :
3163     &AMDGPU::SGPR_32RegClass;
3164
3165   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3166
3167   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3168                                                        AMDGPU::sub0, Src0SubRC);
3169   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3170                                                        AMDGPU::sub0, Src1SubRC);
3171
3172   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3173   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3174   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3175
3176   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3177   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3178                               .addOperand(SrcReg0Sub0)
3179                               .addOperand(SrcReg1Sub0);
3180
3181   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3182                                                        AMDGPU::sub1, Src0SubRC);
3183   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3184                                                        AMDGPU::sub1, Src1SubRC);
3185
3186   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3187   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3188                               .addOperand(SrcReg0Sub1)
3189                               .addOperand(SrcReg1Sub1);
3190
3191   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3192   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3193       .addReg(DestSub0)
3194       .addImm(AMDGPU::sub0)
3195       .addReg(DestSub1)
3196       .addImm(AMDGPU::sub1);
3197
3198   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3199
3200   // Try to legalize the operands in case we need to swap the order to keep it
3201   // valid.
3202   legalizeOperands(LoHalf);
3203   legalizeOperands(HiHalf);
3204
3205   // Move all users of this moved value.
3206   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3207 }
3208
3209 void SIInstrInfo::splitScalar64BitBCNT(
3210     SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3211   MachineBasicBlock &MBB = *Inst.getParent();
3212   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3213
3214   MachineBasicBlock::iterator MII = Inst;
3215   DebugLoc DL = Inst.getDebugLoc();
3216
3217   MachineOperand &Dest = Inst.getOperand(0);
3218   MachineOperand &Src = Inst.getOperand(1);
3219
3220   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3221   const TargetRegisterClass *SrcRC = Src.isReg() ?
3222     MRI.getRegClass(Src.getReg()) :
3223     &AMDGPU::SGPR_32RegClass;
3224
3225   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3226   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3227
3228   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3229
3230   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3231                                                       AMDGPU::sub0, SrcSubRC);
3232   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3233                                                       AMDGPU::sub1, SrcSubRC);
3234
3235   BuildMI(MBB, MII, DL, InstDesc, MidReg)
3236     .addOperand(SrcRegSub0)
3237     .addImm(0);
3238
3239   BuildMI(MBB, MII, DL, InstDesc, ResultReg)
3240     .addOperand(SrcRegSub1)
3241     .addReg(MidReg);
3242
3243   MRI.replaceRegWith(Dest.getReg(), ResultReg);
3244
3245   // We don't need to legalize operands here. src0 for either instruction can be
3246   // an SGPR, and the second input is unused or determined here.
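  // (The second V_BCNT accumulates into MidReg, so ResultReg ends up holding
  // the population count of the full 64-bit source.)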
3247 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 3248 } 3249 3250 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist, 3251 MachineInstr &Inst) const { 3252 MachineBasicBlock &MBB = *Inst.getParent(); 3253 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3254 MachineBasicBlock::iterator MII = Inst; 3255 DebugLoc DL = Inst.getDebugLoc(); 3256 3257 MachineOperand &Dest = Inst.getOperand(0); 3258 uint32_t Imm = Inst.getOperand(2).getImm(); 3259 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 3260 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 3261 3262 (void) Offset; 3263 3264 // Only sext_inreg cases handled. 3265 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 3266 Offset == 0 && "Not implemented"); 3267 3268 if (BitWidth < 32) { 3269 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3270 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3271 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 3272 3273 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 3274 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 3275 .addImm(0) 3276 .addImm(BitWidth); 3277 3278 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 3279 .addImm(31) 3280 .addReg(MidRegLo); 3281 3282 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 3283 .addReg(MidRegLo) 3284 .addImm(AMDGPU::sub0) 3285 .addReg(MidRegHi) 3286 .addImm(AMDGPU::sub1); 3287 3288 MRI.replaceRegWith(Dest.getReg(), ResultReg); 3289 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 3290 return; 3291 } 3292 3293 MachineOperand &Src = Inst.getOperand(1); 3294 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3295 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 3296 3297 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 3298 .addImm(31) 3299 .addReg(Src.getReg(), 0, AMDGPU::sub0); 3300 3301 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 3302 .addReg(Src.getReg(), 0, AMDGPU::sub0) 3303 .addImm(AMDGPU::sub0) 3304 .addReg(TmpReg) 3305 .addImm(AMDGPU::sub1); 3306 3307 MRI.replaceRegWith(Dest.getReg(), ResultReg); 3308 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 3309 } 3310 3311 void SIInstrInfo::addUsersToMoveToVALUWorklist( 3312 unsigned DstReg, 3313 MachineRegisterInfo &MRI, 3314 SmallVectorImpl<MachineInstr *> &Worklist) const { 3315 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 3316 E = MRI.use_end(); I != E; ++I) { 3317 MachineInstr &UseMI = *I->getParent(); 3318 if (!canReadVGPR(UseMI, I.getOperandNo())) { 3319 Worklist.push_back(&UseMI); 3320 } 3321 } 3322 } 3323 3324 void SIInstrInfo::addSCCDefUsersToVALUWorklist( 3325 MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const { 3326 // This assumes that all the users of SCC are in the same block 3327 // as the SCC def. 3328 for (MachineInstr &MI : 3329 llvm::make_range(MachineBasicBlock::iterator(SCCDefInst), 3330 SCCDefInst.getParent()->end())) { 3331 // Exit if we find another SCC def. 
3332 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1)
3333 return;
3334 
3335 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1)
3336 Worklist.push_back(&MI);
3337 }
3338 }
3339 
3340 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
3341 const MachineInstr &Inst) const {
3342 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
3343 
3344 switch (Inst.getOpcode()) {
3345 // For target instructions, getOpRegClass just returns the virtual register
3346 // class associated with the operand, so we need to find an equivalent VGPR
3347 // register class in order to move the instruction to the VALU.
3348 case AMDGPU::COPY:
3349 case AMDGPU::PHI:
3350 case AMDGPU::REG_SEQUENCE:
3351 case AMDGPU::INSERT_SUBREG:
3352 if (RI.hasVGPRs(NewDstRC))
3353 return nullptr;
3354 
3355 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3356 if (!NewDstRC)
3357 return nullptr;
3358 return NewDstRC;
3359 default:
3360 return NewDstRC;
3361 }
3362 }
3363 
3364 // Find the one SGPR operand we are allowed to use.
3365 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3366 int OpIndices[3]) const {
3367 const MCInstrDesc &Desc = MI.getDesc();
3368 
3369 // Find the one SGPR operand we are allowed to use.
3370 //
3371 // First we need to consider the instruction's operand requirements before
3372 // legalizing. Some operands are required to be SGPRs, such as implicit uses
3373 // of VCC, but we are still bound by the constant bus requirement to only use
3374 // one.
3375 //
3376 // If the operand's class is an SGPR, we can never move it.
3377 
3378 unsigned SGPRReg = findImplicitSGPRRead(MI);
3379 if (SGPRReg != AMDGPU::NoRegister)
3380 return SGPRReg;
3381 
3382 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3383 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3384 
3385 for (unsigned i = 0; i < 3; ++i) {
3386 int Idx = OpIndices[i];
3387 if (Idx == -1)
3388 break;
3389 
3390 const MachineOperand &MO = MI.getOperand(Idx);
3391 if (!MO.isReg())
3392 continue;
3393 
3394 // Is this operand statically required to be an SGPR based on the operand
3395 // constraints?
3396 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3397 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3398 if (IsRequiredSGPR)
3399 return MO.getReg();
3400 
3401 // If this could be a VGPR or an SGPR, check the dynamic register class.
3402 unsigned Reg = MO.getReg();
3403 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3404 if (RI.isSGPRClass(RegRC))
3405 UsedSGPRs[i] = Reg;
3406 }
3407 
3408 // We don't have a required SGPR operand, so we have a bit more freedom in
3409 // selecting operands to move.
3410 
3411 // Try to select the most used SGPR. If an SGPR is equal to one of the
3412 // others, we choose that.
3413 //
3414 // e.g.
3415 // V_FMA_F32 v0, s0, s0, s0 -> No moves
3416 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3417 
3418 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3419 // prefer those.
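// The checks below keep UsedSGPRs[0] if it matches either of the other two
// operands, and otherwise keep UsedSGPRs[1] if it matches UsedSGPRs[2]; the
// caller can then legalize whichever SGPR operands were not chosen.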
3420 3421 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 3422 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 3423 SGPRReg = UsedSGPRs[0]; 3424 } 3425 3426 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 3427 if (UsedSGPRs[1] == UsedSGPRs[2]) 3428 SGPRReg = UsedSGPRs[1]; 3429 } 3430 3431 return SGPRReg; 3432 } 3433 3434 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 3435 unsigned OperandName) const { 3436 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 3437 if (Idx == -1) 3438 return nullptr; 3439 3440 return &MI.getOperand(Idx); 3441 } 3442 3443 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 3444 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 3445 if (ST.isAmdHsaOS()) { 3446 RsrcDataFormat |= (1ULL << 56); 3447 3448 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 3449 // Set MTYPE = 2 3450 RsrcDataFormat |= (2ULL << 59); 3451 } 3452 3453 return RsrcDataFormat; 3454 } 3455 3456 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 3457 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 3458 AMDGPU::RSRC_TID_ENABLE | 3459 0xffffffff; // Size; 3460 3461 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; 3462 3463 Rsrc23 |= (EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT) | 3464 // IndexStride = 64 3465 (UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT); 3466 3467 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 3468 // Clear them unless we want a huge stride. 3469 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) 3470 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 3471 3472 return Rsrc23; 3473 } 3474 3475 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 3476 unsigned Opc = MI.getOpcode(); 3477 3478 return isSMRD(Opc); 3479 } 3480 3481 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const { 3482 unsigned Opc = MI.getOpcode(); 3483 3484 return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc); 3485 } 3486 3487 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 3488 int &FrameIndex) const { 3489 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 3490 if (!Addr || !Addr->isFI()) 3491 return AMDGPU::NoRegister; 3492 3493 assert(!MI.memoperands_empty() && 3494 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 3495 3496 FrameIndex = Addr->getIndex(); 3497 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 3498 } 3499 3500 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 3501 int &FrameIndex) const { 3502 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 3503 assert(Addr && Addr->isFI()); 3504 FrameIndex = Addr->getIndex(); 3505 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 3506 } 3507 3508 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 3509 int &FrameIndex) const { 3510 3511 if (!MI.mayLoad()) 3512 return AMDGPU::NoRegister; 3513 3514 if (isMUBUF(MI) || isVGPRSpill(MI)) 3515 return isStackAccess(MI, FrameIndex); 3516 3517 if (isSGPRSpill(MI)) 3518 return isSGPRStackAccess(MI, FrameIndex); 3519 3520 return AMDGPU::NoRegister; 3521 } 3522 3523 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 3524 int &FrameIndex) const { 3525 if (!MI.mayStore()) 3526 return AMDGPU::NoRegister; 3527 3528 if (isMUBUF(MI) || isVGPRSpill(MI)) 3529 return isStackAccess(MI, FrameIndex); 3530 3531 if (isSGPRSpill(MI)) 3532 return isSGPRStackAccess(MI, FrameIndex); 3533 3534 return AMDGPU::NoRegister; 3535 } 3536 
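// Most encodings below report a fixed size via the MCInstrDesc, but a 4-byte
// SALU/VALU encoding grows to 8 bytes when one of its source operands needs a
// 32-bit literal. A rough sketch of the resulting sizes (the mnemonics are
// only illustrative):
//   s_mov_b32 s0, 1          -> 4 bytes (inline constant)
//   s_mov_b32 s0, 0x12345678 -> 8 bytes (base encoding + trailing literal)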
3537 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
3538 unsigned Opc = MI.getOpcode();
3539 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
3540 unsigned DescSize = Desc.getSize();
3541 
3542 // If we have a definitive size, we can use it. Otherwise we need to inspect
3543 // the operands to know the size.
3544 //
3545 // FIXME: Instructions that have a base 32-bit encoding report their size as
3546 // 4, even though they are really 8 bytes if they have a literal operand.
3547 if (DescSize != 0 && DescSize != 4)
3548 return DescSize;
3549 
3550 if (Opc == AMDGPU::WAVE_BARRIER)
3551 return 0;
3552 
3553 // 4-byte instructions may have a 32-bit literal encoded after them. Check
3554 // operands that could ever be literals.
3555 if (isVALU(MI) || isSALU(MI)) {
3556 if (isFixedSize(MI)) {
3557 assert(DescSize == 4);
3558 return DescSize;
3559 }
3560 
3561 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
3562 if (Src0Idx == -1)
3563 return 4; // No operands.
3564 
3565 if (isLiteralConstantLike(MI.getOperand(Src0Idx), getOpSize(MI, Src0Idx)))
3566 return 8;
3567 
3568 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
3569 if (Src1Idx == -1)
3570 return 4;
3571 
3572 if (isLiteralConstantLike(MI.getOperand(Src1Idx), getOpSize(MI, Src1Idx)))
3573 return 8;
3574 
3575 return 4;
3576 }
3577 
3578 if (DescSize == 4)
3579 return 4;
3580 
3581 switch (Opc) {
3582 case AMDGPU::SI_MASK_BRANCH:
3583 case TargetOpcode::IMPLICIT_DEF:
3584 case TargetOpcode::KILL:
3585 case TargetOpcode::DBG_VALUE:
3586 case TargetOpcode::BUNDLE:
3587 case TargetOpcode::EH_LABEL:
3588 return 0;
3589 case TargetOpcode::INLINEASM: {
3590 const MachineFunction *MF = MI.getParent()->getParent();
3591 const char *AsmStr = MI.getOperand(0).getSymbolName();
3592 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
3593 }
3594 default:
3595 llvm_unreachable("unable to find instruction size");
3596 }
3597 }
3598 
3599 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
3600 if (!isFLAT(MI))
3601 return false;
3602 
3603 if (MI.memoperands_empty())
3604 return true;
3605 
3606 for (const MachineMemOperand *MMO : MI.memoperands()) {
3607 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
3608 return true;
3609 }
3610 return false;
3611 }
3612 
3613 ArrayRef<std::pair<int, const char *>>
3614 SIInstrInfo::getSerializableTargetIndices() const {
3615 static const std::pair<int, const char *> TargetIndices[] = {
3616 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
3617 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
3618 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
3619 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
3620 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
3621 return makeArrayRef(TargetIndices);
3622 }
3623 
3624 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
3625 /// post-RA version of misched uses CreateTargetMIHazardRecognizer.
3626 ScheduleHazardRecognizer *
3627 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
3628 const ScheduleDAG *DAG) const {
3629 return new GCNHazardRecognizer(DAG->MF);
3630 }
3631 
3632 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
3633 /// pass.
3634 ScheduleHazardRecognizer *
3635 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
3636 return new GCNHazardRecognizer(MF);
3637 }
3638 