//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
///        operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for the check on implicit exec reads.
  switch (MI->getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}
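
// Used by the scheduler as a load-clustering hint; it only needs to be
// conservatively correct, so unrecognized load pairs simply report false.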
bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // the st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}
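
// In the ST64 read2/write2 variants, offset0/offset1 count in units of
// 64 elements, which is why getMemOpBaseRegImmOfs below scales the element
// size by 64 for them.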
static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt->getOpcode();
  if (isDS(Opc)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt->mayLoad())
        EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt->mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(Opc) || isMTBUF(Opc)) {
    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
      return false;

    const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                    AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isSMRD(Opc)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
                                                     AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  return false;
}
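
// Clustering too many loads at once increases register pressure, since the
// results of all clustered loads are kept live together, so the NumLoads
// limit below is deliberately small.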
bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                     MachineInstr *SecondLdSt,
                                     unsigned NumLoads) const {
  unsigned Opc0 = FirstLdSt->getOpcode();
  unsigned Opc1 = SecondLdSt->getOpcode();

  // TODO: This needs finer tuning.
  if (NumLoads > 4)
    return false;

  if (isDS(Opc0) && isDS(Opc1))
    return true;

  if (isSMRD(Opc0) && isSMRD(Opc1))
    return true;

  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1)))
    return true;

  return false;
}

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
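        // A boolean held in a VGPR is materialized into VCC by comparing it
        // against zero; V_CMP_NE writes its per-lane result to VCC
        // implicitly.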
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    // Keep the super-register live across the sequence by marking all but
    // the last sub-register move as implicitly defining it.
    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}
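
// getCommuteRev maps an opcode to its operand-swapped "_REV" form (e.g.
// V_SUBREV_F32 for V_SUB_F32), and getCommuteOrig maps a _REV opcode back
// to the original; both are table-driven.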
int SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();

  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  int Opcode = -1;

  if (RI.isSGPRClass(RC)) {
    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instructions for spilling SGPRs.
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_S32_SAVE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
    }
  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
    MFI->setHasSpilledVGPRs();

    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_V32_SAVE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_V64_SAVE;  break;
    case 96:  Opcode = AMDGPU::SI_SPILL_V96_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_V128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_V256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_V512_SAVE; break;
    }
  }

  if (Opcode != -1) {
    MachinePointerInfo PtrInfo
      = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
    unsigned Size = FrameInfo->getObjectSize(FrameIndex);
    unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
    MachineMemOperand *MMO
      = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                 Size, Align);

    FrameInfo->setObjectAlignment(FrameIndex, 4);
    BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg)
      .addFrameIndex(FrameIndex)
      // Place-holder registers, these will be filled in by
      // SIPrepareScratchRegs.
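      // (the SReg_128 stands in for the scratch resource descriptor, the
      //  lone SGPR for the scratch offset)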
      .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
      .addReg(AMDGPU::SGPR0, RegState::Undef)
      .addMemOperand(MMO);
  } else {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  int Opcode = -1;

  if (RI.isSGPRClass(RC)) {
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_S32_RESTORE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
    }
  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_V32_RESTORE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_V64_RESTORE;  break;
    case 96:  Opcode = AMDGPU::SI_SPILL_V96_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_V128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_V256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_V512_RESTORE; break;
    }
  }

  if (Opcode != -1) {
    unsigned Align = 4;
    FrameInfo->setObjectAlignment(FrameIndex, Align);
    unsigned Size = FrameInfo->getObjectSize(FrameIndex);

    MachinePointerInfo PtrInfo
      = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, Size, Align);

    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex)
      // Place-holder registers, these will be filled in by
      // SIPrepareScratchRegs.
      .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
      .addReg(AMDGPU::SGPR0, RegState::Undef)
      .addMemOperand(MMO);
  } else {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
  }
}
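
// calculateLDSSpillAddress computes a per-thread spill address in LDS: the
// linearized thread id, scaled to a byte offset, is added to the region
// reserved for this frame index after the statically allocated LDS
// (MFI->LDSSize).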
/// \param FrameOffset Offset in bytes of the FrameIndex being spilled.
unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                               RegScavenger *RS, unsigned TmpReg,
                                               unsigned FrameOffset,
                                               unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (MFI->getShaderType() == ShaderType::COMPUTE &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_X);
      unsigned TIDIGYReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Y);
      unsigned TIDIGZReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Z);
      unsigned InputPtrReg =
          TRI->getPreloadedValue(*MF, SIRegisterInfo::INPUT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(&Entry);
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
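    // TIDReg now holds the thread id scaled to a byte offset (tid * 4).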
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->LDSSize + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
                             int Count) const {
  // The S_NOP immediate encodes (number of NOPs - 1), so a single S_NOP
  // covers at most 8 NOPs.
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  case AMDGPU::SI_CONSTDATA_PTR: {
    unsigned Reg = MI->getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_U32), RegLo)
      .addReg(RegLo)
      .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
      .addReg(RegHi)
      .addImm(0)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
      .addReg(AMDGPU::SCC, RegState::Implicit);
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::SGPR_USE:
    // This is just a placeholder for register allocation.
    MI->eraseFromParent();
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI->getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
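    // (ISel bitcasts all FP values to integers, so an FPImm operand should
    //  never actually reach this point; see the check in verifyInstruction.)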
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit);
    }
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
    unsigned Src0 = MI->getOperand(1).getReg();
    unsigned Src1 = MI->getOperand(2).getReg();
    const MachineOperand &SrcCond = MI->getOperand(3);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
      .addOperand(SrcCond);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
      .addOperand(SrcCond);
    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

/// Commutes the operands in the given instruction.
/// The commutable operands are specified by their indices OpIdx0 and OpIdx1.
///
/// Do not call this method for a non-commutable instruction or for a
/// non-commutable pair of operand indices OpIdx0 and OpIdx1.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; in such cases a null pointer is returned.
MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx0,
                                                  unsigned OpIdx1) const {
  int CommutedOpcode = commuteOpcode(*MI);
  if (CommutedOpcode == -1)
    return nullptr;

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);
  if (!Src0.isReg())
    return nullptr;

  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);

  if ((OpIdx0 != static_cast<unsigned>(Src0Idx) ||
       OpIdx1 != static_cast<unsigned>(Src1Idx)) &&
      (OpIdx0 != static_cast<unsigned>(Src1Idx) ||
       OpIdx1 != static_cast<unsigned>(Src0Idx)))
    return nullptr;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  // Make sure it's legal to commute operands for VOP2.
  if (isVOP2(MI->getOpcode()) &&
      (!isOperandLegal(MI, Src0Idx, &Src1) ||
       !isOperandLegal(MI, Src1Idx, &Src0))) {
    return nullptr;
  }

  if (!Src1.isReg()) {
    // Allow commuting instructions with Imm operands.
    if (NewMI || !Src1.isImm() ||
        (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))) {
      return nullptr;
    }

    // Be sure to copy the source modifiers to the right place.
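    // Source modifiers (neg/abs) are attached to the operand slot rather
    // than to the value, so they have to be swapped along with the operands
    // themselves.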
    if (MachineOperand *Src0Mods
          = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      MachineOperand *Src1Mods
        = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);

      int Src0ModsVal = Src0Mods->getImm();
      if (!Src1Mods && Src0ModsVal != 0)
        return nullptr;

      // XXX - This assert might be a lie. It might be useful to have a neg
      // modifier with 0.0.
      int Src1ModsVal = Src1Mods->getImm();
      assert((Src1ModsVal == 0) && "Not expecting modifiers with immediates");

      Src1Mods->setImm(Src0ModsVal);
      Src0Mods->setImm(Src1ModsVal);
    }

    unsigned Reg = Src0.getReg();
    unsigned SubReg = Src0.getSubReg();
    if (Src1.isImm())
      Src0.ChangeToImmediate(Src1.getImm());
    else
      llvm_unreachable("Should only have immediates");

    Src1.ChangeToRegister(Reg, false);
    Src1.setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
  }

  if (MI)
    MI->setDesc(get(CommutedOpcode));

  return MI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  unsigned Opc = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  // FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
  // immediate. Also, immediate src0 operand is not handled in
  // SIInstrInfo::commuteInstruction().
  if (!MI->getOperand(Src0Idx).isReg())
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);
  if (Src1.isImm()) {
    // SIInstrInfo::commuteInstruction() does support commuting the immediate
    // operand src1 in 2 and 3 operand instructions.
    if (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))
      return false;
  } else if (Src1.isReg()) {
    // If any source modifiers are set, the generic instruction commuting won't
    // understand how to copy the source modifiers.
    if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
      return false;
  } else
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned Opc = UseMI->getOpcode();
  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) {
    // Don't fold if we are using source modifiers. The new VOP2 instructions
    // don't have them.
    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
      return false;
    }

    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_f32.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      if (!Src2->isReg() ||
          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
        return false;

      // We need to do some weird looking operand shuffling since the madmk
      // operands are out of the normal expected order with the multiplied
      // constant as the last operand.
      //
      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
      // src0 -> src2 K
      // src1 -> src0
      // src2 -> src1

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
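      // (Operands are removed from the highest index down so the remaining
      //  operand indices stay valid.)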
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      unsigned Src2Reg = Src2->getReg();
      unsigned Src2SubReg = Src2->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      Src1->setReg(Src2Reg);
      Src1->setSubReg(Src2SubReg);
      Src1->setIsKill(Src2->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      Src2->ChangeToImmediate(Imm);

      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_f32.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      if (!Src0->isImm() &&
          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
        return false;

      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      // ChangeToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}
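
// Two accesses from the same base register are disjoint when the lower one
// ends at or before the higher one starts, e.g. [0, 4) and [4, 8) do not
// overlap since 0 + 4 <= 4.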
bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
                                               MachineInstr *MIb) const {
  unsigned BaseReg0, Offset0;
  unsigned BaseReg1, Offset1;

  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
    assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
           "read2 / write2 not expected here yet");
    unsigned Width0 = (*MIa->memoperands_begin())->getSize();
    unsigned Width1 = (*MIb->memoperands_begin())->getSize();
    if (BaseReg0 == BaseReg1 &&
        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  unsigned Opc0 = MIa->getOpcode();
  unsigned Opc1 = MIb->getOpcode();

  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must load from or modify a memory location");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
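  // DS accesses LDS, which is disjoint from the memory reached by buffer
  // and scalar loads; MUBUF/MTBUF and SMRD may alias each other, and FLAT
  // may alias everything.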
  if (isDS(Opc0)) {
    if (isDS(Opc1))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(Opc1);
  }

  if (isMUBUF(Opc0) || isMTBUF(Opc0)) {
    if (isMUBUF(Opc1) || isMTBUF(Opc1))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(Opc1) && !isSMRD(Opc1);
  }

  if (isSMRD(Opc0)) {
    if (isSMRD(Opc1))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(Opc1) && !isMUBUF(Opc0) && !isMTBUF(Opc0);
  }

  if (isFLAT(Opc0)) {
    if (isFLAT(Opc1))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineBasicBlock::iterator &MI,
                                                 LiveVariables *LV) const {

  switch (MI->getOpcode()) {
  default: return nullptr;
  case AMDGPU::V_MAC_F32_e64: break;
  case AMDGPU::V_MAC_F32_e32: {
    const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
    if (Src0->isImm() && !isInlineConstant(*Src0, 4))
      return nullptr;
    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
  const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
  const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);

  return BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_MAD_F32))
           .addOperand(*Dst)
           .addImm(0) // Src0 mods
           .addOperand(*Src0)
           .addImm(0) // Src1 mods
           .addOperand(*Src1)
           .addImm(0) // Src2 mods
           .addOperand(*Src2)
           .addImm(0)  // clamp
           .addImm(0); // omod
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int64_t SVal = Imm.getSExtValue();
  if (SVal >= -16 && SVal <= 64)
    return true;

  if (Imm.getBitWidth() == 64) {
    uint64_t Val = Imm.getZExtValue();
    return (DoubleToBits(0.0) == Val) ||
           (DoubleToBits(1.0) == Val) ||
           (DoubleToBits(-1.0) == Val) ||
           (DoubleToBits(0.5) == Val) ||
           (DoubleToBits(-0.5) == Val) ||
           (DoubleToBits(2.0) == Val) ||
           (DoubleToBits(-2.0) == Val) ||
           (DoubleToBits(4.0) == Val) ||
           (DoubleToBits(-4.0) == Val);
  }

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.
  uint32_t Val = Imm.getZExtValue();

  return (FloatToBits(0.0f) == Val) ||
         (FloatToBits(1.0f) == Val) ||
         (FloatToBits(-1.0f) == Val) ||
         (FloatToBits(0.5f) == Val) ||
         (FloatToBits(-0.5f) == Val) ||
         (FloatToBits(2.0f) == Val) ||
         (FloatToBits(-2.0f) == Val) ||
         (FloatToBits(4.0f) == Val) ||
         (FloatToBits(-4.0f) == Val);
}
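
// For example, 64 is an inline constant but 65 needs a literal; likewise
// FloatToBits(4.0f) is inline while FloatToBits(3.0f) is not.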
bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   unsigned OpSize) const {
  if (MO.isImm()) {
    // MachineOperand provides no way to tell the true operand size, since it
    // only records a 64-bit value. We need to know the size to determine if a
    // 32-bit floating point immediate bit pattern is legal for an integer
    // immediate. It would be for any 32-bit integer operand, but would not be
    // for a 64-bit one.

    unsigned BitSize = 8 * OpSize;
    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
                                    unsigned OpSize) const {
  return MO.isImm() && !isInlineConstant(MO, OpSize);
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
  if (isLiteralConstant(MO, OpSize))
    return RI.opCanUseLiteralConstant(OpInfo.OperandType);

  return RI.opCanUseInlineConstant(OpInfo.OperandType);
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.

  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  unsigned OpSize) const {
  // Literal constants use the constant bus.
  if (isLiteralConstant(MO, OpSize))
    return true;

  if (!MO.isReg() || !MO.isUse())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // FLAT_SCR is just an SGPR pair.
  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
    return true;

  // EXEC register uses the constant bus.
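  // Every VALU instruction implicitly reads EXEC, so only an explicit EXEC
  // operand is counted here.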
  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
    return true;

  // SGPRs use the constant bus.
  if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
      (!MO.isImplicit() &&
       (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
        AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
    return true;
  }

  return false;
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI->getOperand(i).isImm()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C:
      if (isLiteralConstant(MI->getOperand(i),
                            RI.getRegClass(RegClass)->getSize())) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case MCOI::OPERAND_IMMEDIATE:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*
  if (isVOP1(Opcode) || isVOP2(Opcode) || isVOP3(Opcode) || isVOPC(Opcode)) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
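    // A VOP* instruction may read at most one SGPR / constant-bus value;
    // reading the same SGPR more than once is allowed, which is why the
    // loop below de-duplicates on SGPRUsed.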
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = AMDGPU::NoRegister;
    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI->getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
        if (MO.isReg()) {
          if (MO.getReg() != SGPRUsed)
            ++ConstantBusCount;
          SGPRUsed = MO.getReg();
        } else {
          ++ConstantBusCount;
        }
      }
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify misc. restrictions on specific instructions.
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
    const MachineOperand &Src0 = MI->getOperand(Src0Idx);
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    const MachineOperand &Src2 = MI->getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
  if (!isGenericOpcode(Opcode) && !isSALU(Opcode) && !isSMRD(Opcode)) {
    const MachineOperand *Exec = MI->findRegisterUseOperand(AMDGPU::EXEC);
    if (!Exec || !Exec->isImplicit()) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  return true;
}
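
// getVALUOp maps a scalar (SALU) opcode to the vector (VALU) opcode used
// when an instruction has to be moved off the scalar unit, e.g. because an
// operand turns out to live in a VGPR.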
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}
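
// getOpRegClass returns the register class of operand OpNo, preferring the
// class named in the instruction description and falling back to the class
// of the register currently occupying the operand for variadic or
// unconstrained operands.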
const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    unsigned Reg = MI.getOperand(OpNo).getReg();

    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  unsigned Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
    .addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
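    // Split the 64-bit immediate: sub0 takes the low 32 bits, sub1 the
    // high 32 bits.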
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1).
void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
  assert(Inst->getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst->getOperand(1);
  Inst->RemoveOperand(1);
  Inst->addOperand(Op1);
}

bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const MCInstrDesc &InstDesc = get(MI->getOpcode());
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
      OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI->getOperand(OpIdx);

  if (isVALU(InstDesc.Opcode) &&
      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
    unsigned SGPRUsed =
        MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI->getOperand(i);
      if (Op.isReg() && Op.getReg() != SGPRUsed &&
          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
        return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    const TargetRegisterClass *RC =
        TargetRegisterInfo::isVirtualRegister(MO->getReg()) ?
            MRI.getRegClass(MO->getReg()) :
            RI.getPhysRegClass(MO->getReg());

    // In order to be legal, the common sub-class must be equal to the
    // class of the current operand. For example:
    //
    // v_mov_b32 s0 ; Operand defined as vsrc_32
    //              ; RI.getCommonSubClass(s0,vsrc_32) = sgpr ; LEGAL
    //
    // s_sendmsg 0, s0 ; Operand defined as m0reg
    //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL

    return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());

  if (!DefinedRC) {
    // This operand expects an immediate.
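    // (There is no register class to validate against; TargetIndex and
    //  FrameIndex operands are lowered to immediates later, so they are
    //  acceptable here as well.)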
1696     return true;
1697   }
1698
1699   return isImmOperandLegal(MI, OpIdx, *MO);
1700 }
1701
1702 void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
1703   MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
1704
1705   int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1706                                            AMDGPU::OpName::src0);
1707   int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1708                                            AMDGPU::OpName::src1);
1709   int Src2Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
1710                                            AMDGPU::OpName::src2);
1711
1712   // Legalize VOP2
1713   if (isVOP2(MI->getOpcode()) && Src1Idx != -1) {
1714     // Legalize src0
1715     if (!isOperandLegal(MI, Src0Idx))
1716       legalizeOpWithMove(MI, Src0Idx);
1717
1718     // Legalize src1
1719     if (isOperandLegal(MI, Src1Idx))
1720       return;
1721
1722     // Usually src0 of VOP2 instructions allows more types of inputs
1723     // than src1, so try to commute the instruction to decrease our
1724     // chances of having to insert a MOV instruction to legalize src1.
1725     if (MI->isCommutable()) {
1726       if (commuteInstruction(MI))
1727         // If we are successful in commuting, then we know MI is legal, so
1728         // we are done.
1729         return;
1730     }
1731
1732     legalizeOpWithMove(MI, Src1Idx);
1733     return;
1734   }
1735
1736   // XXX - Do any VOP3 instructions read VCC?
1737   // Legalize VOP3
1738   if (isVOP3(MI->getOpcode())) {
1739     int VOP3Idx[3] = { Src0Idx, Src1Idx, Src2Idx };
1740
1741     // Find the one SGPR operand we are allowed to use.
1742     unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
1743
1744     for (unsigned i = 0; i < 3; ++i) {
1745       int Idx = VOP3Idx[i];
1746       if (Idx == -1)
1747         break;
1748       MachineOperand &MO = MI->getOperand(Idx);
1749
1750       if (MO.isReg()) {
1751         if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
1752           continue; // VGPRs are legal
1753
1754         assert(MO.getReg() != AMDGPU::SCC && "SCC operand to VOP3 instruction");
1755
1756         if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
1757           SGPRReg = MO.getReg();
1758           // We can use one SGPR in each VOP3 instruction.
1759           continue;
1760         }
1761       } else if (!isLiteralConstant(MO, getOpSize(MI->getOpcode(), Idx))) {
1762         // If it is not a register and not a literal constant, then it must be
1763         // an inline constant which is always legal.
1764         continue;
1765       }
1766       // If we make it this far, then the operand is not legal and we must
1767       // legalize it.
1768       legalizeOpWithMove(MI, Idx);
1769     }
1770
1771     return;
1772   }
1773
1774   // Legalize REG_SEQUENCE and PHI
1775   // The register class of the operands must be the same type as the register
1776   // class of the output.
1777   if (MI->getOpcode() == AMDGPU::PHI) {
1778     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
1779     for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
1780       if (!MI->getOperand(i).isReg() ||
1781           !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
1782         continue;
1783       const TargetRegisterClass *OpRC =
1784           MRI.getRegClass(MI->getOperand(i).getReg());
1785       if (RI.hasVGPRs(OpRC)) {
1786         VRC = OpRC;
1787       } else {
1788         SRC = OpRC;
1789       }
1790     }
1791
1792     // If any of the operands are VGPR registers, then they must all be
1793     // VGPRs; otherwise we will create illegal VGPR->SGPR copies when
1794     // legalizing them.
1795     if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
1796       if (!VRC) {
1797         assert(SRC);
1798         VRC = RI.getEquivalentVGPRClass(SRC);
1799       }
1800       RC = VRC;
1801     } else {
1802       RC = SRC;
1803     }
1804
1805     // Update all the operands so they have the same type.
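     // Hypothetical example: given
     //   %vreg2 = PHI %vreg0, <BB#0>, %vreg1, <BB#1>
     // with %vreg0 in SGPR_32 and %vreg1 in VGPR_32, the loop below copies
     // %vreg0 into a fresh VGPR before the terminator of BB#0, so the PHI
     // becomes all-VGPR and no illegal VGPR->SGPR copy is needed later.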
1806     for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
1807       MachineOperand &Op = MI->getOperand(I);
1808       if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
1809         continue;
1810       unsigned DstReg = MRI.createVirtualRegister(RC);
1811
1812       // MI is a PHI instruction.
1813       MachineBasicBlock *InsertBB = MI->getOperand(I + 1).getMBB();
1814       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
1815
1816       BuildMI(*InsertBB, Insert, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
1817         .addOperand(Op);
1818       Op.setReg(DstReg);
1819     }
1820   }
1821
1822   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
1823   // VGPR dest type and SGPR sources, insert copies so all operands are
1824   // VGPRs. This seems to help operand folding / the register coalescer.
1825   if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
1826     MachineBasicBlock *MBB = MI->getParent();
1827     const TargetRegisterClass *DstRC = getOpRegClass(*MI, 0);
1828     if (RI.hasVGPRs(DstRC)) {
1829       // Update all the operands so they are VGPR register classes. These may
1830       // not be the same register class because REG_SEQUENCE supports mixing
1831       // subregister index types e.g. sub0_sub1 + sub2 + sub3
1832       for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
1833         MachineOperand &Op = MI->getOperand(I);
1834         if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
1835           continue;
1836
1837         const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
1838         const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
1839         if (VRC == OpRC)
1840           continue;
1841
1842         unsigned DstReg = MRI.createVirtualRegister(VRC);
1843
1844         BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
1845           .addOperand(Op);
1846
1847         Op.setReg(DstReg);
1848         Op.setIsKill();
1849       }
1850     }
1851
1852     return;
1853   }
1854
1855   // Legalize INSERT_SUBREG
1856   // src0 must have the same register class as dst
1857   if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
1858     unsigned Dst = MI->getOperand(0).getReg();
1859     unsigned Src0 = MI->getOperand(1).getReg();
1860     const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
1861     const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
1862     if (DstRC != Src0RC) {
1863       MachineBasicBlock &MBB = *MI->getParent();
1864       unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
1865       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
1866         .addReg(Src0);
1867       MI->getOperand(1).setReg(NewSrc0);
1868     }
1869     return;
1870   }
1871
1872   // Legalize MUBUF* instructions
1873   // FIXME: If we start using the non-addr64 instructions for compute, we
1874   // may need to legalize them here.
1875   int SRsrcIdx =
1876       AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
1877   if (SRsrcIdx != -1) {
1878     // We have an MUBUF instruction
1879     MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
1880     unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
1881     if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
1882                              RI.getRegClass(SRsrcRC))) {
1883       // The operands are legal.
1884       // FIXME: We may need to legalize operands besides srsrc.
1885       return;
1886     }
1887
1888     MachineBasicBlock &MBB = *MI->getParent();
1889
1890     // Extract the ptr from the resource descriptor.
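     // (Layout assumed here: the 128-bit descriptor keeps the 64-bit base
     // pointer in sub0_sub1; the replacement descriptor built below zeroes
     // that pointer and supplies the default data format in sub2/sub3.)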
1891     unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
1892         &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
1893
1894     // Create an empty resource descriptor
1895     unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1896     unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1897     unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1898     unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
1899     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
1900
1901     // Zero64 = 0
1902     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
1903             Zero64)
1904       .addImm(0);
1905
1906     // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
1907     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1908             SRsrcFormatLo)
1909       .addImm(RsrcDataFormat & 0xFFFFFFFF);
1910
1911     // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
1912     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1913             SRsrcFormatHi)
1914       .addImm(RsrcDataFormat >> 32);
1915
1916     // NewSRsrc = {Zero64, SRsrcFormat}
1917     BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
1918       .addReg(Zero64)
1919       .addImm(AMDGPU::sub0_sub1)
1920       .addReg(SRsrcFormatLo)
1921       .addImm(AMDGPU::sub2)
1922       .addReg(SRsrcFormatHi)
1923       .addImm(AMDGPU::sub3);
1924
1925     MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
1926     unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
1927     if (VAddr) {
1928       // This is already an ADDR64 instruction so we need to add the pointer
1929       // extracted from the resource descriptor to the current value of VAddr.
1930       unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1931       unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1932
1933       // NewVAddrLo = SRsrcPtr:sub0 + VAddr:sub0
1934       DebugLoc DL = MI->getDebugLoc();
1935       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
1936         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
1937         .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
1938
1939       // NewVAddrHi = SRsrcPtr:sub1 + VAddr:sub1
1940       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
1941         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
1942         .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
1943
1944       // NewVAddr = {NewVAddrLo, NewVAddrHi}
1945       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
1946         .addReg(NewVAddrLo)
1947         .addImm(AMDGPU::sub0)
1948         .addReg(NewVAddrHi)
1949         .addImm(AMDGPU::sub1);
1950     } else {
1951       // This instruction is the _OFFSET variant, so we need to convert it to
1952       // ADDR64.
1953       MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
1954       MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
1955       MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
1956
1957       // Create the new instruction.
1958       unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
1959       MachineInstr *Addr64 =
1960           BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
1961             .addOperand(*VData)
1962             .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
1963                                         // This will be replaced later
1964                                         // with the new value of vaddr.
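                                            // The remaining operands (srsrc,
                                            // soffset, offset, glc, slc, tfe)
                                            // are carried over from the
                                            // original _OFFSET instruction.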
1965             .addOperand(*SRsrc)
1966             .addOperand(*SOffset)
1967             .addOperand(*Offset)
1968             .addImm(0) // glc
1969             .addImm(0) // slc
1970             .addImm(0) // tfe
1971             .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
1972
1973       MI->removeFromParent();
1974       MI = Addr64;
1975
1976       // NewVAddr = {SRsrcPtr:sub0, SRsrcPtr:sub1}
1977       BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
1978         .addReg(SRsrcPtr, 0, AMDGPU::sub0)
1979         .addImm(AMDGPU::sub0)
1980         .addReg(SRsrcPtr, 0, AMDGPU::sub1)
1981         .addImm(AMDGPU::sub1);
1982
1983       VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
1984       SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
1985     }
1986
1987     // Update the instruction to use NewVAddr
1988     VAddr->setReg(NewVAddr);
1989     // Update the instruction to use NewSRsrc
1990     SRsrc->setReg(NewSRsrc);
1991   }
1992 }
1993
1994 void SIInstrInfo::splitSMRD(MachineInstr *MI,
1995                             const TargetRegisterClass *HalfRC,
1996                             unsigned HalfImmOp, unsigned HalfSGPROp,
1997                             MachineInstr *&Lo, MachineInstr *&Hi) const {
1998
1999   DebugLoc DL = MI->getDebugLoc();
2000   MachineBasicBlock *MBB = MI->getParent();
2001   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2002   unsigned RegLo = MRI.createVirtualRegister(HalfRC);
2003   unsigned RegHi = MRI.createVirtualRegister(HalfRC);
2004   unsigned HalfSize = HalfRC->getSize();
2005   const MachineOperand *OffOp =
2006       getNamedOperand(*MI, AMDGPU::OpName::offset);
2007   const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
2008
2009   // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
2010   // on VI.
2011
2012   bool IsKill = SBase->isKill();
2013   if (OffOp) {
2014     bool isVI =
2015         MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
2016         AMDGPUSubtarget::VOLCANIC_ISLANDS;
2017     unsigned OffScale = isVI ? 1 : 4;
2018     // Handle the _IMM variant
2019     unsigned LoOffset = OffOp->getImm() * OffScale;
2020     unsigned HiOffset = LoOffset + HalfSize;
2021     Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
2022            // Use addReg instead of addOperand
2023            // to make sure kill flag is cleared.
2024            .addReg(SBase->getReg(), 0, SBase->getSubReg())
2025            .addImm(LoOffset / OffScale);
2026
2027     if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
2028       unsigned OffsetSGPR =
2029           MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2030       BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
2031         .addImm(HiOffset); // The offset in register is in bytes.
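      // Worked example (illustrative, SI): splitting an S_LOAD_DWORDX8 with
      // imm offset 254 gives LoOffset = 1016 and HiOffset = 1032 bytes;
      // 1032 / 4 = 258 does not fit in 8 bits, so the high half takes this
      // SGPR-offset path.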
2032       Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
2033              .addReg(SBase->getReg(), getKillRegState(IsKill),
2034                      SBase->getSubReg())
2035              .addReg(OffsetSGPR);
2036     } else {
2037       Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
2038              .addReg(SBase->getReg(), getKillRegState(IsKill),
2039                      SBase->getSubReg())
2040              .addImm(HiOffset / OffScale);
2041     }
2042   } else {
2043     // Handle the _SGPR variant
2044     MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
2045     Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
2046            .addReg(SBase->getReg(), 0, SBase->getSubReg())
2047            .addOperand(*SOff);
2048     unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2049     BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
2050       .addReg(SOff->getReg(), 0, SOff->getSubReg())
2051       .addImm(HalfSize);
2052     Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
2053            .addReg(SBase->getReg(), getKillRegState(IsKill),
2054                    SBase->getSubReg())
2055            .addReg(OffsetSGPR);
2056   }
2057
2058   unsigned SubLo, SubHi;
2059   const TargetRegisterClass *NewDstRC;
2060   switch (HalfSize) {
2061   case 4:
2062     SubLo = AMDGPU::sub0;
2063     SubHi = AMDGPU::sub1;
2064     NewDstRC = &AMDGPU::VReg_64RegClass;
2065     break;
2066   case 8:
2067     SubLo = AMDGPU::sub0_sub1;
2068     SubHi = AMDGPU::sub2_sub3;
2069     NewDstRC = &AMDGPU::VReg_128RegClass;
2070     break;
2071   case 16:
2072     SubLo = AMDGPU::sub0_sub1_sub2_sub3;
2073     SubHi = AMDGPU::sub4_sub5_sub6_sub7;
2074     NewDstRC = &AMDGPU::VReg_256RegClass;
2075     break;
2076   case 32:
2077     SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2078     SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
2079     NewDstRC = &AMDGPU::VReg_512RegClass;
2080     break;
2081   default:
2082     llvm_unreachable("Unhandled HalfSize");
2083   }
2084
2085   unsigned OldDst = MI->getOperand(0).getReg();
2086   unsigned NewDst = MRI.createVirtualRegister(NewDstRC);
2087
2088   MRI.replaceRegWith(OldDst, NewDst);
2089
2090   BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDst)
2091     .addReg(RegLo)
2092     .addImm(SubLo)
2093     .addReg(RegHi)
2094     .addImm(SubHi);
2095 }
2096
2097 void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
2098                                  MachineRegisterInfo &MRI,
2099                                  SmallVectorImpl<MachineInstr *> &Worklist) const {
2100   MachineBasicBlock *MBB = MI->getParent();
2101   int DstIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
2102   assert(DstIdx != -1);
2103   unsigned DstRCID = get(MI->getOpcode()).OpInfo[DstIdx].RegClass;
2104   switch(RI.getRegClass(DstRCID)->getSize()) {
2105   case 4:
2106   case 8:
2107   case 16: {
2108     unsigned NewOpcode = getVALUOp(*MI);
2109     unsigned RegOffset;
2110     unsigned ImmOffset;
2111
2112     if (MI->getOperand(2).isReg()) {
2113       RegOffset = MI->getOperand(2).getReg();
2114       ImmOffset = 0;
2115     } else {
2116       assert(MI->getOperand(2).isImm());
2117       // SMRD instructions take a dword offset on SI and a byte offset on VI,
2118       // and MUBUF instructions always take a byte offset.
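      // Illustrative example: an SMRD dword offset of 5 on SI becomes the
      // byte offset 20 (5 << 2) for the MUBUF form built below.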
2119       ImmOffset = MI->getOperand(2).getImm();
2120       if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
2121           AMDGPUSubtarget::SEA_ISLANDS)
2122         ImmOffset <<= 2;
2123       RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2124
2125       if (isUInt<12>(ImmOffset)) {
2126         BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
2127                 RegOffset)
2128           .addImm(0);
2129       } else {
2130         BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
2131                 RegOffset)
2132           .addImm(ImmOffset);
2133         ImmOffset = 0;
2134       }
2135     }
2136
2137     unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
2138     unsigned DWord0 = RegOffset;
2139     unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2140     unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2141     unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
2142     uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
2143
2144     BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
2145       .addImm(0);
2146     BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
2147       .addImm(RsrcDataFormat & 0xFFFFFFFF);
2148     BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
2149       .addImm(RsrcDataFormat >> 32);
2150     BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
2151       .addReg(DWord0)
2152       .addImm(AMDGPU::sub0)
2153       .addReg(DWord1)
2154       .addImm(AMDGPU::sub1)
2155       .addReg(DWord2)
2156       .addImm(AMDGPU::sub2)
2157       .addReg(DWord3)
2158       .addImm(AMDGPU::sub3);
2159
2160     const MCInstrDesc &NewInstDesc = get(NewOpcode);
2161     const TargetRegisterClass *NewDstRC
2162         = RI.getRegClass(NewInstDesc.OpInfo[0].RegClass);
2163     unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
2164     unsigned DstReg = MI->getOperand(0).getReg();
2165     MRI.replaceRegWith(DstReg, NewDstReg);
2166
2167     MachineInstr *NewInst =
2168         BuildMI(*MBB, MI, MI->getDebugLoc(), NewInstDesc, NewDstReg)
2169           .addOperand(MI->getOperand(1)) // sbase
2170           .addReg(SRsrc)
2171           .addImm(0)
2172           .addImm(ImmOffset)
2173           .addImm(0) // glc
2174           .addImm(0) // slc
2175           .addImm(0) // tfe
2176           .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
2177     MI->eraseFromParent();
2178
2179     legalizeOperands(NewInst);
2180     addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
2181     break;
2182   }
2183   case 32: {
2184     MachineInstr *Lo, *Hi;
2185     splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
2186               AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
2187     MI->eraseFromParent();
2188     moveSMRDToVALU(Lo, MRI, Worklist);
2189     moveSMRDToVALU(Hi, MRI, Worklist);
2190     break;
2191   }
2192
2193   case 64: {
2194     MachineInstr *Lo, *Hi;
2195     splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
2196               AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
2197     MI->eraseFromParent();
2198     moveSMRDToVALU(Lo, MRI, Worklist);
2199     moveSMRDToVALU(Hi, MRI, Worklist);
2200     break;
2201   }
2202   }
2203 }
2204
2205 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
2206   SmallVector<MachineInstr *, 128> Worklist;
2207   Worklist.push_back(&TopInst);
2208
2209   while (!Worklist.empty()) {
2210     MachineInstr *Inst = Worklist.pop_back_val();
2211     MachineBasicBlock *MBB = Inst->getParent();
2212     MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2213
2214     unsigned Opcode = Inst->getOpcode();
2215     unsigned NewOpcode = getVALUOp(*Inst);
2216
2217     // Handle some special cases
2218     switch (Opcode) {
2219     default:
2220       if (isSMRD(Inst->getOpcode())) {
2221         moveSMRDToVALU(Inst, MRI, Worklist);
2222         continue;
2223       }
2224       break;
2225     case AMDGPU::S_AND_B64:
2226       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
2227       Inst->eraseFromParent();
2228       continue;
2229
2230     case AMDGPU::S_OR_B64:
2231       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
2232       Inst->eraseFromParent();
2233       continue;
2234
2235     case AMDGPU::S_XOR_B64:
2236       splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
2237       Inst->eraseFromParent();
2238       continue;
2239
2240     case AMDGPU::S_NOT_B64:
2241       splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
2242       Inst->eraseFromParent();
2243       continue;
2244
2245     case AMDGPU::S_BCNT1_I32_B64:
2246       splitScalar64BitBCNT(Worklist, Inst);
2247       Inst->eraseFromParent();
2248       continue;
2249
2250     case AMDGPU::S_BFE_I64: {
2251       splitScalar64BitBFE(Worklist, Inst);
2252       Inst->eraseFromParent();
2253       continue;
2254     }
2255
2256     case AMDGPU::S_LSHL_B32:
2257       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2258         NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2259         swapOperands(Inst);
2260       }
2261       break;
2262     case AMDGPU::S_ASHR_I32:
2263       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2264         NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2265         swapOperands(Inst);
2266       }
2267       break;
2268     case AMDGPU::S_LSHR_B32:
2269       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2270         NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2271         swapOperands(Inst);
2272       }
2273       break;
2274     case AMDGPU::S_LSHL_B64:
2275       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2276         NewOpcode = AMDGPU::V_LSHLREV_B64;
2277         swapOperands(Inst);
2278       }
2279       break;
2280     case AMDGPU::S_ASHR_I64:
2281       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2282         NewOpcode = AMDGPU::V_ASHRREV_I64;
2283         swapOperands(Inst);
2284       }
2285       break;
2286     case AMDGPU::S_LSHR_B64:
2287       if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2288         NewOpcode = AMDGPU::V_LSHRREV_B64;
2289         swapOperands(Inst);
2290       }
2291       break;
2292
2293     case AMDGPU::S_BFE_U64:
2294     case AMDGPU::S_BFM_B64:
2295       llvm_unreachable("Moving this op to VALU not implemented");
2296     }
2297
2298     if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2299       // We cannot move this instruction to the VALU, so we should try to
2300       // legalize its operands instead.
2301       legalizeOperands(Inst);
2302       continue;
2303     }
2304
2305     // Use the new VALU Opcode.
2306     const MCInstrDesc &NewDesc = get(NewOpcode);
2307     Inst->setDesc(NewDesc);
2308
2309     // Remove any references to SCC. Vector instructions can't read from it, and
2310     // we're just about to add the implicit use / defs of VCC, and we don't want
2311     // both.
2312     for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
2313       MachineOperand &Op = Inst->getOperand(i);
2314       if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
2315         Inst->RemoveOperand(i);
2316     }
2317
2318     if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2319       // We are converting these to a BFE, so we need to add the missing
2320       // operands for the size and offset.
2321       unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
2322       Inst->addOperand(MachineOperand::CreateImm(0));
2323       Inst->addOperand(MachineOperand::CreateImm(Size));
2324
2325     } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
2326       // The VALU version adds the second operand to the result, so insert an
2327       // extra 0 operand.
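      // Sketch of the resulting form: S_BCNT1_I32_B32 dst, src effectively
      // becomes V_BCNT_U32_B32 dst, src, 0, where the trailing 0 is the
      // value the population count is accumulated onto.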
2328       Inst->addOperand(MachineOperand::CreateImm(0));
2329     }
2330
2331     Inst->addImplicitDefUseOperands(*Inst->getParent()->getParent());
2332
2333     if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
2334       const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
2335       // If we need to move this to VGPRs, we need to unpack the second operand
2336       // back into the 2 separate ones for bit offset and width.
2337       assert(OffsetWidthOp.isImm() &&
2338              "Scalar BFE is only implemented for constant width and offset");
2339       uint32_t Imm = OffsetWidthOp.getImm();
2340
2341       uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
2342       uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
2343       Inst->RemoveOperand(2);                     // Remove old immediate.
2344       Inst->addOperand(MachineOperand::CreateImm(Offset));
2345       Inst->addOperand(MachineOperand::CreateImm(BitWidth));
2346     }
2347
2348     // Update the destination register class.
2349     const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*Inst);
2350     if (!NewDstRC)
2351       continue;
2352
2353     unsigned DstReg = Inst->getOperand(0).getReg();
2354     unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
2355     MRI.replaceRegWith(DstReg, NewDstReg);
2356
2357     // Legalize the operands
2358     legalizeOperands(Inst);
2359
2360     addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
2361   }
2362 }
2363
2364 //===----------------------------------------------------------------------===//
2365 // Indirect addressing callbacks
2366 //===----------------------------------------------------------------------===//
2367
2368 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
2369                                                unsigned Channel) const {
2370   assert(Channel == 0);
2371   return RegIndex;
2372 }
2373
2374 const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
2375   return &AMDGPU::VGPR_32RegClass;
2376 }
2377
2378 void SIInstrInfo::splitScalar64BitUnaryOp(
2379     SmallVectorImpl<MachineInstr *> &Worklist,
2380     MachineInstr *Inst,
2381     unsigned Opcode) const {
2382   MachineBasicBlock &MBB = *Inst->getParent();
2383   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2384
2385   MachineOperand &Dest = Inst->getOperand(0);
2386   MachineOperand &Src0 = Inst->getOperand(1);
2387   DebugLoc DL = Inst->getDebugLoc();
2388
2389   MachineBasicBlock::iterator MII = Inst;
2390
2391   const MCInstrDesc &InstDesc = get(Opcode);
2392   const TargetRegisterClass *Src0RC = Src0.isReg() ?
2393       MRI.getRegClass(Src0.getReg()) :
2394       &AMDGPU::SGPR_32RegClass;
2395
2396   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
2397
2398   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2399                                                        AMDGPU::sub0, Src0SubRC);
2400
2401   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
2402   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
2403   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
2404
2405   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
2406   BuildMI(MBB, MII, DL, InstDesc, DestSub0)
2407     .addOperand(SrcReg0Sub0);
2408
2409   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2410                                                        AMDGPU::sub1, Src0SubRC);
2411
2412   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
2413   BuildMI(MBB, MII, DL, InstDesc, DestSub1)
2414     .addOperand(SrcReg0Sub1);
2415
2416   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
2417   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
2418     .addReg(DestSub0)
2419     .addImm(AMDGPU::sub0)
2420     .addReg(DestSub1)
2421     .addImm(AMDGPU::sub1);
2422
2423   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
2424
2425   // We don't need to legalizeOperands here because for a single operand, src0
2426   // will support any kind of input.
2427
2428   // Move all users of this moved value.
2429   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
2430 }
2431
2432 void SIInstrInfo::splitScalar64BitBinaryOp(
2433     SmallVectorImpl<MachineInstr *> &Worklist,
2434     MachineInstr *Inst,
2435     unsigned Opcode) const {
2436   MachineBasicBlock &MBB = *Inst->getParent();
2437   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2438
2439   MachineOperand &Dest = Inst->getOperand(0);
2440   MachineOperand &Src0 = Inst->getOperand(1);
2441   MachineOperand &Src1 = Inst->getOperand(2);
2442   DebugLoc DL = Inst->getDebugLoc();
2443
2444   MachineBasicBlock::iterator MII = Inst;
2445
2446   const MCInstrDesc &InstDesc = get(Opcode);
2447   const TargetRegisterClass *Src0RC = Src0.isReg() ?
2448     MRI.getRegClass(Src0.getReg()) :
2449     &AMDGPU::SGPR_32RegClass;
2450
2451   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
2452   const TargetRegisterClass *Src1RC = Src1.isReg() ?
2453     MRI.getRegClass(Src1.getReg()) :
2454     &AMDGPU::SGPR_32RegClass;
2455
2456   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
2457
2458   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2459                                                        AMDGPU::sub0, Src0SubRC);
2460   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
2461                                                        AMDGPU::sub0, Src1SubRC);
2462
2463   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
2464   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
2465   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
2466
2467   unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
2468   MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
2469                            .addOperand(SrcReg0Sub0)
2470                            .addOperand(SrcReg1Sub0);
2471
2472   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2473                                                        AMDGPU::sub1, Src0SubRC);
2474   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
2475                                                        AMDGPU::sub1, Src1SubRC);
2476
2477   unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
2478   MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
2479                            .addOperand(SrcReg0Sub1)
2480                            .addOperand(SrcReg1Sub1);
2481
2482   unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
2483   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
2484     .addReg(DestSub0)
2485     .addImm(AMDGPU::sub0)
2486     .addReg(DestSub1)
2487     .addImm(AMDGPU::sub1);
2488
2489   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
2490
2491   // Try to legalize the operands in case we need to swap the order to keep it
2492   // valid.
2493   legalizeOperands(LoHalf);
2494   legalizeOperands(HiHalf);
2495
2496   // Move all users of this moved value.
2497   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
2498 }
2499
2500 void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
2501                                        MachineInstr *Inst) const {
2502   MachineBasicBlock &MBB = *Inst->getParent();
2503   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2504
2505   MachineBasicBlock::iterator MII = Inst;
2506   DebugLoc DL = Inst->getDebugLoc();
2507
2508   MachineOperand &Dest = Inst->getOperand(0);
2509   MachineOperand &Src = Inst->getOperand(1);
2510
2511   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
2512   const TargetRegisterClass *SrcRC = Src.isReg() ?
2513     MRI.getRegClass(Src.getReg()) :
2514     &AMDGPU::SGPR_32RegClass;
2515
2516   unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2517   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2518
2519   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
2520
2521   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
2522                                                       AMDGPU::sub0, SrcSubRC);
2523   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
2524                                                       AMDGPU::sub1, SrcSubRC);
2525
2526   BuildMI(MBB, MII, DL, InstDesc, MidReg)
2527     .addOperand(SrcRegSub0)
2528     .addImm(0);
2529
2530   BuildMI(MBB, MII, DL, InstDesc, ResultReg)
2531     .addOperand(SrcRegSub1)
2532     .addReg(MidReg);
2533
2534   MRI.replaceRegWith(Dest.getReg(), ResultReg);
2535
2536   // We don't need to legalize operands here. src0 for either instruction can be
2537   // an SGPR, and the second input is unused or determined here.
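  // Net effect (sketch): S_BCNT1_I32_B64 dst, src is expanded to
  //   V_BCNT_U32_B32_e64 mid, src:sub0, 0
  //   V_BCNT_U32_B32_e64 dst, src:sub1, mid
  // so the two 32-bit population counts accumulate into the result.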
2538   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
2539 }
2540
2541 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
2542                                       MachineInstr *Inst) const {
2543   MachineBasicBlock &MBB = *Inst->getParent();
2544   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2545   MachineBasicBlock::iterator MII = Inst;
2546   DebugLoc DL = Inst->getDebugLoc();
2547
2548   MachineOperand &Dest = Inst->getOperand(0);
2549   uint32_t Imm = Inst->getOperand(2).getImm();
2550   uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
2551   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
2552
2553   (void) Offset;
2554
2555   // Only sext_inreg cases handled.
2556   assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 &&
2557          BitWidth <= 32 &&
2558          Offset == 0 &&
2559          "Not implemented");
2560
2561   if (BitWidth < 32) {
2562     unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2563     unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2564     unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2565
2566     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
2567       .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0)
2568       .addImm(0)
2569       .addImm(BitWidth);
2570
2571     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
2572       .addImm(31)
2573       .addReg(MidRegLo);
2574
2575     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
2576       .addReg(MidRegLo)
2577       .addImm(AMDGPU::sub0)
2578       .addReg(MidRegHi)
2579       .addImm(AMDGPU::sub1);
2580
2581     MRI.replaceRegWith(Dest.getReg(), ResultReg);
2582     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
2583     return;
2584   }
2585
2586   MachineOperand &Src = Inst->getOperand(1);
2587   unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2588   unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
2589
2590   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
2591     .addImm(31)
2592     .addReg(Src.getReg(), 0, AMDGPU::sub0);
2593
2594   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
2595     .addReg(Src.getReg(), 0, AMDGPU::sub0)
2596     .addImm(AMDGPU::sub0)
2597     .addReg(TmpReg)
2598     .addImm(AMDGPU::sub1);
2599
2600   MRI.replaceRegWith(Dest.getReg(), ResultReg);
2601   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
2602 }
2603
2604 void SIInstrInfo::addUsersToMoveToVALUWorklist(
2605     unsigned DstReg,
2606     MachineRegisterInfo &MRI,
2607     SmallVectorImpl<MachineInstr *> &Worklist) const {
2608   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
2609        E = MRI.use_end(); I != E; ++I) {
2610     MachineInstr &UseMI = *I->getParent();
2611     if (!canReadVGPR(UseMI, I.getOperandNo())) {
2612       Worklist.push_back(&UseMI);
2613     }
2614   }
2615 }
2616
2617 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
2618     const MachineInstr &Inst) const {
2619   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
2620
2621   switch (Inst.getOpcode()) {
2622   // For target instructions, getOpRegClass just returns the virtual register
2623   // class associated with the operand, so we need to find an equivalent VGPR
2624   // register class in order to move the instruction to the VALU.
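  // For example, a REG_SEQUENCE result currently constrained to SReg_64
  // would be retargeted to VReg_64 (an illustrative pairing; the actual
  // mapping comes from getEquivalentVGPRClass).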
2625   case AMDGPU::COPY:
2626   case AMDGPU::PHI:
2627   case AMDGPU::REG_SEQUENCE:
2628   case AMDGPU::INSERT_SUBREG:
2629     if (RI.hasVGPRs(NewDstRC))
2630       return nullptr;
2631
2632     NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
2633     if (!NewDstRC)
2634       return nullptr;
2635     return NewDstRC;
2636   default:
2637     return NewDstRC;
2638   }
2639 }
2640
2641 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
2642                                    int OpIndices[3]) const {
2643   const MCInstrDesc &Desc = get(MI->getOpcode());
2644
2645   // Find the one SGPR operand we are allowed to use.
2646   unsigned SGPRReg = AMDGPU::NoRegister;
2647
2648   // First we need to consider the instruction's operand requirements before
2649   // legalizing. Some operands are required to be SGPRs, such as implicit uses
2650   // of VCC, but we are still bound by the constant bus requirement to only use
2651   // one.
2652   //
2653   // If the operand's class is an SGPR, we can never move it.
2654
2655   for (const MachineOperand &MO : MI->implicit_operands()) {
2656     // We only care about reads.
2657     if (MO.isDef())
2658       continue;
2659
2660     if (MO.getReg() == AMDGPU::VCC)
2661       return AMDGPU::VCC;
2662
2663     if (MO.getReg() == AMDGPU::FLAT_SCR)
2664       return AMDGPU::FLAT_SCR;
2665   }
2666
2667   unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
2668   const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
2669
2670   for (unsigned i = 0; i < 3; ++i) {
2671     int Idx = OpIndices[i];
2672     if (Idx == -1)
2673       break;
2674
2675     const MachineOperand &MO = MI->getOperand(Idx);
2676     if (RI.isSGPRClassID(Desc.OpInfo[Idx].RegClass))
2677       SGPRReg = MO.getReg();
2678
2679     if (MO.isReg() && RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
2680       UsedSGPRs[i] = MO.getReg();
2681   }
2682
2683   if (SGPRReg != AMDGPU::NoRegister)
2684     return SGPRReg;
2685
2686   // We don't have a required SGPR operand, so we have a bit more freedom in
2687   // selecting operands to move.
2688
2689   // Try to select the most used SGPR. If an SGPR is equal to one of the
2690   // others, we choose that.
2691   //
2692   // e.g.
2693   // V_FMA_F32 v0, s0, s0, s0 -> No moves
2694   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
2695
2696   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
2697     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
2698       SGPRReg = UsedSGPRs[0];
2699   }
2700
2701   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
2702     if (UsedSGPRs[1] == UsedSGPRs[2])
2703       SGPRReg = UsedSGPRs[1];
2704   }
2705
2706   return SGPRReg;
2707 }
2708
2709 MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
2710     MachineBasicBlock *MBB,
2711     MachineBasicBlock::iterator I,
2712     unsigned ValueReg,
2713     unsigned Address, unsigned OffsetReg) const {
2714   const DebugLoc &DL = MBB->findDebugLoc(I);
2715   unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
2716       getIndirectIndexBegin(*MBB->getParent()));
2717
2718   return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
2719     .addReg(IndirectBaseReg, RegState::Define)
2720     .addOperand(I->getOperand(0))
2721     .addReg(IndirectBaseReg)
2722     .addReg(OffsetReg)
2723     .addImm(0)
2724     .addReg(ValueReg);
2725 }
2726
2727 MachineInstrBuilder SIInstrInfo::buildIndirectRead(
2728     MachineBasicBlock *MBB,
2729     MachineBasicBlock::iterator I,
2730     unsigned ValueReg,
2731     unsigned Address, unsigned OffsetReg) const {
2732   const DebugLoc &DL = MBB->findDebugLoc(I);
2733   unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
2734       getIndirectIndexBegin(*MBB->getParent()));
2735
2736   return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC_V1))
2737     .addOperand(I->getOperand(0))
2738     .addOperand(I->getOperand(1))
2739     .addReg(IndirectBaseReg)
2740     .addReg(OffsetReg)
2741     .addImm(0);
2742
2743 }
2744
2745 void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
2746                                            const MachineFunction &MF) const {
2747   int End = getIndirectIndexEnd(MF);
2748   int Begin = getIndirectIndexBegin(MF);
2749
2750   if (End == -1)
2751     return;
2752
2753
2754   for (int Index = Begin; Index <= End; ++Index)
2755     Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));
2756
2757   for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
2758     Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));
2759
2760   for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
2761     Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));
2762
2763   for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
2764     Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));
2765
2766   for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
2767     Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));
2768
2769   for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
2770     Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
2771 }
2772
2773 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
2774                                              unsigned OperandName) const {
2775   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
2776   if (Idx == -1)
2777     return nullptr;
2778
2779   return &MI.getOperand(Idx);
2780 }
2781
2782 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
2783   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
2784   if (ST.isAmdHsaOS()) {
2785     RsrcDataFormat |= (1ULL << 56);
2786
2787     if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
2788       // Set MTYPE = 2
2789       RsrcDataFormat |= (2ULL << 59);
2790   }
2791
2792   return RsrcDataFormat;
2793 }
2794
2795 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
2796   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
2797                     AMDGPU::RSRC_TID_ENABLE |
2798                     0xffffffff; // Size
2799
2800   // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
2801   // Clear them unless we want a huge stride.
2802   if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
2803     Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
2804
2805   return Rsrc23;
2806 }
2807