//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
///        operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI->getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
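  // DS, SMRD, and buffer (MUBUF/MTBUF) loads encode their addresses
  // differently, so each class is matched separately below.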
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
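    // Only constant offsets can be compared numerically, so bail out on
    // anything else (e.g. a frame index that has not been eliminated yet).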
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt->getOpcode();

  if (isDS(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt->mayLoad())
        EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt->mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(*LdSt) || isMTBUF(*LdSt)) {
    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
      return false;

    const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                    AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isSMRD(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
                                                     AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                     MachineInstr *SecondLdSt,
                                     unsigned NumLoads) const {
  // TODO: This needs finer tuning.
  if (NumLoads > 4)
    return false;

  if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
    return true;

  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
    return true;

  if ((isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
      (isMUBUF(*SecondLdSt) ||
       isMTBUF(*SecondLdSt)))
    return true;

  return false;
}

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
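        // The source holds an i1 in a VGPR; materialize the condition in VCC
        // by comparing the register against zero.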
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
        get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();

  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ?
           AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  int Opcode = -1;

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instructions for spilling SGPRs.
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_S32_SAVE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_SAVE; break;
    }
  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
    MFI->setHasSpilledVGPRs();

    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_V32_SAVE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_V64_SAVE;  break;
    case 96:  Opcode = AMDGPU::SI_SPILL_V96_SAVE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_V128_SAVE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_V256_SAVE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_V512_SAVE; break;
    }
  }

  if (Opcode != -1) {
    MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
    unsigned Size = FrameInfo->getObjectSize(FrameIndex);
    unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
    MachineMemOperand *MMO
        = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                                   Size, Align);

    FrameInfo->setObjectAlignment(FrameIndex, 4);
    BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg)
      .addFrameIndex(FrameIndex)
      // Place-holder registers, these will be filled in by
      // SIPrepareScratchRegs.
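      // (the wide placeholder stands in for the scratch resource descriptor,
      // the single SGPR for the scratch offset).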
      .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
      .addReg(AMDGPU::SGPR0, RegState::Undef)
      .addMemOperand(MMO);
  } else {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  int Opcode = -1;

  if (RI.isSGPRClass(RC)) {
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_S32_RESTORE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_S64_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_S128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_S256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_S512_RESTORE; break;
    }
  } else if (RI.hasVGPRs(RC) && ST.isVGPRSpillingEnabled(MFI)) {
    switch (RC->getSize() * 8) {
    case 32:  Opcode = AMDGPU::SI_SPILL_V32_RESTORE;  break;
    case 64:  Opcode = AMDGPU::SI_SPILL_V64_RESTORE;  break;
    case 96:  Opcode = AMDGPU::SI_SPILL_V96_RESTORE;  break;
    case 128: Opcode = AMDGPU::SI_SPILL_V128_RESTORE; break;
    case 256: Opcode = AMDGPU::SI_SPILL_V256_RESTORE; break;
    case 512: Opcode = AMDGPU::SI_SPILL_V512_RESTORE; break;
    }
  }

  if (Opcode != -1) {
    unsigned Align = 4;
    FrameInfo->setObjectAlignment(FrameIndex, Align);
    unsigned Size = FrameInfo->getObjectSize(FrameIndex);

    MachinePointerInfo PtrInfo
        = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        PtrInfo, MachineMemOperand::MOLoad, Size, Align);

    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex)
      // Place-holder registers, these will be filled in by
      // SIPrepareScratchRegs.
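      // (scratch resource descriptor and scratch offset, as in
      // storeRegToStackSlot above).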
      .addReg(AMDGPU::SGPR0_SGPR1_SGPR2_SGPR3, RegState::Undef)
      .addReg(AMDGPU::SGPR0, RegState::Undef)
      .addMemOperand(MMO);
  } else {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);
  }
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                               RegScavenger *RS, unsigned TmpReg,
                                               unsigned FrameOffset,
                                               unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (MFI->getShaderType() == ShaderType::COMPUTE &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_X);
      unsigned TIDIGYReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Y);
      unsigned TIDIGZReg = TRI->getPreloadedValue(*MF, SIRegisterInfo::TIDIG_Z);
      unsigned InputPtrReg =
          TRI->getPreloadedValue(*MF, SIRegisterInfo::INPUT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(&Entry);
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);

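    // The thread ID is now scaled to a byte offset (shifted left by 2, i.e.
    // multiplied by 4); cache it so later spills in this function can reuse
    // it.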
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->LDSSize + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
                             int Count) const {
  // S_NOP with operand N inserts N + 1 nops, so each iteration emits up to
  // eight of them.
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  case AMDGPU::SI_CONSTDATA_PTR: {
    unsigned Reg = MI->getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);

    // Add 32-bit offset from this instruction to the start of the constant
    // data.
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_U32), RegLo)
      .addReg(RegLo)
      .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
      .addReg(RegHi)
      .addImm(0)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
      .addReg(AMDGPU::SCC, RegState::Implicit);
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::SGPR_USE:
    // This is just a placeholder for register allocation.
    MI->eraseFromParent();
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI->getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit);
    }
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
    unsigned Src0 = MI->getOperand(1).getReg();
    unsigned Src1 = MI->getOperand(2).getReg();
    const MachineOperand &SrcCond = MI->getOperand(3);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
      .addOperand(SrcCond);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
      .addOperand(SrcCond);
    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

/// Commutes the operands in the given instruction.
/// The commutable operands are specified by their indices OpIdx0 and OpIdx1.
///
/// Do not call this method for a non-commutable instruction or for a
/// non-commutable pair of operand indices OpIdx0 and OpIdx1.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx0,
                                                  unsigned OpIdx1) const {
  int CommutedOpcode = commuteOpcode(*MI);
  if (CommutedOpcode == -1)
    return nullptr;

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);
  if (!Src0.isReg())
    return nullptr;

  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);

  if ((OpIdx0 != static_cast<unsigned>(Src0Idx) ||
       OpIdx1 != static_cast<unsigned>(Src1Idx)) &&
      (OpIdx0 != static_cast<unsigned>(Src1Idx) ||
       OpIdx1 != static_cast<unsigned>(Src0Idx)))
    return nullptr;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  // Make sure it's legal to commute operands for VOP2.
  if (isVOP2(*MI) &&
      (!isOperandLegal(MI, Src0Idx, &Src1) ||
       !isOperandLegal(MI, Src1Idx, &Src0))) {
    return nullptr;
  }

  if (!Src1.isReg()) {
    // Allow commuting instructions with Imm operands.
    if (NewMI || !Src1.isImm() ||
        (!isVOP2(*MI) && !isVOP3(*MI))) {
      return nullptr;
    }

    // Be sure to copy the source modifiers to the right place.
    if (MachineOperand *Src0Mods
          = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      MachineOperand *Src1Mods
        = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);

      int Src0ModsVal = Src0Mods->getImm();
      if (!Src1Mods && Src0ModsVal != 0)
        return nullptr;

      // XXX - This assert might be a lie. It might be useful to have a neg
      // modifier with 0.0.
      int Src1ModsVal = Src1Mods->getImm();
      assert((Src1ModsVal == 0) && "Not expecting modifiers with immediates");

      Src1Mods->setImm(Src0ModsVal);
      Src0Mods->setImm(Src1ModsVal);
    }

    unsigned Reg = Src0.getReg();
    unsigned SubReg = Src0.getSubReg();
    if (Src1.isImm())
      Src0.ChangeToImmediate(Src1.getImm());
    else
      llvm_unreachable("Should only have immediates");

    Src1.ChangeToRegister(Reg, false);
    Src1.setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
  }

  if (MI)
    MI->setDesc(get(CommutedOpcode));

  return MI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  unsigned Opc = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  // FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
  // immediate. Also, an immediate src0 operand is not handled in
  // SIInstrInfo::commuteInstruction().
  if (!MI->getOperand(Src0Idx).isReg())
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);
  if (Src1.isImm()) {
    // SIInstrInfo::commuteInstruction() does support commuting the immediate
    // operand src1 in 2 and 3 operand instructions.
    if (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))
      return false;
  } else if (Src1.isReg()) {
    // If any source modifiers are set, the generic instruction commuting won't
    // understand how to copy the source modifiers.
    if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
      return false;
  } else
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg)
    .addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  // Remove in reverse index order so the earlier indices stay valid.
  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned Opc = UseMI->getOpcode();
  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) {
    // Don't fold if we are using source modifiers. The new VOP2 instructions
    // don't have them.
    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
      return false;
    }

    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_f32.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      if (!Src2->isReg() ||
          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
        return false;

      // We need to do some weird looking operand shuffling since the madmk
      // operands are out of the normal expected order with the multiplied
      // constant as the last operand.
      //
      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
      // src0 -> src2 K
      // src1 -> src0
      // src2 -> src1
      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
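      // omod follows clamp in the operand list, so removing it first keeps
      // the clamp index valid.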
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      unsigned Src2Reg = Src2->getReg();
      unsigned Src2SubReg = Src2->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      Src1->setReg(Src2Reg);
      Src1->setSubReg(Src2SubReg);
      Src1->setIsKill(Src2->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      Src2->ChangeToImmediate(Imm);

      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_f32.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      if (!Src0->isImm() &&
          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
        return false;

      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      // ChangingToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ?
      WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
                                               MachineInstr *MIb) const {
  unsigned BaseReg0, Offset0;
  unsigned BaseReg1, Offset1;

  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
    assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
           "read2 / write2 not expected here yet");
    unsigned Width0 = (*MIa->memoperands_begin())->getSize();
    unsigned Width1 = (*MIb->memoperands_begin())->getSize();
    if (BaseReg0 == BaseReg1 &&
        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must load from or modify a memory location");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
  if (isDS(*MIa)) {
    if (isDS(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb);
  }

  if (isMUBUF(*MIa) || isMTBUF(*MIa)) {
    if (isMUBUF(*MIb) || isMTBUF(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isSMRD(*MIb);
  }

  if (isSMRD(*MIa)) {
    if (isSMRD(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isMUBUF(*MIa) && !isMTBUF(*MIa);
  }

  if (isFLAT(*MIa)) {
    if (isFLAT(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineBasicBlock::iterator &MI,
                                                 LiveVariables *LV) const {

  switch (MI->getOpcode()) {
  default: return nullptr;
  case AMDGPU::V_MAC_F32_e64: break;
  case AMDGPU::V_MAC_F32_e32: {
    const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
    if (Src0->isImm() && !isInlineConstant(*Src0, 4))
      return nullptr;
    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
  const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
  const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);

  return BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_MAD_F32))
    .addOperand(*Dst)
    .addImm(0) // Src0 mods
    .addOperand(*Src0)
    .addImm(0) // Src1 mods
    .addOperand(*Src1)
    .addImm(0) // Src2 mods
    .addOperand(*Src2)
    .addImm(0)  // clamp
    .addImm(0); // omod
}

bool
SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int64_t SVal = Imm.getSExtValue();
  if (SVal >= -16 && SVal <= 64)
    return true;

  if (Imm.getBitWidth() == 64) {
    uint64_t Val = Imm.getZExtValue();
    return (DoubleToBits(0.0) == Val) ||
           (DoubleToBits(1.0) == Val) ||
           (DoubleToBits(-1.0) == Val) ||
           (DoubleToBits(0.5) == Val) ||
           (DoubleToBits(-0.5) == Val) ||
           (DoubleToBits(2.0) == Val) ||
           (DoubleToBits(-2.0) == Val) ||
           (DoubleToBits(4.0) == Val) ||
           (DoubleToBits(-4.0) == Val);
  }

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.
  uint32_t Val = Imm.getZExtValue();

  return (FloatToBits(0.0f) == Val) ||
         (FloatToBits(1.0f) == Val) ||
         (FloatToBits(-1.0f) == Val) ||
         (FloatToBits(0.5f) == Val) ||
         (FloatToBits(-0.5f) == Val) ||
         (FloatToBits(2.0f) == Val) ||
         (FloatToBits(-2.0f) == Val) ||
         (FloatToBits(4.0f) == Val) ||
         (FloatToBits(-4.0f) == Val);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   unsigned OpSize) const {
  if (MO.isImm()) {
    // MachineOperand provides no way to tell the true operand size, since it
    // only records a 64-bit value. We need to know the size to determine if a
    // 32-bit floating point immediate bit pattern is legal for an integer
    // immediate. It would be for any 32-bit integer operand, but would not be
    // for a 64-bit one.
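    // For example, the bit pattern of 1.0f (0x3f800000) is inline for a
    // 32-bit operand, but as a 64-bit value it matches neither the -16..64
    // range nor any of the double patterns checked above.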

    unsigned BitSize = 8 * OpSize;
    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
                                    unsigned OpSize) const {
  return MO.isImm() && !isInlineConstant(MO, OpSize);
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
  if (isLiteralConstant(MO, OpSize))
    return RI.opCanUseLiteralConstant(OpInfo.OperandType);

  return RI.opCanUseInlineConstant(OpInfo.OperandType);
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.
  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  unsigned OpSize) const {
  // Literal constants use the constant bus.
  if (isLiteralConstant(MO, OpSize))
    return true;

  if (!MO.isReg() || !MO.isUse())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // FLAT_SCR is just an SGPR pair.
  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
    return true;

  // EXEC register uses the constant bus.
  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
    return true;

  // SGPRs use the constant bus.
  if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
      (!MO.isImplicit() &&
       (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
        AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
    return true;
  }

  return false;
}

static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
    if (MO.isDef())
      continue;

    switch (MO.getReg()) {
    case AMDGPU::VCC:
    case AMDGPU::M0:
    case AMDGPU::FLAT_SCR:
      return MO.getReg();

    default:
      break;
    }
  }

  return AMDGPU::NoRegister;
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI->getOperand(i).isImm()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C:
      if (isLiteralConstant(MI->getOperand(i),
                            RI.getRegClass(RegClass)->getSize())) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case MCOI::OPERAND_IMMEDIATE:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*.
  if (isVOP1(*MI) || isVOP2(*MI) || isVOP3(*MI) || isVOPC(*MI)) {
    // Only look at the true operands. Only a real operand can use the
    // constant bus, and we don't want to check pseudo-operands like the
    // source modifier flags.
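    // VOP* encodings may read at most one SGPR / literal / other constant-bus
    // value, so count the distinct constant bus users and reject a second one.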
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = findImplicitSGPRRead(*MI);
    if (SGPRUsed != AMDGPU::NoRegister)
      ++ConstantBusCount;

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI->getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
        if (MO.isReg()) {
          if (MO.getReg() != SGPRUsed)
            ++ConstantBusCount;
          SGPRUsed = MO.getReg();
        } else {
          ++ConstantBusCount;
        }
      }
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify misc. restrictions on specific instructions.
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
    const MachineOperand &Src0 = MI->getOperand(Src0Idx);
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    const MachineOperand &Src2 = MI->getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
  if (!isGenericOpcode(Opcode) && !isSALU(Opcode) && !isSMRD(Opcode)) {
    const MachineOperand *Exec = MI->findRegisterUseOperand(AMDGPU::EXEC);
    if (!Exec || !Exec->isImplicit()) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  return true;
}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    unsigned Reg = MI.getOperand(OpNo).getReg();

    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return
      RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  unsigned Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
    .addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
    MachineBasicBlock::iterator MII,
    MachineRegisterInfo &MRI,
    MachineOperand &Op,
    const TargetRegisterClass *SuperRC,
    unsigned SubIdx,
    const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1).
void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
  assert(Inst->getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst->getOperand(1);
  Inst->RemoveOperand(1);
  Inst->addOperand(Op1);
}

bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const MCInstrDesc &InstDesc = get(MI->getOpcode());
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
      OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI->getOperand(OpIdx);

  if (isVALU(*MI) &&
      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
    unsigned SGPRUsed =
        MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI->getOperand(i);
      if (Op.isReg() && Op.getReg() != SGPRUsed &&
          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
        return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    const TargetRegisterClass *RC =
        TargetRegisterInfo::isVirtualRegister(MO->getReg()) ?
            MRI.getRegClass(MO->getReg()) :
            RI.getPhysRegClass(MO->getReg());

    // In order to be legal, the common sub-class must be equal to the
    // class of the current operand. For example:
    //
    // v_mov_b32 s0 ; Operand defined as vsrc_32
    //              ; RI.getCommonSubClass(s0,vsrc_32) = sgpr ; LEGAL
    //
    // s_sendmsg 0, s0 ; Operand defined as m0reg
    //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL
    return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());

  if (!DefinedRC) {
    // This operand expects an immediate.
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}

// Legalize VOP3 operands. Because all operand types are supported for any
// operand, and since literal constants are not allowed and should never be
// seen, we only need to worry about inserting copies if we use multiple SGPR
// operands.
void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
                                       MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();

  int VOP3Idx[3] = {
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
  };

  // Find the one SGPR operand we are allowed to use.
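  // VOP3 can read at most one SGPR; any additional SGPR sources found below
  // are copied into VGPRs with legalizeOpWithMove().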
1739 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
1740
1741 for (unsigned i = 0; i < 3; ++i) {
1742 int Idx = VOP3Idx[i];
1743 if (Idx == -1)
1744 break;
1745 MachineOperand &MO = MI->getOperand(Idx);
1746
1747 // We should never see a VOP3 instruction with an illegal immediate operand.
1748 if (!MO.isReg())
1749 continue;
1750
1751 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
1752 continue; // VGPRs are legal
1753
1754 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
1755 SGPRReg = MO.getReg();
1756 // We can use one SGPR in each VOP3 instruction.
1757 continue;
1758 }
1759
1760 // If we make it this far, then the operand is not legal and we must
1761 // legalize it.
1762 legalizeOpWithMove(MI, Idx);
1763 }
1764 }
1765
1766 void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
1767 MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
1768 unsigned Opc = MI->getOpcode();
1769
1770 // Legalize VOP2
1771 if (isVOP2(*MI)) {
1772 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
1773 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
1774
1775 // Legalize src0
1776 if (!isOperandLegal(MI, Src0Idx))
1777 legalizeOpWithMove(MI, Src0Idx);
1778
1779 // Legalize src1
1780 if (isOperandLegal(MI, Src1Idx))
1781 return;
1782
1783 // Usually src0 of VOP2 instructions allows more types of inputs
1784 // than src1, so try to commute the instruction to decrease our
1785 // chances of having to insert a MOV instruction to legalize src1.
1786 if (MI->isCommutable()) {
1787 if (commuteInstruction(MI))
1788 // If we are successful in commuting, then we know MI is legal, so
1789 // we are done.
1790 return;
1791 }
1792
1793 legalizeOpWithMove(MI, Src1Idx);
1794 return;
1795 }
1796
1797 // Legalize VOP3
1798 if (isVOP3(*MI)) {
1799 legalizeOperandsVOP3(MRI, MI);
1800 return;
1801 }
1802
1803 // Legalize REG_SEQUENCE and PHI
1804 // The register class of the operands must be the same type as the register
1805 // class of the output.
1806 if (MI->getOpcode() == AMDGPU::PHI) {
1807 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
1808 for (unsigned i = 1, e = MI->getNumOperands(); i != e; i+=2) {
1809 if (!MI->getOperand(i).isReg() ||
1810 !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
1811 continue;
1812 const TargetRegisterClass *OpRC =
1813 MRI.getRegClass(MI->getOperand(i).getReg());
1814 if (RI.hasVGPRs(OpRC)) {
1815 VRC = OpRC;
1816 } else {
1817 SRC = OpRC;
1818 }
1819 }
1820
1821 // If any of the operands are VGPR registers, then they all must be VGPRs;
1822 // otherwise we will create illegal VGPR->SGPR copies when legalizing
1823 // them.
1824 if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
1825 if (!VRC) {
1826 assert(SRC);
1827 VRC = RI.getEquivalentVGPRClass(SRC);
1828 }
1829 RC = VRC;
1830 } else {
1831 RC = SRC;
1832 }
1833
1834 // Update all the operands so they have the same type.
1835 for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
1836 MachineOperand &Op = MI->getOperand(I);
1837 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
1838 continue;
1839 unsigned DstReg = MRI.createVirtualRegister(RC);
1840
1841 // MI is a PHI instruction.
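// A copy for a PHI input cannot be inserted in front of the PHI itself;
// it must go into the corresponding predecessor block, just before that
// block's terminator, so the value is defined on the incoming edge.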
1842 MachineBasicBlock *InsertBB = MI->getOperand(I + 1).getMBB();
1843 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
1844
1845 BuildMI(*InsertBB, Insert, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
1846 .addOperand(Op);
1847 Op.setReg(DstReg);
1848 }
1849 }
1850
1851 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
1852 // VGPR dest type and SGPR sources, insert copies so all operands are
1853 // VGPRs. This seems to help operand folding / the register coalescer.
1854 if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
1855 MachineBasicBlock *MBB = MI->getParent();
1856 const TargetRegisterClass *DstRC = getOpRegClass(*MI, 0);
1857 if (RI.hasVGPRs(DstRC)) {
1858 // Update all the operands so they are VGPR register classes. These may
1859 // not be the same register class because REG_SEQUENCE supports mixing
1860 // subregister index types e.g. sub0_sub1 + sub2 + sub3
1861 for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
1862 MachineOperand &Op = MI->getOperand(I);
1863 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
1864 continue;
1865
1866 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
1867 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
1868 if (VRC == OpRC)
1869 continue;
1870
1871 unsigned DstReg = MRI.createVirtualRegister(VRC);
1872
1873 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
1874 .addOperand(Op);
1875
1876 Op.setReg(DstReg);
1877 Op.setIsKill();
1878 }
1879 }
1880
1881 return;
1882 }
1883
1884 // Legalize INSERT_SUBREG
1885 // src0 must have the same register class as dst
1886 if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
1887 unsigned Dst = MI->getOperand(0).getReg();
1888 unsigned Src0 = MI->getOperand(1).getReg();
1889 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
1890 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
1891 if (DstRC != Src0RC) {
1892 MachineBasicBlock &MBB = *MI->getParent();
1893 unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
1894 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
1895 .addReg(Src0);
1896 MI->getOperand(1).setReg(NewSrc0);
1897 }
1898 return;
1899 }
1900
1901 // Legalize MUBUF* instructions
1902 // FIXME: If we start using the non-addr64 instructions for compute, we
1903 // may need to legalize them here.
1904 int SRsrcIdx =
1905 AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
1906 if (SRsrcIdx != -1) {
1907 // We have a MUBUF instruction
1908 MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
1909 unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
1910 if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
1911 RI.getRegClass(SRsrcRC))) {
1912 // The operands are legal.
1913 // FIXME: We may need to legalize operands besides srsrc.
1914 return;
1915 }
1916
1917 MachineBasicBlock &MBB = *MI->getParent();
1918
1919 // Extract the ptr from the resource descriptor.
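// The 128-bit buffer resource descriptor keeps the 64-bit base address in
// its first two dwords (sub0_sub1); that base is what the ADDR64 form of
// the instruction consumes as its vector address.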
1920 unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
1921 &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
1922
1923 // Create an empty resource descriptor
1924 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
1925 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1926 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
1927 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
1928 uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();
1929
1930 // Zero64 = 0
1931 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
1932 Zero64)
1933 .addImm(0);
1934
1935 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
1936 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1937 SRsrcFormatLo)
1938 .addImm(RsrcDataFormat & 0xFFFFFFFF);
1939
1940 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
1941 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
1942 SRsrcFormatHi)
1943 .addImm(RsrcDataFormat >> 32);
1944
1945 // NewSRsrc = {Zero64, SRsrcFormat}
1946 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
1947 .addReg(Zero64)
1948 .addImm(AMDGPU::sub0_sub1)
1949 .addReg(SRsrcFormatLo)
1950 .addImm(AMDGPU::sub2)
1951 .addReg(SRsrcFormatHi)
1952 .addImm(AMDGPU::sub3);
1953
1954 MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
1955 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
1956 if (VAddr) {
1957 // This is already an ADDR64 instruction so we need to add the pointer
1958 // extracted from the resource descriptor to the current value of VAddr.
1959 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1960 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1961
1962 // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
1963 DebugLoc DL = MI->getDebugLoc();
1964 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
1965 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
1966 .addReg(VAddr->getReg(), 0, AMDGPU::sub0);
1967
1968 // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
1969 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
1970 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
1971 .addReg(VAddr->getReg(), 0, AMDGPU::sub1);
1972
1973 // NewVaddr = {NewVaddrHi, NewVaddrLo}
1974 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
1975 .addReg(NewVAddrLo)
1976 .addImm(AMDGPU::sub0)
1977 .addReg(NewVAddrHi)
1978 .addImm(AMDGPU::sub1);
1979 } else {
1980 // This instruction is the _OFFSET variant, so we need to convert it to
1981 // ADDR64.
1982 assert(MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration()
1983 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
1984 "FIXME: Need to emit flat atomics here");
1985
1986 MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
1987 MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
1988 MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
1989 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());
1990
1991 // Atomics with return have an additional tied operand and are
1992 // missing some of the special bits.
1993 MachineOperand *VDataIn = getNamedOperand(*MI, AMDGPU::OpName::vdata_in);
1994 MachineInstr *Addr64;
1995
1996 if (!VDataIn) {
1997 // Regular buffer load / store.
1998 MachineInstrBuilder MIB
1999 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
2000 .addOperand(*VData)
2001 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2002 // This will be replaced later
2003 // with the new value of vaddr.
2004 .addOperand(*SRsrc)
2005 .addOperand(*SOffset)
2006 .addOperand(*Offset);
2007
2008 // Atomics do not have this operand.
2009 if (const MachineOperand *GLC
2010 = getNamedOperand(*MI, AMDGPU::OpName::glc)) {
2011 MIB.addImm(GLC->getImm());
2012 }
2013
2014 MIB.addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc));
2015
2016 if (const MachineOperand *TFE
2017 = getNamedOperand(*MI, AMDGPU::OpName::tfe)) {
2018 MIB.addImm(TFE->getImm());
2019 }
2020
2021 MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
2022 Addr64 = MIB;
2023 } else {
2024 // Atomics with return.
2025 Addr64 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
2026 .addOperand(*VData)
2027 .addOperand(*VDataIn)
2028 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
2029 // This will be replaced later
2030 // with the new value of vaddr.
2031 .addOperand(*SRsrc)
2032 .addOperand(*SOffset)
2033 .addOperand(*Offset)
2034 .addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc))
2035 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
2036 }
2037
2038 MI->removeFromParent();
2039 MI = Addr64;
2040
2041 // NewVaddr = {SRsrcPtr:sub0, SRsrcPtr:sub1}; there is no old vaddr to add.
2042 BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
2043 .addReg(SRsrcPtr, 0, AMDGPU::sub0)
2044 .addImm(AMDGPU::sub0)
2045 .addReg(SRsrcPtr, 0, AMDGPU::sub1)
2046 .addImm(AMDGPU::sub1);
2047
2048 VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
2049 SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
2050 }
2051
2052 // Update the instruction to use NewVaddr
2053 VAddr->setReg(NewVAddr);
2054 // Update the instruction to use NewSRsrc
2055 SRsrc->setReg(NewSRsrc);
2056 }
2057 }
2058
2059 void SIInstrInfo::splitSMRD(MachineInstr *MI,
2060 const TargetRegisterClass *HalfRC,
2061 unsigned HalfImmOp, unsigned HalfSGPROp,
2062 MachineInstr *&Lo, MachineInstr *&Hi) const {
2063
2064 DebugLoc DL = MI->getDebugLoc();
2065 MachineBasicBlock *MBB = MI->getParent();
2066 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
2067 unsigned RegLo = MRI.createVirtualRegister(HalfRC);
2068 unsigned RegHi = MRI.createVirtualRegister(HalfRC);
2069 unsigned HalfSize = HalfRC->getSize();
2070 const MachineOperand *OffOp =
2071 getNamedOperand(*MI, AMDGPU::OpName::offset);
2072 const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);
2073
2074 // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
2075 // on VI.
2076
2077 bool IsKill = SBase->isKill();
2078 if (OffOp) {
2079 bool isVI =
2080 MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
2081 AMDGPUSubtarget::VOLCANIC_ISLANDS;
2082 unsigned OffScale = isVI ? 1 : 4;
2083 // Handle the _IMM variant
2084 unsigned LoOffset = OffOp->getImm() * OffScale;
2085 unsigned HiOffset = LoOffset + HalfSize;
2086 Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
2087 // Use addReg instead of addOperand
2088 // to make sure kill flag is cleared.
2089 .addReg(SBase->getReg(), 0, SBase->getSubReg())
2090 .addImm(LoOffset / OffScale);
2091
2092 if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
2093 unsigned OffsetSGPR =
2094 MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2095 BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
2096 .addImm(HiOffset); // The offset in register is in bytes.
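// The offset of the high half no longer fits the immediate field, so use
// the _SGPR form with the byte offset materialized in OffsetSGPR instead.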
2097 Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
2098 .addReg(SBase->getReg(), getKillRegState(IsKill),
2099 SBase->getSubReg())
2100 .addReg(OffsetSGPR);
2101 } else {
2102 Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
2103 .addReg(SBase->getReg(), getKillRegState(IsKill),
2104 SBase->getSubReg())
2105 .addImm(HiOffset / OffScale);
2106 }
2107 } else {
2108 // Handle the _SGPR variant
2109 MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
2110 Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
2111 .addReg(SBase->getReg(), 0, SBase->getSubReg())
2112 .addOperand(*SOff);
2113 unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2114 BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
2115 .addReg(SOff->getReg(), 0, SOff->getSubReg())
2116 .addImm(HalfSize);
2117 Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
2118 .addReg(SBase->getReg(), getKillRegState(IsKill),
2119 SBase->getSubReg())
2120 .addReg(OffsetSGPR);
2121 }
2122
2123 unsigned SubLo, SubHi;
2124 const TargetRegisterClass *NewDstRC;
2125 switch (HalfSize) {
2126 case 4:
2127 SubLo = AMDGPU::sub0;
2128 SubHi = AMDGPU::sub1;
2129 NewDstRC = &AMDGPU::VReg_64RegClass;
2130 break;
2131 case 8:
2132 SubLo = AMDGPU::sub0_sub1;
2133 SubHi = AMDGPU::sub2_sub3;
2134 NewDstRC = &AMDGPU::VReg_128RegClass;
2135 break;
2136 case 16:
2137 SubLo = AMDGPU::sub0_sub1_sub2_sub3;
2138 SubHi = AMDGPU::sub4_sub5_sub6_sub7;
2139 NewDstRC = &AMDGPU::VReg_256RegClass;
2140 break;
2141 case 32:
2142 SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
2143 SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
2144 NewDstRC = &AMDGPU::VReg_512RegClass;
2145 break;
2146 default:
2147 llvm_unreachable("Unhandled HalfSize");
2148 }
2149
2150 unsigned OldDst = MI->getOperand(0).getReg();
2151 unsigned NewDst = MRI.createVirtualRegister(NewDstRC);
2152
2153 MRI.replaceRegWith(OldDst, NewDst);
2154
2155 BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDst)
2156 .addReg(RegLo)
2157 .addImm(SubLo)
2158 .addReg(RegHi)
2159 .addImm(SubHi);
2160 }
2161
2162 void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
2163 MachineRegisterInfo &MRI,
2164 SmallVectorImpl<MachineInstr *> &Worklist) const {
2165 MachineBasicBlock *MBB = MI->getParent();
2166 int DstIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
2167 assert(DstIdx != -1);
2168 unsigned DstRCID = get(MI->getOpcode()).OpInfo[DstIdx].RegClass;
2169 switch(RI.getRegClass(DstRCID)->getSize()) {
2170 case 4:
2171 case 8:
2172 case 16: {
2173 unsigned NewOpcode = getVALUOp(*MI);
2174 unsigned RegOffset;
2175 unsigned ImmOffset;
2176
2177 if (MI->getOperand(2).isReg()) {
2178 RegOffset = MI->getOperand(2).getReg();
2179 ImmOffset = 0;
2180 } else {
2181 assert(MI->getOperand(2).isImm());
2182 // SMRD instructions take a dword offset on SI and a byte offset on VI,
2183 // and MUBUF instructions always take a byte offset.
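// E.g. an SMRD immediate offset of 4 on SI addresses byte 16, so the
// value is scaled by 4 (<<= 2) below before being reused as a MUBUF
// byte offset.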
2184 ImmOffset = MI->getOperand(2).getImm(); 2185 if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <= 2186 AMDGPUSubtarget::SEA_ISLANDS) 2187 ImmOffset <<= 2; 2188 RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2189 2190 if (isUInt<12>(ImmOffset)) { 2191 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), 2192 RegOffset) 2193 .addImm(0); 2194 } else { 2195 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), 2196 RegOffset) 2197 .addImm(ImmOffset); 2198 ImmOffset = 0; 2199 } 2200 } 2201 2202 unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 2203 unsigned DWord0 = RegOffset; 2204 unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2205 unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2206 unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2207 uint64_t RsrcDataFormat = getDefaultRsrcDataFormat(); 2208 2209 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1) 2210 .addImm(0); 2211 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2) 2212 .addImm(RsrcDataFormat & 0xFFFFFFFF); 2213 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3) 2214 .addImm(RsrcDataFormat >> 32); 2215 BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc) 2216 .addReg(DWord0) 2217 .addImm(AMDGPU::sub0) 2218 .addReg(DWord1) 2219 .addImm(AMDGPU::sub1) 2220 .addReg(DWord2) 2221 .addImm(AMDGPU::sub2) 2222 .addReg(DWord3) 2223 .addImm(AMDGPU::sub3); 2224 2225 const MCInstrDesc &NewInstDesc = get(NewOpcode); 2226 const TargetRegisterClass *NewDstRC 2227 = RI.getRegClass(NewInstDesc.OpInfo[0].RegClass); 2228 unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC); 2229 unsigned DstReg = MI->getOperand(0).getReg(); 2230 MRI.replaceRegWith(DstReg, NewDstReg); 2231 2232 MachineInstr *NewInst = 2233 BuildMI(*MBB, MI, MI->getDebugLoc(), NewInstDesc, NewDstReg) 2234 .addOperand(MI->getOperand(1)) // sbase 2235 .addReg(SRsrc) 2236 .addImm(0) 2237 .addImm(ImmOffset) 2238 .addImm(0) // glc 2239 .addImm(0) // slc 2240 .addImm(0) // tfe 2241 .setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); 2242 MI->eraseFromParent(); 2243 2244 legalizeOperands(NewInst); 2245 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 2246 break; 2247 } 2248 case 32: { 2249 MachineInstr *Lo, *Hi; 2250 splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM, 2251 AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi); 2252 MI->eraseFromParent(); 2253 moveSMRDToVALU(Lo, MRI, Worklist); 2254 moveSMRDToVALU(Hi, MRI, Worklist); 2255 break; 2256 } 2257 2258 case 64: { 2259 MachineInstr *Lo, *Hi; 2260 splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM, 2261 AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi); 2262 MI->eraseFromParent(); 2263 moveSMRDToVALU(Lo, MRI, Worklist); 2264 moveSMRDToVALU(Hi, MRI, Worklist); 2265 break; 2266 } 2267 } 2268 } 2269 2270 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const { 2271 SmallVector<MachineInstr *, 128> Worklist; 2272 Worklist.push_back(&TopInst); 2273 2274 while (!Worklist.empty()) { 2275 MachineInstr *Inst = Worklist.pop_back_val(); 2276 MachineBasicBlock *MBB = Inst->getParent(); 2277 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 2278 2279 unsigned Opcode = Inst->getOpcode(); 2280 unsigned NewOpcode = getVALUOp(*Inst); 2281 2282 // Handle some special cases 2283 switch (Opcode) { 2284 default: 2285 if (isSMRD(*Inst)) { 2286 moveSMRDToVALU(Inst, MRI, Worklist); 2287 continue; 2288 } 2289 break; 
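// 64-bit scalar bitwise operations have no single 64-bit VALU equivalent,
// so they are split into two 32-bit VALU ops, one per half of the
// register pair.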
2290 case AMDGPU::S_AND_B64:
2291 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
2292 Inst->eraseFromParent();
2293 continue;
2294
2295 case AMDGPU::S_OR_B64:
2296 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
2297 Inst->eraseFromParent();
2298 continue;
2299
2300 case AMDGPU::S_XOR_B64:
2301 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
2302 Inst->eraseFromParent();
2303 continue;
2304
2305 case AMDGPU::S_NOT_B64:
2306 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
2307 Inst->eraseFromParent();
2308 continue;
2309
2310 case AMDGPU::S_BCNT1_I32_B64:
2311 splitScalar64BitBCNT(Worklist, Inst);
2312 Inst->eraseFromParent();
2313 continue;
2314
2315 case AMDGPU::S_BFE_I64: {
2316 splitScalar64BitBFE(Worklist, Inst);
2317 Inst->eraseFromParent();
2318 continue;
2319 }
2320
2321 case AMDGPU::S_LSHL_B32:
2322 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2323 NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
2324 swapOperands(Inst);
2325 }
2326 break;
2327 case AMDGPU::S_ASHR_I32:
2328 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2329 NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
2330 swapOperands(Inst);
2331 }
2332 break;
2333 case AMDGPU::S_LSHR_B32:
2334 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2335 NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
2336 swapOperands(Inst);
2337 }
2338 break;
2339 case AMDGPU::S_LSHL_B64:
2340 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2341 NewOpcode = AMDGPU::V_LSHLREV_B64;
2342 swapOperands(Inst);
2343 }
2344 break;
2345 case AMDGPU::S_ASHR_I64:
2346 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2347 NewOpcode = AMDGPU::V_ASHRREV_I64;
2348 swapOperands(Inst);
2349 }
2350 break;
2351 case AMDGPU::S_LSHR_B64:
2352 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
2353 NewOpcode = AMDGPU::V_LSHRREV_B64;
2354 swapOperands(Inst);
2355 }
2356 break;
2357
2358 case AMDGPU::S_BFE_U64:
2359 case AMDGPU::S_BFM_B64:
2360 llvm_unreachable("Moving this op to VALU not implemented");
2361 }
2362
2363 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
2364 // We cannot move this instruction to the VALU, so we should try to
2365 // legalize its operands instead.
2366 legalizeOperands(Inst);
2367 continue;
2368 }
2369
2370 // Use the new VALU Opcode.
2371 const MCInstrDesc &NewDesc = get(NewOpcode);
2372 Inst->setDesc(NewDesc);
2373
2374 // Remove any references to SCC. Vector instructions can't read from it,
2375 // and we're about to add the implicit use / defs of VCC; we don't want
2376 // both.
2377 for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
2378 MachineOperand &Op = Inst->getOperand(i);
2379 if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
2380 Inst->RemoveOperand(i);
2381 }
2382
2383 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
2384 // We are converting these to a BFE, so we need to add the missing
2385 // operands for the size and offset.
2386 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
2387 Inst->addOperand(MachineOperand::CreateImm(0));
2388 Inst->addOperand(MachineOperand::CreateImm(Size));
2389
2390 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
2391 // The VALU version adds the second operand to the result, so insert an
2392 // extra 0 operand.
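// v_bcnt_u32_b32 computes bitcount(src0) + src1, so a zero src1 leaves
// the plain population count.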
2393 Inst->addOperand(MachineOperand::CreateImm(0)); 2394 } 2395 2396 Inst->addImplicitDefUseOperands(*Inst->getParent()->getParent()); 2397 2398 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 2399 const MachineOperand &OffsetWidthOp = Inst->getOperand(2); 2400 // If we need to move this to VGPRs, we need to unpack the second operand 2401 // back into the 2 separate ones for bit offset and width. 2402 assert(OffsetWidthOp.isImm() && 2403 "Scalar BFE is only implemented for constant width and offset"); 2404 uint32_t Imm = OffsetWidthOp.getImm(); 2405 2406 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 2407 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 2408 Inst->RemoveOperand(2); // Remove old immediate. 2409 Inst->addOperand(MachineOperand::CreateImm(Offset)); 2410 Inst->addOperand(MachineOperand::CreateImm(BitWidth)); 2411 } 2412 2413 // Update the destination register class. 2414 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*Inst); 2415 if (!NewDstRC) 2416 continue; 2417 2418 unsigned DstReg = Inst->getOperand(0).getReg(); 2419 unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC); 2420 MRI.replaceRegWith(DstReg, NewDstReg); 2421 2422 // Legalize the operands 2423 legalizeOperands(Inst); 2424 2425 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 2426 } 2427 } 2428 2429 //===----------------------------------------------------------------------===// 2430 // Indirect addressing callbacks 2431 //===----------------------------------------------------------------------===// 2432 2433 unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex, 2434 unsigned Channel) const { 2435 assert(Channel == 0); 2436 return RegIndex; 2437 } 2438 2439 const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const { 2440 return &AMDGPU::VGPR_32RegClass; 2441 } 2442 2443 void SIInstrInfo::splitScalar64BitUnaryOp( 2444 SmallVectorImpl<MachineInstr *> &Worklist, 2445 MachineInstr *Inst, 2446 unsigned Opcode) const { 2447 MachineBasicBlock &MBB = *Inst->getParent(); 2448 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2449 2450 MachineOperand &Dest = Inst->getOperand(0); 2451 MachineOperand &Src0 = Inst->getOperand(1); 2452 DebugLoc DL = Inst->getDebugLoc(); 2453 2454 MachineBasicBlock::iterator MII = Inst; 2455 2456 const MCInstrDesc &InstDesc = get(Opcode); 2457 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
2458 MRI.getRegClass(Src0.getReg()) : 2459 &AMDGPU::SGPR_32RegClass; 2460 2461 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 2462 2463 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 2464 AMDGPU::sub0, Src0SubRC); 2465 2466 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 2467 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 2468 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 2469 2470 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 2471 BuildMI(MBB, MII, DL, InstDesc, DestSub0) 2472 .addOperand(SrcReg0Sub0); 2473 2474 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 2475 AMDGPU::sub1, Src0SubRC); 2476 2477 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 2478 BuildMI(MBB, MII, DL, InstDesc, DestSub1) 2479 .addOperand(SrcReg0Sub1); 2480 2481 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 2482 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 2483 .addReg(DestSub0) 2484 .addImm(AMDGPU::sub0) 2485 .addReg(DestSub1) 2486 .addImm(AMDGPU::sub1); 2487 2488 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 2489 2490 // We don't need to legalizeOperands here because for a single operand, src0 2491 // will support any kind of input. 2492 2493 // Move all users of this moved value. 2494 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 2495 } 2496 2497 void SIInstrInfo::splitScalar64BitBinaryOp( 2498 SmallVectorImpl<MachineInstr *> &Worklist, 2499 MachineInstr *Inst, 2500 unsigned Opcode) const { 2501 MachineBasicBlock &MBB = *Inst->getParent(); 2502 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2503 2504 MachineOperand &Dest = Inst->getOperand(0); 2505 MachineOperand &Src0 = Inst->getOperand(1); 2506 MachineOperand &Src1 = Inst->getOperand(2); 2507 DebugLoc DL = Inst->getDebugLoc(); 2508 2509 MachineBasicBlock::iterator MII = Inst; 2510 2511 const MCInstrDesc &InstDesc = get(Opcode); 2512 const TargetRegisterClass *Src0RC = Src0.isReg() ? 2513 MRI.getRegClass(Src0.getReg()) : 2514 &AMDGPU::SGPR_32RegClass; 2515 2516 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 2517 const TargetRegisterClass *Src1RC = Src1.isReg() ? 
2518 MRI.getRegClass(Src1.getReg()) :
2519 &AMDGPU::SGPR_32RegClass;
2520
2521 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
2522
2523 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2524 AMDGPU::sub0, Src0SubRC);
2525 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
2526 AMDGPU::sub0, Src1SubRC);
2527
2528 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
2529 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
2530 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
2531
2532 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
2533 MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
2534 .addOperand(SrcReg0Sub0)
2535 .addOperand(SrcReg1Sub0);
2536
2537 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
2538 AMDGPU::sub1, Src0SubRC);
2539 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
2540 AMDGPU::sub1, Src1SubRC);
2541
2542 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
2543 MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
2544 .addOperand(SrcReg0Sub1)
2545 .addOperand(SrcReg1Sub1);
2546
2547 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
2548 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
2549 .addReg(DestSub0)
2550 .addImm(AMDGPU::sub0)
2551 .addReg(DestSub1)
2552 .addImm(AMDGPU::sub1);
2553
2554 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
2555
2556 // Try to legalize the operands in case we need to swap the order to keep it
2557 // valid.
2558 legalizeOperands(LoHalf);
2559 legalizeOperands(HiHalf);
2560
2561 // Move all users of this moved value.
2562 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
2563 }
2564
2565 void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
2566 MachineInstr *Inst) const {
2567 MachineBasicBlock &MBB = *Inst->getParent();
2568 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2569
2570 MachineBasicBlock::iterator MII = Inst;
2571 DebugLoc DL = Inst->getDebugLoc();
2572
2573 MachineOperand &Dest = Inst->getOperand(0);
2574 MachineOperand &Src = Inst->getOperand(1);
2575
2576 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
2577 const TargetRegisterClass *SrcRC = Src.isReg() ?
2578 MRI.getRegClass(Src.getReg()) :
2579 &AMDGPU::SGPR_32RegClass;
2580
2581 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2582 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2583
2584 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
2585
2586 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
2587 AMDGPU::sub0, SrcSubRC);
2588 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
2589 AMDGPU::sub1, SrcSubRC);
2590
2591 BuildMI(MBB, MII, DL, InstDesc, MidReg)
2592 .addOperand(SrcRegSub0)
2593 .addImm(0);
2594
2595 BuildMI(MBB, MII, DL, InstDesc, ResultReg)
2596 .addOperand(SrcRegSub1)
2597 .addReg(MidReg);
2598
2599 MRI.replaceRegWith(Dest.getReg(), ResultReg);
2600
2601 // We don't need to legalize operands here. src0 for either instruction can be
2602 // an SGPR, and the second input is unused or determined here.
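// The two v_bcnt instructions chain through src1: MidReg holds the
// popcount of the low half, and ResultReg adds the popcount of the high
// half on top of it.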
2603 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 2604 } 2605 2606 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist, 2607 MachineInstr *Inst) const { 2608 MachineBasicBlock &MBB = *Inst->getParent(); 2609 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2610 MachineBasicBlock::iterator MII = Inst; 2611 DebugLoc DL = Inst->getDebugLoc(); 2612 2613 MachineOperand &Dest = Inst->getOperand(0); 2614 uint32_t Imm = Inst->getOperand(2).getImm(); 2615 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 2616 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 2617 2618 (void) Offset; 2619 2620 // Only sext_inreg cases handled. 2621 assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 && 2622 BitWidth <= 32 && 2623 Offset == 0 && 2624 "Not implemented"); 2625 2626 if (BitWidth < 32) { 2627 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2628 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2629 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 2630 2631 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 2632 .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0) 2633 .addImm(0) 2634 .addImm(BitWidth); 2635 2636 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 2637 .addImm(31) 2638 .addReg(MidRegLo); 2639 2640 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 2641 .addReg(MidRegLo) 2642 .addImm(AMDGPU::sub0) 2643 .addReg(MidRegHi) 2644 .addImm(AMDGPU::sub1); 2645 2646 MRI.replaceRegWith(Dest.getReg(), ResultReg); 2647 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 2648 return; 2649 } 2650 2651 MachineOperand &Src = Inst->getOperand(1); 2652 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2653 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 2654 2655 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 2656 .addImm(31) 2657 .addReg(Src.getReg(), 0, AMDGPU::sub0); 2658 2659 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 2660 .addReg(Src.getReg(), 0, AMDGPU::sub0) 2661 .addImm(AMDGPU::sub0) 2662 .addReg(TmpReg) 2663 .addImm(AMDGPU::sub1); 2664 2665 MRI.replaceRegWith(Dest.getReg(), ResultReg); 2666 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 2667 } 2668 2669 void SIInstrInfo::addUsersToMoveToVALUWorklist( 2670 unsigned DstReg, 2671 MachineRegisterInfo &MRI, 2672 SmallVectorImpl<MachineInstr *> &Worklist) const { 2673 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 2674 E = MRI.use_end(); I != E; ++I) { 2675 MachineInstr &UseMI = *I->getParent(); 2676 if (!canReadVGPR(UseMI, I.getOperandNo())) { 2677 Worklist.push_back(&UseMI); 2678 } 2679 } 2680 } 2681 2682 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 2683 const MachineInstr &Inst) const { 2684 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 2685 2686 switch (Inst.getOpcode()) { 2687 // For target instructions, getOpRegClass just returns the virtual register 2688 // class associated with the operand, so we need to find an equivalent VGPR 2689 // register class in order to move the instruction to the VALU. 
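// These opcodes take their register class from their operands rather than
// from the opcode, so map the SGPR class to the VGPR class of the same
// width here.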
2690 case AMDGPU::COPY:
2691 case AMDGPU::PHI:
2692 case AMDGPU::REG_SEQUENCE:
2693 case AMDGPU::INSERT_SUBREG:
2694 if (RI.hasVGPRs(NewDstRC))
2695 return nullptr;
2696
2697 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
2698 if (!NewDstRC)
2699 return nullptr;
2700 return NewDstRC;
2701 default:
2702 return NewDstRC;
2703 }
2704 }
2705
2706 // Find the one SGPR operand we are allowed to use.
2707 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
2708 int OpIndices[3]) const {
2709 const MCInstrDesc &Desc = MI->getDesc();
2710
2711 // Find the one SGPR operand we are allowed to use.
2712 //
2713 // First we need to consider the instruction's operand requirements before
2714 // legalizing. Some operands are required to be SGPRs, such as implicit uses
2715 // of VCC, but we are still bound by the constant bus requirement to only use
2716 // one.
2717 //
2718 // If the operand's class is an SGPR, we can never move it.
2719
2720 unsigned SGPRReg = findImplicitSGPRRead(*MI);
2721 if (SGPRReg != AMDGPU::NoRegister)
2722 return SGPRReg;
2723
2724 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
2725 const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
2726
2727 for (unsigned i = 0; i < 3; ++i) {
2728 int Idx = OpIndices[i];
2729 if (Idx == -1)
2730 break;
2731
2732 const MachineOperand &MO = MI->getOperand(Idx);
2733 if (!MO.isReg())
2734 continue;
2735
2736 // Is this operand statically required to be an SGPR based on the operand
2737 // constraints?
2738 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
2739 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
2740 if (IsRequiredSGPR)
2741 return MO.getReg();
2742
2743 // If this could be a VGPR or an SGPR, check the dynamic register class.
2744 unsigned Reg = MO.getReg();
2745 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
2746 if (RI.isSGPRClass(RegRC))
2747 UsedSGPRs[i] = Reg;
2748 }
2749
2750 // We don't have a required SGPR operand, so we have a bit more freedom in
2751 // selecting operands to move.
2752
2753 // Try to select the most used SGPR. If an SGPR is equal to one of the
2754 // others, we choose that.
2755 //
2756 // e.g.
2757 // V_FMA_F32 v0, s0, s0, s0 -> No moves
2758 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
2759
2760 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
2761 // prefer those.
2762 2763 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 2764 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 2765 SGPRReg = UsedSGPRs[0]; 2766 } 2767 2768 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 2769 if (UsedSGPRs[1] == UsedSGPRs[2]) 2770 SGPRReg = UsedSGPRs[1]; 2771 } 2772 2773 return SGPRReg; 2774 } 2775 2776 MachineInstrBuilder SIInstrInfo::buildIndirectWrite( 2777 MachineBasicBlock *MBB, 2778 MachineBasicBlock::iterator I, 2779 unsigned ValueReg, 2780 unsigned Address, unsigned OffsetReg) const { 2781 const DebugLoc &DL = MBB->findDebugLoc(I); 2782 unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister( 2783 getIndirectIndexBegin(*MBB->getParent())); 2784 2785 return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1)) 2786 .addReg(IndirectBaseReg, RegState::Define) 2787 .addOperand(I->getOperand(0)) 2788 .addReg(IndirectBaseReg) 2789 .addReg(OffsetReg) 2790 .addImm(0) 2791 .addReg(ValueReg); 2792 } 2793 2794 MachineInstrBuilder SIInstrInfo::buildIndirectRead( 2795 MachineBasicBlock *MBB, 2796 MachineBasicBlock::iterator I, 2797 unsigned ValueReg, 2798 unsigned Address, unsigned OffsetReg) const { 2799 const DebugLoc &DL = MBB->findDebugLoc(I); 2800 unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister( 2801 getIndirectIndexBegin(*MBB->getParent())); 2802 2803 return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC_V1)) 2804 .addOperand(I->getOperand(0)) 2805 .addOperand(I->getOperand(1)) 2806 .addReg(IndirectBaseReg) 2807 .addReg(OffsetReg) 2808 .addImm(0); 2809 2810 } 2811 2812 void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved, 2813 const MachineFunction &MF) const { 2814 int End = getIndirectIndexEnd(MF); 2815 int Begin = getIndirectIndexBegin(MF); 2816 2817 if (End == -1) 2818 return; 2819 2820 2821 for (int Index = Begin; Index <= End; ++Index) 2822 Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index)); 2823 2824 for (int Index = std::max(0, Begin - 1); Index <= End; ++Index) 2825 Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index)); 2826 2827 for (int Index = std::max(0, Begin - 2); Index <= End; ++Index) 2828 Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index)); 2829 2830 for (int Index = std::max(0, Begin - 3); Index <= End; ++Index) 2831 Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index)); 2832 2833 for (int Index = std::max(0, Begin - 7); Index <= End; ++Index) 2834 Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index)); 2835 2836 for (int Index = std::max(0, Begin - 15); Index <= End; ++Index) 2837 Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index)); 2838 } 2839 2840 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 2841 unsigned OperandName) const { 2842 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 2843 if (Idx == -1) 2844 return nullptr; 2845 2846 return &MI.getOperand(Idx); 2847 } 2848 2849 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 2850 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 2851 if (ST.isAmdHsaOS()) { 2852 RsrcDataFormat |= (1ULL << 56); 2853 2854 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 2855 // Set MTYPE = 2 2856 RsrcDataFormat |= (2ULL << 59); 2857 } 2858 2859 return RsrcDataFormat; 2860 } 2861 2862 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 2863 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 2864 AMDGPU::RSRC_TID_ENABLE | 2865 0xffffffff; // Size; 2866 2867 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 2868 // Clear them unless we want a huge stride. 
2869 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 2870 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 2871 2872 return Rsrc23; 2873 } 2874