//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
///        operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for the implicit exec read check.
  switch (MI->getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt->getOpcode();

  if (isDS(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive. We
    // will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt->mayLoad())
        EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt->mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(*LdSt) || isMTBUF(*LdSt)) {
    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
      return false;

    const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                    AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isSMRD(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
                                                     AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                     MachineInstr *SecondLdSt,
                                     unsigned NumLoads) const {
  // TODO: This needs finer tuning.
  if (NumLoads > 4)
    return false;

  if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
    return true;

  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
    return true;

  if ((isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
      (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt)))
    return true;

  return false;
}

void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
    AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
    AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
  };

  static const int16_t Sub0_7_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
  };

  static const int16_t Sub0_3_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2,
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1,
  };

  unsigned Opcode;
  ArrayRef<int16_t> SubIndices;
  bool Forward;

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
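        // The source here is presumably a per-lane boolean held in a VGPR (the
        // VReg_1 hack noted above). The VOPC _e32 compare below writes its
        // per-lane result to VCC implicitly, which effects the copy into VCC.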
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_3_64;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_7_64;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B64;
    SubIndices = Sub0_15_64;

  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  if (RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg))
    Forward = true;
  else
    Forward = false;

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == SubIndices.size() - 1)
      Builder.addReg(SrcReg, RegState::Kill | RegState::Implicit);

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();

  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo->getObjectSize(FrameIndex);
  unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling
    // SGPRs.
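    // For example, a 16-byte SGPR tuple (an SReg_128) maps to the
    // SI_SPILL_S128_SAVE pseudo via getSGPRSpillSaveOpcode() below.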
    unsigned Opcode = getSGPRSpillSaveOpcode(RC->getSize());
    BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg)            // src
      .addFrameIndex(FrameIndex) // frame_idx
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(MFI)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg)                         // src
    .addFrameIndex(FrameIndex)              // frame_idx
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo->getObjectSize(FrameIndex);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    unsigned Opcode = getSGPRSpillRestoreOpcode(RC->getSize());
    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex) // frame_idx
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(MFI)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)              // frame_idx
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                               RegScavenger *RS, unsigned TmpReg,
                                               unsigned FrameOffset,
                                               unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (MFI->getShaderType() == ShaderType::COMPUTE &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(&Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
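      // For now, two independent SGPR_32 temporaries are scavenged below
      // instead of a single SReg_64 pair.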
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->LDSSize + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock::iterator MI,
                                   int Count) const {
  // S_NOP with operand N inserts N + 1 wait states, so each nop emitted here
  // covers at most 8 of the requested count.
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  case AMDGPU::SGPR_USE:
    // This is just a placeholder for register allocation.
    MI->eraseFromParent();
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI->getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit);
    }
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
    unsigned Src0 = MI->getOperand(1).getReg();
    unsigned Src1 = MI->getOperand(2).getReg();
    const MachineOperand &SrcCond = MI->getOperand(3);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
      .addOperand(SrcCond);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
      .addOperand(SrcCond);
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::SI_CONSTDATA_PTR: {
    const SIRegisterInfo *TRI =
        static_cast<const SIRegisterInfo *>(ST.getRegisterInfo());
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI->getOperand(0).getReg();
    unsigned RegLo = TRI->getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = TRI->getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                     .addReg(RegLo)
                     .addOperand(MI->getOperand(1)));
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                     .addReg(RegHi)
                     .addImm(0));

    llvm::finalizeBundle(MBB, Bundler.begin());

    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

/// Commutes the operands in the given instruction.
/// The commutable operands are specified by their indices OpIdx0 and OpIdx1.
///
/// Do not call this method for a non-commutable instruction or for a
/// non-commutable pair of operand indices OpIdx0 and OpIdx1.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx0,
                                                  unsigned OpIdx1) const {
  int CommutedOpcode = commuteOpcode(*MI);
  if (CommutedOpcode == -1)
    return nullptr;

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);
  if (!Src0.isReg())
    return nullptr;

  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);

  if ((OpIdx0 != static_cast<unsigned>(Src0Idx) ||
       OpIdx1 != static_cast<unsigned>(Src1Idx)) &&
      (OpIdx0 != static_cast<unsigned>(Src1Idx) ||
       OpIdx1 != static_cast<unsigned>(Src0Idx)))
    return nullptr;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  if (isVOP2(*MI)) {
    const MCInstrDesc &InstrDesc = MI->getDesc();
    // For VOP2 instructions, any operand type is valid to use for src0. Make
    // sure we can use the src1 as src0.
    //
    // We could be stricter here and only allow commuting if there is a reason
    // to do so. i.e. if both operands are VGPRs there is no real benefit,
    // although MachineCSE attempts to find matches by commuting.
    const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
    if (!isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0))
      return nullptr;
  }

  if (!Src1.isReg()) {
    // Allow commuting instructions with Imm operands.
    if (NewMI || !Src1.isImm() ||
        (!isVOP2(*MI) && !isVOP3(*MI))) {
      return nullptr;
    }
    // Be sure to copy the source modifiers to the right place.
    if (MachineOperand *Src0Mods
          = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      MachineOperand *Src1Mods
        = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);

      int Src0ModsVal = Src0Mods->getImm();
      if (!Src1Mods && Src0ModsVal != 0)
        return nullptr;

      // XXX - This assert might be a lie. It might be useful to have a neg
      // modifier with 0.0.
      int Src1ModsVal = Src1Mods->getImm();
      assert((Src1ModsVal == 0) && "Not expecting modifiers with immediates");

      Src1Mods->setImm(Src0ModsVal);
      Src0Mods->setImm(Src1ModsVal);
    }

    unsigned Reg = Src0.getReg();
    unsigned SubReg = Src0.getSubReg();
    if (Src1.isImm())
      Src0.ChangeToImmediate(Src1.getImm());
    else
      llvm_unreachable("Should only have immediates");

    Src1.ChangeToRegister(Reg, false);
    Src1.setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
  }

  if (MI)
    MI->setDesc(get(CommutedOpcode));

  return MI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  unsigned Opc = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  // FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
  // immediate. Also, an immediate src0 operand is not handled in
  // SIInstrInfo::commuteInstruction().
  if (!MI->getOperand(Src0Idx).isReg())
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);
  if (Src1.isImm()) {
    // SIInstrInfo::commuteInstruction() does support commuting the immediate
    // operand src1 in 2 and 3 operand instructions.
    if (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))
      return false;
  } else if (Src1.isReg()) {
    // If any source modifiers are set, the generic instruction commuting won't
    // understand how to copy the source modifiers.
    if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
      return false;
  } else
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned Opc = UseMI->getOpcode();
  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) {
    // Don't fold if we are using source modifiers. The new VOP2 instructions
    // don't have them.
    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
      return false;
    }

    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_f32.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      if (!Src2->isReg() ||
          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
        return false;

      // We need to do some weird looking operand shuffling since the madmk
      // operands are out of the normal expected order with the multiplied
      // constant as the last operand.
      //
      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
      //   src0 -> src2 K
      //   src1 -> src0
      //   src2 -> src1

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      unsigned Src2Reg = Src2->getReg();
      unsigned Src2SubReg = Src2->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      Src1->setReg(Src2Reg);
      Src1->setSubReg(Src2SubReg);
      Src1->setIsKill(Src2->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      Src2->ChangeToImmediate(Imm);

      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_f32.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      if (!Src0->isImm() &&
          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
        return false;

      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      // ChangeToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
                                               MachineInstr *MIb) const {
  unsigned BaseReg0, Offset0;
  unsigned BaseReg1, Offset1;

  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
    assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
           "read2 / write2 not expected here yet");
    unsigned Width0 = (*MIa->memoperands_begin())->getSize();
    unsigned Width1 = (*MIb->memoperands_begin())->getSize();
    if (BaseReg0 == BaseReg1 &&
        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must load from or modify a memory location");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
  if (isDS(*MIa)) {
    if (isDS(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb);
  }

  if (isMUBUF(*MIa) || isMTBUF(*MIa)) {
    if (isMUBUF(*MIb) || isMTBUF(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isSMRD(*MIb);
  }

  if (isSMRD(*MIa)) {
    if (isSMRD(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isMUBUF(*MIa) && !isMTBUF(*MIa);
  }

  if (isFLAT(*MIa)) {
    if (isFLAT(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineBasicBlock::iterator &MI,
                                                 LiveVariables *LV) const {

  switch (MI->getOpcode()) {
  default: return nullptr;
  case AMDGPU::V_MAC_F32_e64: break;
  case AMDGPU::V_MAC_F32_e32: {
    const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
    if (Src0->isImm() && !isInlineConstant(*Src0, 4))
      return nullptr;
    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
  const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
  const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);

  return BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_MAD_F32))
    .addOperand(*Dst)
    .addImm(0) // Src0 mods
    .addOperand(*Src0)
    .addImm(0) // Src1 mods
    .addOperand(*Src1)
    .addImm(0) // Src2 mods
    .addOperand(*Src2)
    .addImm(0)  // clamp
    .addImm(0); // omod
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int64_t SVal = Imm.getSExtValue();
  if (SVal >= -16 && SVal <= 64)
    return true;

  if (Imm.getBitWidth() == 64) {
    uint64_t Val = Imm.getZExtValue();
    return (DoubleToBits(0.0) == Val) ||
           (DoubleToBits(1.0) == Val) ||
           (DoubleToBits(-1.0) == Val) ||
           (DoubleToBits(0.5) == Val) ||
           (DoubleToBits(-0.5) == Val) ||
           (DoubleToBits(2.0) == Val) ||
           (DoubleToBits(-2.0) == Val) ||
           (DoubleToBits(4.0) == Val) ||
           (DoubleToBits(-4.0) == Val);
  }

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.
  uint32_t Val = Imm.getZExtValue();

  return (FloatToBits(0.0f) == Val) ||
         (FloatToBits(1.0f) == Val) ||
         (FloatToBits(-1.0f) == Val) ||
         (FloatToBits(0.5f) == Val) ||
         (FloatToBits(-0.5f) == Val) ||
         (FloatToBits(2.0f) == Val) ||
         (FloatToBits(-2.0f) == Val) ||
         (FloatToBits(4.0f) == Val) ||
         (FloatToBits(-4.0f) == Val);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   unsigned OpSize) const {
  if (MO.isImm()) {
    // MachineOperand provides no way to tell the true operand size, since it
    // only records a 64-bit value. We need to know the size to determine if a
    // 32-bit floating point immediate bit pattern is legal for an integer
    // immediate. It would be for any 32-bit integer operand, but would not be
    // for a 64-bit one.
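    //
    // For example, 0x3f800000 (1.0f, as noted above) is an inline constant
    // when interpreted as a 32-bit operand, but the same bits zero-extended
    // to 64 bits match none of the 64-bit inline values checked above.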
    unsigned BitSize = 8 * OpSize;
    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
                                    unsigned OpSize) const {
  return MO.isImm() && !isInlineConstant(MO, OpSize);
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
  if (isLiteralConstant(MO, OpSize))
    return RI.opCanUseLiteralConstant(OpInfo.OperandType);

  return RI.opCanUseInlineConstant(OpInfo.OperandType);
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifier operand is present on all instructions
  // that have modifiers.

  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  unsigned OpSize) const {
  // Literal constants use the constant bus.
  if (isLiteralConstant(MO, OpSize))
    return true;

  if (!MO.isReg() || !MO.isUse())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // FLAT_SCR is just an SGPR pair.
  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
    return true;

  // EXEC register uses the constant bus.
  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
    return true;

  // SGPRs use the constant bus.
  if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
      (!MO.isImplicit() &&
       (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
        AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
    return true;
  }

  return false;
}

static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
    if (MO.isDef())
      continue;

    switch (MO.getReg()) {
    case AMDGPU::VCC:
    case AMDGPU::M0:
    case AMDGPU::FLAT_SCR:
      return MO.getReg();

    default:
      break;
    }
  }

  return AMDGPU::NoRegister;
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI->getOperand(i).isImm()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C:
      if (isLiteralConstant(MI->getOperand(i),
                            RI.getRegClass(RegClass)->getSize())) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case MCOI::OPERAND_IMMEDIATE:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify VOP*
  if (isVOP1(*MI) || isVOP2(*MI) || isVOP3(*MI) || isVOPC(*MI)) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = findImplicitSGPRRead(*MI);
    if (SGPRUsed != AMDGPU::NoRegister)
      ++ConstantBusCount;

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI->getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
        if (MO.isReg()) {
          if (MO.getReg() != SGPRUsed)
            ++ConstantBusCount;
          SGPRUsed = MO.getReg();
        } else {
          ++ConstantBusCount;
        }
      }
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify misc. restrictions on specific instructions.
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
    const MachineOperand &Src0 = MI->getOperand(Src0Idx);
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    const MachineOperand &Src2 = MI->getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
  if (!isGenericOpcode(Opcode) && !isSALU(Opcode) && !isSMRD(Opcode)) {
    const MachineOperand *Exec = MI->findRegisterUseOperand(AMDGPU::EXEC);
    if (!Exec || !Exec->isImplicit()) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  return true;
}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ? AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    unsigned Reg = MI.getOperand(OpNo).getReg();

    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  unsigned Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
    .addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
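    // Split the 64-bit immediate into its low (sub0) and high (sub1) 32-bit
    // halves, mirroring what the register path below does with subregisters.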

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1)
void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
  assert(Inst->getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst->getOperand(1);
  Inst->RemoveOperand(1);
  Inst->addOperand(Op1);
}

bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
                                    const MCOperandInfo &OpInfo,
                                    const MachineOperand &MO) const {
  if (!MO.isReg())
    return false;

  unsigned Reg = MO.getReg();
  const TargetRegisterClass *RC =
    TargetRegisterInfo::isVirtualRegister(Reg) ?
    MRI.getRegClass(Reg) :
    RI.getPhysRegClass(Reg);

  const SIRegisterInfo *TRI =
    static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
  RC = TRI->getSubRegClass(RC, MO.getSubReg());

  // In order to be legal, the common sub-class must be equal to the
  // class of the current operand. For example:
  //
  // v_mov_b32 s0 ; Operand defined as vsrc_32
  //              ; RI.getCommonSubClass(s0,vsrc_32) = sgpr ; LEGAL
  //
  // s_sendmsg 0, s0 ; Operand defined as m0reg
  //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL

  return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
}

bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                                     const MCOperandInfo &OpInfo,
                                     const MachineOperand &MO) const {
  if (MO.isReg())
    return isLegalRegOperand(MRI, OpInfo, MO);

  // Handle non-register types that are treated like immediates.
  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
  return true;
}
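
// Note for the function below: besides the per-operand register class check,
// VALU instructions may read at most one SGPR or literal through the constant
// bus. For example, "v_and_b32_e64 v0, s0, s1" is rejected because s0 and s1
// would both need the single constant bus slot.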

bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const MCInstrDesc &InstDesc = get(MI->getOpcode());
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
    OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI->getOperand(OpIdx);

  if (isVALU(*MI) &&
      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
    unsigned SGPRUsed =
      MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI->getOperand(i);
      if (Op.isReg() && Op.getReg() != SGPRUsed &&
          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
        return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    return isLegalRegOperand(MRI, OpInfo, *MO);
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());

  if (!DefinedRC) {
    // This operand expects an immediate.
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}
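
// A rough example for the VOP2 path below: "v_add_i32_e32 v0, vcc, v1, s0"
// has an SGPR in src1, which the VOP2 encoding cannot hold. If the opcode is
// commutable this becomes "v_add_i32_e32 v0, vcc, s0, v1" for free; otherwise
// src1 must be copied into a VGPR with legalizeOpWithMove.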

void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
                                       MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();
  const MCInstrDesc &InstrDesc = get(Opc);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  // If there is an implicit SGPR use such as the VCC use for
  // v_addc_u32/v_subb_u32, we may only have one constant bus use.
  //
  // Note we do not need to worry about literal constants here. They are
  // disabled for the operand type for instructions because they will always
  // violate the one constant bus use rule.
  bool HasImplicitSGPR = findImplicitSGPRRead(*MI) != AMDGPU::NoRegister;
  if (HasImplicitSGPR) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    MachineOperand &Src0 = MI->getOperand(Src0Idx);

    if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
      legalizeOpWithMove(MI, Src0Idx);
  }

  // VOP2 src0 instructions support all operand types, so we don't need to check
  // their legality. If src1 is already legal, we don't need to do anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // We do not use commuteInstruction here because it is too aggressive and will
  // commute whenever it is possible. We only want to commute here if it
  // improves legality. This can be called a fairly large number of times so
  // don't waste compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI->isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
  //
  // TODO: Other immediate-like operand kinds could be commuted if there was a
  // MachineOperand::ChangeTo* for them.
  if ((!Src1.isImm() && !Src1.isReg()) ||
      !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int CommutedOpc = commuteOpcode(*MI);
  if (CommutedOpc == -1) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  MI->setDesc(get(CommutedOpc));

  unsigned Src0Reg = Src0.getReg();
  unsigned Src0SubReg = Src0.getSubReg();
  bool Src0Kill = Src0.isKill();

  if (Src1.isImm())
    Src0.ChangeToImmediate(Src1.getImm());
  else if (Src1.isReg()) {
    Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
    Src0.setSubReg(Src1.getSubReg());
  } else
    llvm_unreachable("Should only have register or immediate operands");

  Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
  Src1.setSubReg(Src0SubReg);
}

// Legalize VOP3 operands. Because all operand types are supported for any
// operand, and since literal constants are not allowed and should never be
// seen, we only need to worry about inserting copies if we use multiple SGPR
// operands.
void SIInstrInfo::legalizeOperandsVOP3(
  MachineRegisterInfo &MRI,
  MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();

  int VOP3Idx[3] = {
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
  };

  // Find the one SGPR operand we are allowed to use.
  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (Idx == -1)
      break;
    MachineOperand &MO = MI->getOperand(Idx);

    // We should never see a VOP3 instruction with an illegal immediate operand.
    if (!MO.isReg())
      continue;

    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
      continue; // VGPRs are legal

    if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
      SGPRReg = MO.getReg();
      // We can use one SGPR in each VOP3 instruction.
      continue;
    }

    // If we make it this far, then the operand is not legal and we must
    // legalize it.
    legalizeOpWithMove(MI, Idx);
  }
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  // Legalize VOP2
  if (isVOP2(*MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return;
  }

  // Legalize VOP3
  if (isVOP3(*MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
        MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be,
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
      MachineOperand &Op = MI->getOperand(I);
      if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);

      // MI is a PHI instruction.
      MachineBasicBlock *InsertBB = MI->getOperand(I + 1).getMBB();
      MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();

      BuildMI(*InsertBB, Insert, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
        .addOperand(Op);
      Op.setReg(DstReg);
    }
  }
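
  // At this point a PHI that mixed SGPR and VGPR inputs (e.g. an SGPR value
  // from one predecessor and a VGPR value from another) has been rewritten so
  // that every input is a VGPR, with the copies placed before each
  // predecessor's terminator.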

  // REG_SEQUENCE doesn't really require operand legalization, but if one has a
  // VGPR dest type and SGPR sources, insert copies so all operands are
  // VGPRs. This seems to help operand folding / the register coalescer.
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    MachineBasicBlock *MBB = MI->getParent();
    const TargetRegisterClass *DstRC = getOpRegClass(*MI, 0);
    if (RI.hasVGPRs(DstRC)) {
      // Update all the operands so they are VGPR register classes. These may
      // not be the same register class because REG_SEQUENCE supports mixing
      // subregister index types e.g. sub0_sub1 + sub2 + sub3
      for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
        MachineOperand &Op = MI->getOperand(I);
        if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
          continue;

        const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
        const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
        if (VRC == OpRC)
          continue;

        unsigned DstReg = MRI.createVirtualRegister(VRC);

        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
          .addOperand(Op);

        Op.setReg(DstReg);
        Op.setIsKill();
      }
    }

    return;
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned Src0 = MI->getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock &MBB = *MI->getParent();
      unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
        .addReg(Src0);
      MI->getOperand(1).setReg(NewSrc0);
    }
    return;
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.
  int SRsrcIdx =
    AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
  if (SRsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
    unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
                             RI.getRegClass(SRsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    MachineBasicBlock &MBB = *MI->getParent();
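
    // A rough sketch of what follows: the 64-bit base pointer is split out of
    // the descriptor and folded into vaddr, so that, approximately,
    //   buffer_load_dword v0, s[4:7], 0 offset:16          (_OFFSET form)
    // becomes
    //   buffer_load_dword v0, v[2:3], s[8:11], 0 addr64 offset:16
    // where s[8:11] is a freshly built descriptor with a null base.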

    // Extract the ptr from the resource descriptor.
    unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
      &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

    // Create an empty resource descriptor
    unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();

    // Zero64 = 0
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
            Zero64)
      .addImm(0);

    // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
            SRsrcFormatLo)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);

    // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
            SRsrcFormatHi)
      .addImm(RsrcDataFormat >> 32);

    // NewSRsrc = {Zero64, SRsrcFormat}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
      .addReg(Zero64)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(SRsrcFormatLo)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcFormatHi)
      .addImm(AMDGPU::sub3);

    MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
    unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
    if (VAddr) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
      DebugLoc DL = MI->getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
        .addReg(SRsrcPtr, 0, AMDGPU::sub0)
        .addReg(VAddr->getReg(), 0, AMDGPU::sub0);

      // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
        .addReg(SRsrcPtr, 0, AMDGPU::sub1)
        .addReg(VAddr->getReg(), 0, AMDGPU::sub1);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
        .addReg(NewVAddrLo)
        .addImm(AMDGPU::sub0)
        .addReg(NewVAddrHi)
        .addImm(AMDGPU::sub1);
    } else {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration()
             < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(*MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;
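
      // Two build paths below: plain loads / stores carry the glc / slc / tfe
      // bits, while atomics with return instead carry the tied vdata_in
      // operand and only the slc bit.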

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB
          = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
              .addOperand(*VData)
              .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                          // This will be replaced later
                                          // with the new value of vaddr.
              .addOperand(*SRsrc)
              .addOperand(*SOffset)
              .addOperand(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC
              = getNamedOperand(*MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE
              = getNamedOperand(*MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
        Addr64 = MIB;
      } else {
        // Atomics with return.
        Addr64 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
          .addOperand(*VData)
          .addOperand(*VDataIn)
          .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                      // This will be replaced later
                                      // with the new value of vaddr.
          .addOperand(*SRsrc)
          .addOperand(*SOffset)
          .addOperand(*Offset)
          .addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc))
          .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
      }

      MI->removeFromParent();
      MI = Addr64;

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
        .addReg(SRsrcPtr, 0, AMDGPU::sub0)
        .addImm(AMDGPU::sub0)
        .addReg(SRsrcPtr, 0, AMDGPU::sub1)
        .addImm(AMDGPU::sub1);

      VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
      SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
    }

    // Update the instruction to use NewVaddr
    VAddr->setReg(NewVAddr);
    // Update the instruction to use NewSRsrc
    SRsrc->setReg(NewSRsrc);
  }
}
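
// A worked example of the split performed below, using VI byte offsets:
//   s_load_dwordx8 s[0:7], s[8:9], 0x10
// becomes
//   s_load_dwordx4 s[0:3], s[8:9], 0x10
//   s_load_dwordx4 s[4:7], s[8:9], 0x20
// with a REG_SEQUENCE recombining the two halves.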

void SIInstrInfo::splitSMRD(MachineInstr *MI,
                            const TargetRegisterClass *HalfRC,
                            unsigned HalfImmOp, unsigned HalfSGPROp,
                            MachineInstr *&Lo, MachineInstr *&Hi) const {

  DebugLoc DL = MI->getDebugLoc();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RegLo = MRI.createVirtualRegister(HalfRC);
  unsigned RegHi = MRI.createVirtualRegister(HalfRC);
  unsigned HalfSize = HalfRC->getSize();
  const MachineOperand *OffOp =
    getNamedOperand(*MI, AMDGPU::OpName::offset);
  const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);

  // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
  // on VI.

  bool IsKill = SBase->isKill();
  if (OffOp) {
    bool isVI =
      MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
      AMDGPUSubtarget::VOLCANIC_ISLANDS;
    unsigned OffScale = isVI ? 1 : 4;
    // Handle the _IMM variant
    unsigned LoOffset = OffOp->getImm() * OffScale;
    unsigned HiOffset = LoOffset + HalfSize;
    Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
      // Use addReg instead of addOperand
      // to make sure kill flag is cleared.
      .addReg(SBase->getReg(), 0, SBase->getSubReg())
      .addImm(LoOffset / OffScale);

    if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
      unsigned OffsetSGPR =
        MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
        .addImm(HiOffset); // The offset in register is in bytes.
      Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
        .addReg(SBase->getReg(), getKillRegState(IsKill),
                SBase->getSubReg())
        .addReg(OffsetSGPR);
    } else {
      Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
        .addReg(SBase->getReg(), getKillRegState(IsKill),
                SBase->getSubReg())
        .addImm(HiOffset / OffScale);
    }
  } else {
    // Handle the _SGPR variant
    MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
    Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
      .addReg(SBase->getReg(), 0, SBase->getSubReg())
      .addOperand(*SOff);
    unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
      .addReg(SOff->getReg(), 0, SOff->getSubReg())
      .addImm(HalfSize);
    Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
      .addReg(SBase->getReg(), getKillRegState(IsKill),
              SBase->getSubReg())
      .addReg(OffsetSGPR);
  }

  unsigned SubLo, SubHi;
  const TargetRegisterClass *NewDstRC;
  switch (HalfSize) {
  case 4:
    SubLo = AMDGPU::sub0;
    SubHi = AMDGPU::sub1;
    NewDstRC = &AMDGPU::VReg_64RegClass;
    break;
  case 8:
    SubLo = AMDGPU::sub0_sub1;
    SubHi = AMDGPU::sub2_sub3;
    NewDstRC = &AMDGPU::VReg_128RegClass;
    break;
  case 16:
    SubLo = AMDGPU::sub0_sub1_sub2_sub3;
    SubHi = AMDGPU::sub4_sub5_sub6_sub7;
    NewDstRC = &AMDGPU::VReg_256RegClass;
    break;
  case 32:
    SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
    SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
    NewDstRC = &AMDGPU::VReg_512RegClass;
    break;
  default:
    llvm_unreachable("Unhandled HalfSize");
  }

  unsigned OldDst = MI->getOperand(0).getReg();
  unsigned NewDst = MRI.createVirtualRegister(NewDstRC);

  MRI.replaceRegWith(OldDst, NewDst);

  BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDst)
    .addReg(RegLo)
    .addImm(SubLo)
    .addReg(RegHi)
    .addImm(SubHi);
}
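
// Roughly, the conversion below turns an SMRD such as
//   s_load_dword s0, s[4:5], 0x4        (dword offset on SI)
// into an addr64 MUBUF load,
//   buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
// where the descriptor and byte offset are assembled from the old operands
// and the base pointer ends up in vaddr after operand legalization.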

void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
                                 MachineRegisterInfo &MRI,
                                 SmallVectorImpl<MachineInstr *> &Worklist) const {
  MachineBasicBlock *MBB = MI->getParent();
  int DstIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  assert(DstIdx != -1);
  unsigned DstRCID = get(MI->getOpcode()).OpInfo[DstIdx].RegClass;
  switch (RI.getRegClass(DstRCID)->getSize()) {
  case 4:
  case 8:
  case 16: {
    unsigned NewOpcode = getVALUOp(*MI);
    unsigned RegOffset;
    unsigned ImmOffset;

    if (MI->getOperand(2).isReg()) {
      RegOffset = MI->getOperand(2).getReg();
      ImmOffset = 0;
    } else {
      assert(MI->getOperand(2).isImm());
      // SMRD instructions take a dword offset on SI and a byte offset on VI,
      // and MUBUF instructions always take a byte offset.
      ImmOffset = MI->getOperand(2).getImm();
      if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
          AMDGPUSubtarget::SEA_ISLANDS)
        ImmOffset <<= 2;
      RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

      if (isUInt<12>(ImmOffset)) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
          .addImm(0);
      } else {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
          .addImm(ImmOffset);
        ImmOffset = 0;
      }
    }

    unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    unsigned DWord0 = RegOffset;
    unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();

    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
      .addImm(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
      .addImm(RsrcDataFormat >> 32);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
      .addReg(DWord0)
      .addImm(AMDGPU::sub0)
      .addReg(DWord1)
      .addImm(AMDGPU::sub1)
      .addReg(DWord2)
      .addImm(AMDGPU::sub2)
      .addReg(DWord3)
      .addImm(AMDGPU::sub3);

    const MCInstrDesc &NewInstDesc = get(NewOpcode);
    const TargetRegisterClass *NewDstRC
      = RI.getRegClass(NewInstDesc.OpInfo[0].RegClass);
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    unsigned DstReg = MI->getOperand(0).getReg();
    MRI.replaceRegWith(DstReg, NewDstReg);

    MachineInstr *NewInst =
      BuildMI(*MBB, MI, MI->getDebugLoc(), NewInstDesc, NewDstReg)
        .addOperand(MI->getOperand(1)) // sbase
        .addReg(SRsrc)
        .addImm(0)
        .addImm(ImmOffset)
        .addImm(0) // glc
        .addImm(0) // slc
        .addImm(0) // tfe
        .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    MI->eraseFromParent();

    legalizeOperands(NewInst);
    addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
    break;
  }
  case 32: {
    MachineInstr *Lo, *Hi;
    splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
              AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
    MI->eraseFromParent();
    moveSMRDToVALU(Lo, MRI, Worklist);
    moveSMRDToVALU(Hi, MRI, Worklist);
    break;
  }

  case 64: {
    MachineInstr *Lo, *Hi;
    splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
              AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
    MI->eraseFromParent();
    moveSMRDToVALU(Lo, MRI, Worklist);
    moveSMRDToVALU(Hi, MRI, Worklist);
    break;
  }
  }
}
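
// moveToVALU drains a worklist to a fixed point: rewriting one SALU
// instruction turns its SGPR def into a VGPR def, which can make each user
// illegal in turn, so users that cannot read a VGPR are pushed back onto the
// worklist by addUsersToMoveToVALUWorklist.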

void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst->getOpcode();
    unsigned NewOpcode = getVALUOp(*Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      if (isSMRD(*Inst)) {
        moveSMRDToVALU(Inst, MRI, Worklist);
        continue;
      }
      break;
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_I64: {
      splitScalar64BitBFE(Worklist, Inst);
      Inst->eraseFromParent();
      continue;
    }

    case AMDGPU::S_LSHL_B32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHL_B64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHLREV_B64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_ASHRREV_I64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHRREV_B64;
        swapOperands(Inst);
      }
      break;

    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Size));

    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
      // The VALU version adds the second operand to the result, so insert an
      // extra 0 operand.
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    Inst->addImplicitDefUseOperands(*Inst->getParent()->getParent());

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
      Inst->RemoveOperand(2);                     // Remove old immediate.
      Inst->addOperand(MachineOperand::CreateImm(Offset));
      Inst->addOperand(MachineOperand::CreateImm(BitWidth));
    }

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*Inst);
    if (!NewDstRC)
      continue;

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands
    legalizeOperands(Inst);

    addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
                                 MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);
  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
    .addImm(0)
    .addReg(Src.getReg());

  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
    .addReg(Src.getReg())
    .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
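
// A rough sketch of the expansion above, using abs(x) = max(x, 0 - x):
//   v_sub_i32_e32 v1, vcc, 0, v0
//   v_max_i32_e64 v2, v0, v1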

void SIInstrInfo::splitScalar64BitUnaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // We don't need to legalizeOperands here because for a single operand, src0
  // will support any kind of input.

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
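
// For example, the binary split below turns
//   s_and_b64 s[0:1], s[2:3], s[4:5]
// into
//   v_and_b32_e64 v0, s2, s4
//   v_and_b32_e64 v1, s3, s5
// with a REG_SEQUENCE forming the 64-bit result v[0:1].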

void SIInstrInfo::splitScalar64BitBinaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
    .addOperand(SrcReg0Sub0)
    .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
    .addOperand(SrcReg0Sub1)
    .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(LoHalf);
  legalizeOperands(HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
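
// For example, the population-count split below turns
//   s_bcnt1_i32_b64 s0, s[2:3]
// into a chained count:
//   v_bcnt_u32_b32_e64 v0, s2, 0
//   v_bcnt_u32_b32_e64 v1, s3, v0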

void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
                                       MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  BuildMI(MBB, MII, DL, InstDesc, MidReg)
    .addOperand(SrcRegSub0)
    .addImm(0);

  BuildMI(MBB, MII, DL, InstDesc, ResultReg)
    .addOperand(SrcRegSub1)
    .addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can be
  // an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
                                      MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  uint32_t Imm = Inst->getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
  assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 &&
         BitWidth <= 32 &&
         Offset == 0 &&
         "Not implemented");

  if (BitWidth < 32) {
    unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
      .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0)
      .addImm(0)
      .addImm(BitWidth);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
      .addImm(31)
      .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(MidRegLo)
      .addImm(AMDGPU::sub0)
      .addReg(MidRegHi)
      .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  MachineOperand &Src = Inst->getOperand(1);
  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
    .addImm(31)
    .addReg(Src.getReg(), 0, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    .addReg(Src.getReg(), 0, AMDGPU::sub0)
    .addImm(AMDGPU::sub0)
    .addReg(TmpReg)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addUsersToMoveToVALUWorklist(
  unsigned DstReg,
  MachineRegisterInfo &MRI,
  SmallVectorImpl<MachineInstr *> &Worklist) const {
  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
         E = MRI.use_end(); I != E; ++I) {
    MachineInstr &UseMI = *I->getParent();
    if (!canReadVGPR(UseMI, I.getOperandNo())) {
      Worklist.push_back(&UseMI);
    }
  }
}
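
// For example, a REG_SEQUENCE that defined an SReg_64 but is being moved to
// the VALU needs its result class rewritten to VReg_64 before its operands
// are legalized; ordinary target instructions already describe a fixed
// destination class, which is returned unchanged below.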

const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
  const MachineInstr &Inst) const {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  switch (Inst.getOpcode()) {
  // For target instructions, getOpRegClass just returns the virtual register
  // class associated with the operand, so we need to find an equivalent VGPR
  // register class in order to move the instruction to the VALU.
  case AMDGPU::COPY:
  case AMDGPU::PHI:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
    if (RI.hasVGPRs(NewDstRC))
      return nullptr;

    NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
    if (!NewDstRC)
      return nullptr;
    return NewDstRC;
  default:
    return NewDstRC;
  }
}

// Find the one SGPR operand we are allowed to use.
unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI->getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  unsigned SGPRReg = findImplicitSGPRRead(*MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI->getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    unsigned Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
  MachineBasicBlock *MBB,
  MachineBasicBlock::iterator I,
  unsigned ValueReg,
  unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
    getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
    .addReg(IndirectBaseReg, RegState::Define)
    .addOperand(I->getOperand(0))
    .addReg(IndirectBaseReg)
    .addReg(OffsetReg)
    .addImm(0)
    .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
  MachineBasicBlock *MBB,
  MachineBasicBlock::iterator I,
  unsigned ValueReg,
  unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
    getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC_V1))
    .addOperand(I->getOperand(0))
    .addOperand(I->getOperand(1))
    .addReg(IndirectBaseReg)
    .addReg(OffsetReg)
    .addImm(0);
}

void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    RsrcDataFormat |= (1ULL << 56);

    if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      // Set MTYPE = 2
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}
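
// For example, on an HSA VI target the default format above additionally has
// bit 56 set and MTYPE = 2 encoded in bits 60:59, on top of the base
// RSRC_DATA_FORMAT value.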

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();

  return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
}