//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

SIInstrInfo::SIInstrInfo(const AMDGPUSubtarget &st)
    : AMDGPUInstrInfo(st), RI() {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for the exec check.
  switch (MI->getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

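// The DAG scheduler uses areLoadsFromSameBasePtr / shouldClusterLoads below
// to cluster loads that share a base pointer. A sketch of the DS case: two
// DS_READ_B32 nodes with the same address operand and immediate offsets 16
// and 20 report Offset0 = 16 and Offset1 = 20.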
bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

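// The ST64 read2/write2 variants address every 64th element, so their
// offset fields must be scaled by an extra factor of 64 when converted to
// bytes (see getMemOpBaseRegImmOfs below).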
static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr *LdSt, unsigned &BaseReg,
                                        unsigned &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt->getOpcode();

  if (isDS(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm = getNamedOperand(*LdSt,
                                                       AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt->mayLoad())
        EltSize = getOpRegClass(*LdSt, 0)->getSize() / 2;
      else {
        assert(LdSt->mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = getOpRegClass(*LdSt, Data0Idx)->getSize();
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(*LdSt) || isMTBUF(*LdSt)) {
    if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::soffset) != -1)
      return false;

    const MachineOperand *AddrReg = getNamedOperand(*LdSt,
                                                    AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isSMRD(*LdSt)) {
    const MachineOperand *OffsetImm = getNamedOperand(*LdSt,
                                                      AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(*LdSt,
                                                     AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterLoads(MachineInstr *FirstLdSt,
                                     MachineInstr *SecondLdSt,
                                     unsigned NumLoads) const {
  // TODO: This needs finer tuning.
  if (NumLoads > 4)
    return false;

  if (isDS(*FirstLdSt) && isDS(*SecondLdSt))
    return true;

  if (isSMRD(*FirstLdSt) && isSMRD(*SecondLdSt))
    return true;

  if ((isMUBUF(*FirstLdSt) || isMTBUF(*FirstLdSt)) &&
      (isMUBUF(*SecondLdSt) || isMTBUF(*SecondLdSt)))
    return true;

  return false;
}

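// copyPhysReg lowers a generic COPY between physical registers. Scalar
// copies up to 64 bits and 32-bit vector copies emit a single mov; the
// wider register classes are expanded into a sequence of 32-bit moves over
// their sub0..subN sub-registers.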
void
SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MI, DebugLoc DL,
                         unsigned DestReg, unsigned SrcReg,
                         bool KillSrc) const {

  // If we are trying to copy to or from SCC, there is a bug somewhere else in
  // the backend. While it may be theoretically possible to do this, it should
  // never be necessary.
  assert(DestReg != AMDGPU::SCC && SrcReg != AMDGPU::SCC);

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 0
  };

  static const int16_t Sub0_7[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 0
  };

  static const int16_t Sub0_3[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 0
  };

  static const int16_t Sub0_2[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, 0
  };

  static const int16_t Sub0_1[] = {
    AMDGPU::sub0, AMDGPU::sub1, 0
  };

  unsigned Opcode;
  const int16_t *SubIndices;

  if (AMDGPU::SReg_32RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_64RegClass.contains(DestReg)) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_I32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    assert(AMDGPU::SReg_64RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::SReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::SReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::SReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::S_MOV_B32;
    SubIndices = Sub0_15;

  } else if (AMDGPU::VGPR_32RegClass.contains(DestReg)) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;

  } else if (AMDGPU::VReg_64RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_64RegClass.contains(SrcReg) ||
           AMDGPU::SReg_64RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_1;

  } else if (AMDGPU::VReg_96RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_96RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_2;

  } else if (AMDGPU::VReg_128RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_128RegClass.contains(SrcReg) ||
           AMDGPU::SReg_128RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_3;

  } else if (AMDGPU::VReg_256RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_256RegClass.contains(SrcReg) ||
           AMDGPU::SReg_256RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_7;

  } else if (AMDGPU::VReg_512RegClass.contains(DestReg)) {
    assert(AMDGPU::VReg_512RegClass.contains(SrcReg) ||
           AMDGPU::SReg_512RegClass.contains(SrcReg));
    Opcode = AMDGPU::V_MOV_B32_e32;
    SubIndices = Sub0_15;

  } else {
    llvm_unreachable("Can't copy register!");
  }

  while (unsigned SubIdx = *SubIndices++) {
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx), getKillRegState(KillSrc));

    if (*SubIndices)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();

  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (DstRC->getSize() == 4) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (DstRC->getSize() == 8 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (DstRC->getSize() == 8 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

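// Spilling below selects a size-matched pseudo from the tables above. A
// sketch of the SGPR path: spilling an SReg_64 emits
//   SI_SPILL_S64_SAVE <src>, <frame_idx>
// which is expanded into real instructions later, when frame indices are
// eliminated.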
void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo->getObjectSize(FrameIndex);
  unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling
    // SGPRs.
    unsigned Opcode = getSGPRSpillSaveOpcode(RC->getSize());
    BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg)            // src
      .addFrameIndex(FrameIndex) // frame_idx
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(MFI)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(RC->getSize());
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg)                         // src
    .addFrameIndex(FrameIndex)              // frame_idx
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo *FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo->getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo->getObjectSize(FrameIndex);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    unsigned Opcode = getSGPRSpillRestoreOpcode(RC->getSize());
    BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex) // frame_idx
      .addMemOperand(MMO);

    return;
  }

  if (!ST.isVGPRSpillingEnabled(MFI)) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(RC->getSize());
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)              // frame_idx
    .addReg(MFI->getScratchRSrcReg())       // scratch_rsrc
    .addReg(MFI->getScratchWaveOffsetReg()) // scratch_offset
    .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                               RegScavenger *RS,
                                               unsigned TmpReg,
                                               unsigned FrameOffset,
                                               unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &ST = MF->getSubtarget<AMDGPUSubtarget>();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo*>(ST.getRegisterInfo());
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaximumWorkGroupSize(*MF);
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (MFI->getShaderType() == ShaderType::COMPUTE &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
          TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(&Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->LDSSize + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertNOPs(MachineBasicBlock::iterator MI,
                             int Count) const {
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

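// expandPostRAPseudo turns pseudos that survive register allocation into
// real machine instructions, e.g. V_MOV_B64_PSEUDO becomes two
// V_MOV_B32_e32s writing the sub0 and sub1 halves of the destination.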
bool SIInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock &MBB = *MI->getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI->getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);

  case AMDGPU::SI_CONSTDATA_PTR: {
    unsigned Reg = MI->getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    BuildMI(MBB, MI, DL, get(AMDGPU::S_GETPC_B64), Reg);

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADD_U32), RegLo)
      .addReg(RegLo)
      .addTargetIndex(AMDGPU::TI_CONSTDATA_START)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit);
    BuildMI(MBB, MI, DL, get(AMDGPU::S_ADDC_U32), RegHi)
      .addReg(RegHi)
      .addImm(0)
      .addReg(AMDGPU::SCC, RegState::Define | RegState::Implicit)
      .addReg(AMDGPU::SCC, RegState::Implicit);
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::SGPR_USE:
    // This is just a placeholder for register allocation.
    MI->eraseFromParent();
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI->getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit);
    }
    MI->eraseFromParent();
    break;
  }

  case AMDGPU::V_CNDMASK_B64_PSEUDO: {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);
    unsigned Src0 = MI->getOperand(1).getReg();
    unsigned Src1 = MI->getOperand(2).getReg();
    const MachineOperand &SrcCond = MI->getOperand(3);

    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstLo)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub0))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub0))
      .addOperand(SrcCond);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstHi)
      .addReg(RI.getSubReg(Src0, AMDGPU::sub1))
      .addReg(RI.getSubReg(Src1, AMDGPU::sub1))
      .addOperand(SrcCond);
    MI->eraseFromParent();
    break;
  }
  }
  return true;
}

/// Commutes the operands in the given instruction.
/// The commutable operands are specified by their indices OpIdx0 and OpIdx1.
///
/// Do not call this method for a non-commutable instruction or for a
/// non-commutable pair of operand indices OpIdx0 and OpIdx1.
/// Even though the instruction is commutable, the method may still
/// fail to commute the operands; a null pointer is returned in such cases.
MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr *MI,
                                                  bool NewMI,
                                                  unsigned OpIdx0,
                                                  unsigned OpIdx1) const {
  int CommutedOpcode = commuteOpcode(*MI);
  if (CommutedOpcode == -1)
    return nullptr;

  int Src0Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);
  if (!Src0.isReg())
    return nullptr;

  int Src1Idx = AMDGPU::getNamedOperandIdx(MI->getOpcode(),
                                           AMDGPU::OpName::src1);

  if ((OpIdx0 != static_cast<unsigned>(Src0Idx) ||
       OpIdx1 != static_cast<unsigned>(Src1Idx)) &&
      (OpIdx0 != static_cast<unsigned>(Src1Idx) ||
       OpIdx1 != static_cast<unsigned>(Src0Idx)))
    return nullptr;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  if (isVOP2(*MI)) {
    const MCInstrDesc &InstrDesc = MI->getDesc();
    // For VOP2 instructions, any operand type is valid to use for src0. Make
    // sure we can use the src1 as src0.
    //
    // We could be stricter here and only allow commuting if there is a reason
    // to do so, i.e. if both operands are VGPRs there is no real benefit,
    // although MachineCSE attempts to find matches by commuting.
    const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
    if (!isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0))
      return nullptr;
  }

  if (!Src1.isReg()) {
    // Allow commuting instructions with Imm operands.
    if (NewMI || !Src1.isImm() ||
        (!isVOP2(*MI) && !isVOP3(*MI))) {
      return nullptr;
    }
    // Be sure to copy the source modifiers to the right place.
    if (MachineOperand *Src0Mods
          = getNamedOperand(*MI, AMDGPU::OpName::src0_modifiers)) {
      MachineOperand *Src1Mods
        = getNamedOperand(*MI, AMDGPU::OpName::src1_modifiers);

      int Src0ModsVal = Src0Mods->getImm();
      if (!Src1Mods && Src0ModsVal != 0)
        return nullptr;

      // XXX - This assert might be a lie. It might be useful to have a neg
      // modifier with 0.0.
      int Src1ModsVal = Src1Mods->getImm();
      assert((Src1ModsVal == 0) && "Not expecting modifiers with immediates");

      Src1Mods->setImm(Src0ModsVal);
      Src0Mods->setImm(Src1ModsVal);
    }

    unsigned Reg = Src0.getReg();
    unsigned SubReg = Src0.getSubReg();
    if (Src1.isImm())
      Src0.ChangeToImmediate(Src1.getImm());
    else
      llvm_unreachable("Should only have immediates");

    Src1.ChangeToRegister(Reg, false);
    Src1.setSubReg(SubReg);
  } else {
    MI = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx0, OpIdx1);
  }

  if (MI)
    MI->setDesc(get(CommutedOpcode));

  return MI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;

  unsigned Opc = MI->getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  // FIXME: Workaround TargetInstrInfo::commuteInstruction asserting on
  // immediate. Also, an immediate src0 operand is not handled by
  // SIInstrInfo::commuteInstruction().
  if (!MI->getOperand(Src0Idx).isReg())
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  MachineOperand &Src1 = MI->getOperand(Src1Idx);
  if (Src1.isImm()) {
    // SIInstrInfo::commuteInstruction() does support commuting the immediate
    // operand src1 in 2 and 3 operand instructions.
    if (!isVOP2(MI->getOpcode()) && !isVOP3(MI->getOpcode()))
      return false;
  } else if (Src1.isReg()) {
    // If any source modifiers are set, the generic instruction commuting won't
    // understand how to copy the source modifiers.
    if (hasModifiersSet(*MI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*MI, AMDGPU::OpName::src1_modifiers))
      return false;
  } else
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

MachineInstr *SIInstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         unsigned SrcReg) const {
  return BuildMI(*MBB, I, MBB->findDebugLoc(I), get(AMDGPU::V_MOV_B32_e32),
                 DstReg).addReg(SrcReg);
}

bool SIInstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
    return true;
  }
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  // Remove the highest index first so the earlier indices stay valid.
  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

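// FoldImmediate tries to fold a v_mov immediate directly into its single
// non-debug use, rewriting v_mad_f32 / v_mac_f32 into v_madmk_f32 when the
// constant is the multiplied operand (src0) or into v_madak_f32 when it is
// the added operand (src2).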
bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  unsigned Opc = UseMI->getOpcode();
  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64) {
    // Don't fold if we are using source modifiers. The new VOP2 instructions
    // don't have them.
    if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
        hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
      return false;
    }

    MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
    MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_f32.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      if (!Src2->isReg() ||
          (Src2->isReg() && RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))))
        return false;

      // We need to do some weird looking operand shuffling since the madmk
      // operands are out of the normal expected order with the multiplied
      // constant as the last operand.
      //
      // v_mad_f32 src0, src1, src2 -> v_madmk_f32 src0 * src2K + src1
      // src0 -> src2 K
      // src1 -> src0
      // src2 -> src1

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      unsigned Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      unsigned Src2Reg = Src2->getReg();
      unsigned Src2SubReg = Src2->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      Src1->setReg(Src2Reg);
      Src1->setSubReg(Src2SubReg);
      Src1->setIsKill(Src2->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      Src2->ChangeToImmediate(Imm);

      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADMK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_f32.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      if (!Src0->isImm() &&
          (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))
        return false;

      if (!Src1->isReg() ||
          (Src1->isReg() && RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
        return false;

      const int64_t Imm = DefMI->getOperand(1).getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::omod));
      UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
                                                      AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64) {
        UseMI->untieRegOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
      }

      // ChangeToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(*UseMI);
      UseMI->setDesc(get(AMDGPU::V_MADAK_F32));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI->eraseFromParent();

      return true;
    }
  }

  return false;
}

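// Worked example: WidthA = 4 at OffsetA = 0 and WidthB = 4 at OffsetB = 4
// do not overlap (0 + 4 <= 4), but with OffsetB = 2 they would.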
static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr *MIa,
                                               MachineInstr *MIb) const {
  unsigned BaseReg0, Offset0;
  unsigned BaseReg1, Offset1;

  if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) &&
      getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) {
    assert(MIa->hasOneMemOperand() && MIb->hasOneMemOperand() &&
           "read2 / write2 not expected here yet");
    unsigned Width0 = (*MIa->memoperands_begin())->getSize();
    unsigned Width1 = (*MIb->memoperands_begin())->getSize();
    if (BaseReg0 == BaseReg1 &&
        offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr *MIa,
                                                  MachineInstr *MIb,
                                                  AliasAnalysis *AA) const {
  assert(MIa && (MIa->mayLoad() || MIa->mayStore()) &&
         "MIa must load from or modify a memory location");
  assert(MIb && (MIb->mayLoad() || MIb->mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa->hasUnmodeledSideEffects() || MIb->hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa->hasOrderedMemoryRef() || MIb->hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
  if (isDS(*MIa)) {
    if (isDS(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb);
  }

  if (isMUBUF(*MIa) || isMTBUF(*MIa)) {
    if (isMUBUF(*MIb) || isMTBUF(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isSMRD(*MIb);
  }

  if (isSMRD(*MIa)) {
    if (isSMRD(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(*MIb) && !isMUBUF(*MIb) && !isMTBUF(*MIb);
  }

  if (isFLAT(*MIa)) {
    if (isFLAT(*MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineBasicBlock::iterator &MI,
                                                 LiveVariables *LV) const {

  switch (MI->getOpcode()) {
  default: return nullptr;
  case AMDGPU::V_MAC_F32_e64: break;
  case AMDGPU::V_MAC_F32_e32: {
    const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
    if (Src0->isImm() && !isInlineConstant(*Src0, 4))
      return nullptr;
    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(*MI, AMDGPU::OpName::dst);
  const MachineOperand *Src0 = getNamedOperand(*MI, AMDGPU::OpName::src0);
  const MachineOperand *Src1 = getNamedOperand(*MI, AMDGPU::OpName::src1);
  const MachineOperand *Src2 = getNamedOperand(*MI, AMDGPU::OpName::src2);

  return BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::V_MAD_F32))
      .addOperand(*Dst)
      .addImm(0) // Src0 mods
      .addOperand(*Src0)
      .addImm(0) // Src1 mods
      .addOperand(*Src1)
      .addImm(0) // Src2 mods
      .addOperand(*Src2)
      .addImm(0)  // clamp
      .addImm(0); // omod
}

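// Inline constants are operands the hardware encodes for free: integers in
// [-16, 64] and a small set of floating-point values (0.0, +/-0.5, +/-1.0,
// +/-2.0, +/-4.0), matched bit-for-bit below.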
bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  int64_t SVal = Imm.getSExtValue();
  if (SVal >= -16 && SVal <= 64)
    return true;

  if (Imm.getBitWidth() == 64) {
    uint64_t Val = Imm.getZExtValue();
    return (DoubleToBits(0.0) == Val) ||
           (DoubleToBits(1.0) == Val) ||
           (DoubleToBits(-1.0) == Val) ||
           (DoubleToBits(0.5) == Val) ||
           (DoubleToBits(-0.5) == Val) ||
           (DoubleToBits(2.0) == Val) ||
           (DoubleToBits(-2.0) == Val) ||
           (DoubleToBits(4.0) == Val) ||
           (DoubleToBits(-4.0) == Val);
  }

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.
  uint32_t Val = Imm.getZExtValue();

  return (FloatToBits(0.0f) == Val) ||
         (FloatToBits(1.0f) == Val) ||
         (FloatToBits(-1.0f) == Val) ||
         (FloatToBits(0.5f) == Val) ||
         (FloatToBits(-0.5f) == Val) ||
         (FloatToBits(2.0f) == Val) ||
         (FloatToBits(-2.0f) == Val) ||
         (FloatToBits(4.0f) == Val) ||
         (FloatToBits(-4.0f) == Val);
}

bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
                                   unsigned OpSize) const {
  if (MO.isImm()) {
    // MachineOperand provides no way to tell the true operand size, since it
    // only records a 64-bit value. We need to know the size to determine if a
    // 32-bit floating point immediate bit pattern is legal for an integer
    // immediate. It would be for any 32-bit integer operand, but would not be
    // for a 64-bit one.

    unsigned BitSize = 8 * OpSize;
    return isInlineConstant(APInt(BitSize, MO.getImm(), true));
  }

  return false;
}

bool SIInstrInfo::isLiteralConstant(const MachineOperand &MO,
                                    unsigned OpSize) const {
  return MO.isImm() && !isInlineConstant(MO, OpSize);
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCOperandInfo &OpInfo = get(MI->getOpcode()).OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  unsigned OpSize = RI.getRegClass(OpInfo.RegClass)->getSize();
  if (isLiteralConstant(MO, OpSize))
    return RI.opCanUseLiteralConstant(OpInfo.OperandType);

  return RI.opCanUseInlineConstant(OpInfo.OperandType);
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.
  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  unsigned OpSize) const {
  // Literal constants use the constant bus.
  if (isLiteralConstant(MO, OpSize))
    return true;

  if (!MO.isReg() || !MO.isUse())
    return false;

  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // FLAT_SCR is just an SGPR pair.
  if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR))
    return true;

  // EXEC register uses the constant bus.
  if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC)
    return true;

  // SGPRs use the constant bus.
  if (MO.getReg() == AMDGPU::M0 || MO.getReg() == AMDGPU::VCC ||
      (!MO.isImplicit() &&
       (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) ||
        AMDGPU::SGPR_64RegClass.contains(MO.getReg())))) {
    return true;
  }

  return false;
}

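// Instructions such as v_addc_u32 read VCC implicitly rather than as an
// explicit operand; that read still occupies the single constant bus slot,
// so it has to be discovered here.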
static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
    if (MO.isDef())
      continue;

    switch (MO.getReg()) {
    case AMDGPU::VCC:
    case AMDGPU::M0:
    case AMDGPU::FLAT_SCR:
      return MO.getReg();

    default:
      break;
    }
  }

  return AMDGPU::NoRegister;
}

bool SIInstrInfo::verifyInstruction(const MachineInstr *MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI->getOpcode();
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI->getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI->getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI->getOperand(i).isImm()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C:
      if (isLiteralConstant(MI->getOperand(i),
                            RI.getRegClass(RegClass)->getSize())) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case MCOI::OPERAND_IMMEDIATE:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI->getOperand(i).isImm() && !MI->getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      // Fall-through.
    default:
      continue;
    }

    if (!MI->getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      unsigned Reg = MI->getOperand(i).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

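  // Sketch of the rule checked next: a VOP* instruction may read at most one
  // SGPR (or one literal) via the constant bus, so e.g.
  //   v_add_f32 v0, s0, s1
  // is rejected, while v_add_f32 v0, s0, v1 is fine.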
  // Verify VOP*.
  if (isVOP1(*MI) || isVOP2(*MI) || isVOP3(*MI) || isVOPC(*MI)) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned SGPRUsed = findImplicitSGPRRead(*MI);
    if (SGPRUsed != AMDGPU::NoRegister)
      ++ConstantBusCount;

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI->getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, getOpSize(Opcode, OpIdx))) {
        if (MO.isReg()) {
          if (MO.getReg() != SGPRUsed)
            ++ConstantBusCount;
          SGPRUsed = MO.getReg();
        } else {
          ++ConstantBusCount;
        }
      }
    }
    if (ConstantBusCount > 1) {
      ErrInfo = "VOP* instruction uses the constant bus more than once";
      return false;
    }
  }

  // Verify misc. restrictions on specific instructions.
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
    const MachineOperand &Src0 = MI->getOperand(Src0Idx);
    const MachineOperand &Src1 = MI->getOperand(Src1Idx);
    const MachineOperand &Src2 = MI->getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
  if (!isGenericOpcode(Opcode) && !isSALU(Opcode) && !isSMRD(Opcode)) {
    const MachineOperand *Exec = MI->findRegisterUseOperand(AMDGPU::EXEC);
    if (!Exec || !Exec->isImplicit()) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  return true;
}

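// getVALUOp maps a scalar ALU opcode to the VALU opcode used when an
// instruction has to be moved off the scalar unit, e.g. S_AND_B32 ->
// V_AND_B32_e32; INSTRUCTION_LIST_END means there is no mapping.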
unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::S_MOV_B32:
    return MI.getOperand(1).isReg() ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  case AMDGPU::S_ADD_I32:
  case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
  case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e32;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e32;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e32;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e32;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_LOAD_DWORD_IMM:
  case AMDGPU::S_LOAD_DWORD_SGPR:
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
  case AMDGPU::S_LOAD_DWORDX2_IMM:
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
  case AMDGPU::S_LOAD_DWORDX2_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR:
  case AMDGPU::S_LOAD_DWORDX4_IMM_ci:
    return AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  }
}

bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const {
  return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END;
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    unsigned Reg = MI.getOperand(OpNo).getReg();

    if (TargetRegisterInfo::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const {
  switch (MI.getOpcode()) {
  case AMDGPU::COPY:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::PHI:
  case AMDGPU::INSERT_SUBREG:
    return RI.hasVGPRs(getOpRegClass(MI, 0));
  default:
    return RI.hasVGPRs(getOpRegClass(MI, OpNo));
  }
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr *MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI->getParent();
  MachineOperand &MO = MI->getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  unsigned Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI->getParent(), I, DL, get(Opcode), Reg)
    .addOperand(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  unsigned SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

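// Like buildExtractSubReg, but a 64-bit immediate operand is split instead
// of copied: sub0 selects the low 32 bits and sub1 the high 32 bits.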
MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    // XXX - Is there a better way to do this?
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(Op.getImm() & 0xFFFFFFFF);
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(Op.getImm() >> 32);

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1)
void SIInstrInfo::swapOperands(MachineBasicBlock::iterator Inst) const {
  assert(Inst->getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst->getOperand(1);
  Inst->RemoveOperand(1);
  Inst->addOperand(Op1);
}

bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
                                    const MCOperandInfo &OpInfo,
                                    const MachineOperand &MO) const {
  if (!MO.isReg())
    return false;

  unsigned Reg = MO.getReg();
  const TargetRegisterClass *RC =
    TargetRegisterInfo::isVirtualRegister(Reg) ?
    MRI.getRegClass(Reg) :
    RI.getPhysRegClass(Reg);

  // In order to be legal, the common sub-class must be equal to the
  // class of the current operand. For example:
  //
  // v_mov_b32 s0 ; Operand defined as vsrc_32
  //              ; RI.getCommonSubClass(s0,vsrc_32) = sgpr ; LEGAL
  //
  // s_sendmsg 0, s0 ; Operand defined as m0reg
  //                 ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL

  return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC;
}

bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                                     const MCOperandInfo &OpInfo,
                                     const MachineOperand &MO) const {
  if (MO.isReg())
    return isLegalRegOperand(MRI, OpInfo, MO);

  // Handle non-register types that are treated like immediates.
  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI());
  return true;
}

bool SIInstrInfo::isOperandLegal(const MachineInstr *MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const MCInstrDesc &InstDesc = get(MI->getOpcode());
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
    OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI->getOperand(OpIdx);

  if (isVALU(*MI) &&
      usesConstantBus(MRI, *MO, DefinedRC->getSize())) {
    unsigned SGPRUsed =
      MO->isReg() ? MO->getReg() : (unsigned)AMDGPU::NoRegister;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI->getOperand(i);
      if (Op.isReg() && Op.getReg() != SGPRUsed &&
          usesConstantBus(MRI, Op, getOpSize(*MI, i))) {
        return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    return isLegalRegOperand(MRI, OpInfo, *MO);
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI());

  if (!DefinedRC) {
    // This operand expects an immediate.
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}
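
// Illustration of the cases handled below (virtual register names made up):
// in %vreg2 = V_AND_B32_e32 %vreg0:VGPR_32, %vreg1:SGPR_32 the SGPR is not
// allowed in src1, so the operands are commuted to move it into src0; when
// commuting cannot help, src1 is instead rewritten through a fresh VGPR copy
// by legalizeOpWithMove().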
void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
                                       MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();
  const MCInstrDesc &InstrDesc = get(Opc);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  MachineOperand &Src1 = MI->getOperand(Src1Idx);

  // If there is an implicit SGPR use such as the VCC use for
  // v_addc_u32/v_subb_u32, we can only have one constant bus use.
  //
  // Note we do not need to worry about literal constants here. They are
  // disabled for the operand type for instructions because they will always
  // violate the one constant bus use rule.
  bool HasImplicitSGPR = findImplicitSGPRRead(*MI) != AMDGPU::NoRegister;
  if (HasImplicitSGPR) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    MachineOperand &Src0 = MI->getOperand(Src0Idx);

    if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg()))
      legalizeOpWithMove(MI, Src0Idx);
  }

  // VOP2 instructions accept all operand types in src0, so we don't need to
  // check its legality. If src1 is already legal, we don't need to do anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // We do not use commuteInstruction here because it is too aggressive and will
  // commute whenever it is possible. We only want to commute here if it
  // improves legality. This can be called a fairly large number of times so
  // don't waste compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI->isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI->getOperand(Src0Idx);

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
  //
  // TODO: Other immediate-like operand kinds could be commuted if there was a
  // MachineOperand::ChangeTo* for them.
  if ((!Src1.isImm() && !Src1.isReg()) ||
      !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int CommutedOpc = commuteOpcode(*MI);
  if (CommutedOpc == -1) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  MI->setDesc(get(CommutedOpc));

  unsigned Src0Reg = Src0.getReg();
  unsigned Src0SubReg = Src0.getSubReg();
  bool Src0Kill = Src0.isKill();

  if (Src1.isImm())
    Src0.ChangeToImmediate(Src1.getImm());
  else if (Src1.isReg()) {
    Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
    Src0.setSubReg(Src1.getSubReg());
  } else
    llvm_unreachable("Should only have register or immediate operands");

  Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
  Src1.setSubReg(Src0SubReg);
}

// Legalize VOP3 operands. Because all operand types are supported for any
// operand, and since literal constants are not allowed and should never be
// seen, we only need to worry about inserting copies if we use multiple SGPR
// operands.
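//
// For instance (purely illustrative operands): V_MAD_F32 %vgpr0, %sgpr0,
// %sgpr1, %vgpr1 reads two distinct SGPRs, so one of them has to be copied to
// a VGPR until only a single SGPR remains among src0/src1/src2.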
void SIInstrInfo::legalizeOperandsVOP3(
  MachineRegisterInfo &MRI,
  MachineInstr *MI) const {
  unsigned Opc = MI->getOpcode();

  int VOP3Idx[3] = {
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
  };

  // Find the one SGPR operand we are allowed to use.
  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (Idx == -1)
      break;
    MachineOperand &MO = MI->getOperand(Idx);

    // We should never see a VOP3 instruction with an illegal immediate operand.
    if (!MO.isReg())
      continue;

    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
      continue; // VGPRs are legal

    if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) {
      SGPRReg = MO.getReg();
      // We can use one SGPR in each VOP3 instruction.
      continue;
    }

    // If we make it this far, then the operand is not legal and we must
    // legalize it.
    legalizeOpWithMove(MI, Idx);
  }
}

void SIInstrInfo::legalizeOperands(MachineInstr *MI) const {
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  // Legalize VOP2
  if (isVOP2(*MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return;
  }

  // Legalize VOP3
  if (isVOP3(*MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI->getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI->getNumOperands(); i != e; i += 2) {
      if (!MI->getOperand(i).isReg() ||
          !TargetRegisterInfo::isVirtualRegister(MI->getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
        MRI.getRegClass(MI->getOperand(i).getReg());
      if (RI.hasVGPRs(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(*MI, 0))) {
      if (!VRC) {
        assert(SRC);
        VRC = RI.getEquivalentVGPRClass(SRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
      MachineOperand &Op = MI->getOperand(I);
      if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
        continue;
      unsigned DstReg = MRI.createVirtualRegister(RC);

      // MI is a PHI instruction.
      MachineBasicBlock *InsertBB = MI->getOperand(I + 1).getMBB();
      MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();

      BuildMI(*InsertBB, Insert, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
          .addOperand(Op);
      Op.setReg(DstReg);
    }
  }

  // REG_SEQUENCE doesn't really require operand legalization, but if one has a
  // VGPR dest type and SGPR sources, insert copies so all operands are
  // VGPRs. This seems to help operand folding / the register coalescer.
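  //
  // A hypothetical example: %vreg4:VReg_64 = REG_SEQUENCE %vreg0:SGPR_32,
  // sub0, %vreg1:VGPR_32, sub1 has a VGPR destination class, so below the
  // SGPR input is copied into a fresh VGPR and the REG_SEQUENCE ends up
  // reading only VGPRs.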
  if (MI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    MachineBasicBlock *MBB = MI->getParent();
    const TargetRegisterClass *DstRC = getOpRegClass(*MI, 0);
    if (RI.hasVGPRs(DstRC)) {
      // Update all the operands so they are VGPR register classes. These may
      // not be the same register class because REG_SEQUENCE supports mixing
      // subregister index types e.g. sub0_sub1 + sub2 + sub3
      for (unsigned I = 1, E = MI->getNumOperands(); I != E; I += 2) {
        MachineOperand &Op = MI->getOperand(I);
        if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg()))
          continue;

        const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
        const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
        if (VRC == OpRC)
          continue;

        unsigned DstReg = MRI.createVirtualRegister(VRC);

        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), DstReg)
            .addOperand(Op);

        Op.setReg(DstReg);
        Op.setIsKill();
      }
    }

    return;
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI->getOpcode() == AMDGPU::INSERT_SUBREG) {
    unsigned Dst = MI->getOperand(0).getReg();
    unsigned Src0 = MI->getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock &MBB = *MI->getParent();
      unsigned NewSrc0 = MRI.createVirtualRegister(DstRC);
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::COPY), NewSrc0)
          .addReg(Src0);
      MI->getOperand(1).setReg(NewSrc0);
    }
    return;
  }

  // Legalize MUBUF* instructions
  // FIXME: If we start using the non-addr64 instructions for compute, we
  // may need to legalize them here.
  int SRsrcIdx =
    AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::srsrc);
  if (SRsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *SRsrc = &MI->getOperand(SRsrcIdx);
    unsigned SRsrcRC = get(MI->getOpcode()).OpInfo[SRsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()),
                             RI.getRegClass(SRsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    MachineBasicBlock &MBB = *MI->getParent();

    // Extract the ptr from the resource descriptor.
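    //
    // The replacement 128-bit descriptor assembled below keeps only the
    // default data format: dwords 0-1 (base/stride) are zeroed and dwords 2-3
    // carry RSRC_DATA_FORMAT, while the pointer extracted here is instead
    // folded into the 64-bit vaddr.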
    unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc,
      &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

    // Create an empty resource descriptor
    unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();

    // Zero64 = 0
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B64),
            Zero64)
        .addImm(0);

    // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
            SRsrcFormatLo)
        .addImm(RsrcDataFormat & 0xFFFFFFFF);

    // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
            SRsrcFormatHi)
        .addImm(RsrcDataFormat >> 32);

    // NewSRsrc = {Zero64, SRsrcFormat}
    BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc)
        .addReg(Zero64)
        .addImm(AMDGPU::sub0_sub1)
        .addReg(SRsrcFormatLo)
        .addImm(AMDGPU::sub2)
        .addReg(SRsrcFormatHi)
        .addImm(AMDGPU::sub3);

    MachineOperand *VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
    unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
    if (VAddr) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
      unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

      // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0
      DebugLoc DL = MI->getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo)
          .addReg(SRsrcPtr, 0, AMDGPU::sub0)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub0);

      // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi)
          .addReg(SRsrcPtr, 0, AMDGPU::sub1)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub1);

      // NewVaddr = {NewVaddrLo, NewVaddrHi}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);
    } else {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(MBB.getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration()
             < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      MachineOperand *VData = getNamedOperand(*MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(*MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(*MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI->getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(*MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB
          = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
              .addOperand(*VData)
              .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                          // This will be replaced later
                                          // with the new value of vaddr.
              .addOperand(*SRsrc)
              .addOperand(*SOffset)
              .addOperand(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC
              = getNamedOperand(*MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE
              = getNamedOperand(*MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
        Addr64 = MIB;
      } else {
        // Atomics with return.
        Addr64 = BuildMI(MBB, MI, MI->getDebugLoc(), get(Addr64Opcode))
            .addOperand(*VData)
            .addOperand(*VDataIn)
            .addReg(AMDGPU::NoRegister) // Dummy value for vaddr.
                                        // This will be replaced later
                                        // with the new value of vaddr.
            .addOperand(*SRsrc)
            .addOperand(*SOffset)
            .addOperand(*Offset)
            .addImm(getNamedImmOperand(*MI, AMDGPU::OpName::slc))
            .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
      }

      MI->removeFromParent();
      MI = Addr64;

      // NewVaddr = {SRsrcPtr:sub0, SRsrcPtr:sub1}
      BuildMI(MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(SRsrcPtr, 0, AMDGPU::sub0)
          .addImm(AMDGPU::sub0)
          .addReg(SRsrcPtr, 0, AMDGPU::sub1)
          .addImm(AMDGPU::sub1);

      VAddr = getNamedOperand(*MI, AMDGPU::OpName::vaddr);
      SRsrc = getNamedOperand(*MI, AMDGPU::OpName::srsrc);
    }

    // Update the instruction to use NewVaddr
    VAddr->setReg(NewVAddr);
    // Update the instruction to use NewSRsrc
    SRsrc->setReg(NewSRsrc);
  }
}

void SIInstrInfo::splitSMRD(MachineInstr *MI,
                            const TargetRegisterClass *HalfRC,
                            unsigned HalfImmOp, unsigned HalfSGPROp,
                            MachineInstr *&Lo, MachineInstr *&Hi) const {

  DebugLoc DL = MI->getDebugLoc();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RegLo = MRI.createVirtualRegister(HalfRC);
  unsigned RegHi = MRI.createVirtualRegister(HalfRC);
  unsigned HalfSize = HalfRC->getSize();
  const MachineOperand *OffOp =
    getNamedOperand(*MI, AMDGPU::OpName::offset);
  const MachineOperand *SBase = getNamedOperand(*MI, AMDGPU::OpName::sbase);

  // The SMRD has an 8-bit offset in dwords on SI and a 20-bit offset in bytes
  // on VI.

  bool IsKill = SBase->isKill();
  if (OffOp) {
    bool isVI =
      MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() >=
      AMDGPUSubtarget::VOLCANIC_ISLANDS;
    unsigned OffScale = isVI ? 1 : 4;
    // Handle the _IMM variant
    unsigned LoOffset = OffOp->getImm() * OffScale;
    unsigned HiOffset = LoOffset + HalfSize;
    Lo = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegLo)
             // Use addReg instead of addOperand
             // to make sure kill flag is cleared.
             .addReg(SBase->getReg(), 0, SBase->getSubReg())
             .addImm(LoOffset / OffScale);

    if (!isUInt<20>(HiOffset) || (!isVI && !isUInt<8>(HiOffset / OffScale))) {
      unsigned OffsetSGPR =
        MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*MBB, MI, DL, get(AMDGPU::S_MOV_B32), OffsetSGPR)
          .addImm(HiOffset); // The offset in register is in bytes.
      Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
               .addReg(SBase->getReg(), getKillRegState(IsKill),
                       SBase->getSubReg())
               .addReg(OffsetSGPR);
    } else {
      Hi = BuildMI(*MBB, MI, DL, get(HalfImmOp), RegHi)
               .addReg(SBase->getReg(), getKillRegState(IsKill),
                       SBase->getSubReg())
               .addImm(HiOffset / OffScale);
    }
  } else {
    // Handle the _SGPR variant
    MachineOperand *SOff = getNamedOperand(*MI, AMDGPU::OpName::soff);
    Lo = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegLo)
             .addReg(SBase->getReg(), 0, SBase->getSubReg())
             .addOperand(*SOff);
    unsigned OffsetSGPR = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, MI, DL, get(AMDGPU::S_ADD_I32), OffsetSGPR)
        .addReg(SOff->getReg(), 0, SOff->getSubReg())
        .addImm(HalfSize);
    Hi = BuildMI(*MBB, MI, DL, get(HalfSGPROp), RegHi)
             .addReg(SBase->getReg(), getKillRegState(IsKill),
                     SBase->getSubReg())
             .addReg(OffsetSGPR);
  }

  unsigned SubLo, SubHi;
  const TargetRegisterClass *NewDstRC;
  switch (HalfSize) {
  case 4:
    SubLo = AMDGPU::sub0;
    SubHi = AMDGPU::sub1;
    NewDstRC = &AMDGPU::VReg_64RegClass;
    break;
  case 8:
    SubLo = AMDGPU::sub0_sub1;
    SubHi = AMDGPU::sub2_sub3;
    NewDstRC = &AMDGPU::VReg_128RegClass;
    break;
  case 16:
    SubLo = AMDGPU::sub0_sub1_sub2_sub3;
    SubHi = AMDGPU::sub4_sub5_sub6_sub7;
    NewDstRC = &AMDGPU::VReg_256RegClass;
    break;
  case 32:
    SubLo = AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
    SubHi = AMDGPU::sub8_sub9_sub10_sub11_sub12_sub13_sub14_sub15;
    NewDstRC = &AMDGPU::VReg_512RegClass;
    break;
  default:
    llvm_unreachable("Unhandled HalfSize");
  }

  unsigned OldDst = MI->getOperand(0).getReg();
  unsigned NewDst = MRI.createVirtualRegister(NewDstRC);

  MRI.replaceRegWith(OldDst, NewDst);

  BuildMI(*MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewDst)
      .addReg(RegLo)
      .addImm(SubLo)
      .addReg(RegHi)
      .addImm(SubHi);
}

void SIInstrInfo::moveSMRDToVALU(MachineInstr *MI,
                                 MachineRegisterInfo &MRI,
                                 SmallVectorImpl<MachineInstr *> &Worklist) const {
  MachineBasicBlock *MBB = MI->getParent();
  int DstIdx = AMDGPU::getNamedOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  assert(DstIdx != -1);
  unsigned DstRCID = get(MI->getOpcode()).OpInfo[DstIdx].RegClass;
  switch(RI.getRegClass(DstRCID)->getSize()) {
  case 4:
  case 8:
  case 16: {
    unsigned NewOpcode = getVALUOp(*MI);
    unsigned RegOffset;
    unsigned ImmOffset;

    if (MI->getOperand(2).isReg()) {
      RegOffset = MI->getOperand(2).getReg();
      ImmOffset = 0;
    } else {
      assert(MI->getOperand(2).isImm());
      // SMRD instructions take a dword offset on SI and a byte offset on VI,
      // while MUBUF instructions always take a byte offset.
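      // For example, an S_LOAD immediate offset of 4 means byte 16 on SI, so
      // it is scaled by 4 below before being reused as a MUBUF byte offset;
      // on VI it is already in bytes and is used unchanged.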
      ImmOffset = MI->getOperand(2).getImm();
      if (MBB->getParent()->getSubtarget<AMDGPUSubtarget>().getGeneration() <=
          AMDGPUSubtarget::SEA_ISLANDS)
        ImmOffset <<= 2;
      RegOffset = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

      if (isUInt<12>(ImmOffset)) {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
            .addImm(0);
      } else {
        BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32),
                RegOffset)
            .addImm(ImmOffset);
        ImmOffset = 0;
      }
    }

    unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass);
    unsigned DWord0 = RegOffset;
    unsigned DWord1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    unsigned DWord3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    uint64_t RsrcDataFormat = getDefaultRsrcDataFormat();

    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord1)
        .addImm(0);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord2)
        .addImm(RsrcDataFormat & 0xFFFFFFFF);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::S_MOV_B32), DWord3)
        .addImm(RsrcDataFormat >> 32);
    BuildMI(*MBB, MI, MI->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), SRsrc)
        .addReg(DWord0)
        .addImm(AMDGPU::sub0)
        .addReg(DWord1)
        .addImm(AMDGPU::sub1)
        .addReg(DWord2)
        .addImm(AMDGPU::sub2)
        .addReg(DWord3)
        .addImm(AMDGPU::sub3);

    const MCInstrDesc &NewInstDesc = get(NewOpcode);
    const TargetRegisterClass *NewDstRC
      = RI.getRegClass(NewInstDesc.OpInfo[0].RegClass);
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    unsigned DstReg = MI->getOperand(0).getReg();
    MRI.replaceRegWith(DstReg, NewDstReg);

    MachineInstr *NewInst =
      BuildMI(*MBB, MI, MI->getDebugLoc(), NewInstDesc, NewDstReg)
          .addOperand(MI->getOperand(1)) // sbase
          .addReg(SRsrc)
          .addImm(0)
          .addImm(ImmOffset)
          .addImm(0) // glc
          .addImm(0) // slc
          .addImm(0) // tfe
          .setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    MI->eraseFromParent();

    legalizeOperands(NewInst);
    addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
    break;
  }
  case 32: {
    MachineInstr *Lo, *Hi;
    splitSMRD(MI, &AMDGPU::SReg_128RegClass, AMDGPU::S_LOAD_DWORDX4_IMM,
              AMDGPU::S_LOAD_DWORDX4_SGPR, Lo, Hi);
    MI->eraseFromParent();
    moveSMRDToVALU(Lo, MRI, Worklist);
    moveSMRDToVALU(Hi, MRI, Worklist);
    break;
  }

  case 64: {
    MachineInstr *Lo, *Hi;
    splitSMRD(MI, &AMDGPU::SReg_256RegClass, AMDGPU::S_LOAD_DWORDX8_IMM,
              AMDGPU::S_LOAD_DWORDX8_SGPR, Lo, Hi);
    MI->eraseFromParent();
    moveSMRDToVALU(Lo, MRI, Worklist);
    moveSMRDToVALU(Hi, MRI, Worklist);
    break;
  }
  }
}

void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const {
  SmallVector<MachineInstr *, 128> Worklist;
  Worklist.push_back(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr *Inst = Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst->getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst->getOpcode();
    unsigned NewOpcode = getVALUOp(*Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      if (isSMRD(*Inst)) {
        moveSMRDToVALU(Inst, MRI, Worklist);
        continue;
      }
      break;
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_I64: {
      splitScalar64BitBFE(Worklist, Inst);
      Inst->eraseFromParent();
      continue;
    }

    case AMDGPU::S_LSHL_B32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B32:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHL_B64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHLREV_B64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_ASHRREV_I64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B64:
      if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
        NewOpcode = AMDGPU::V_LSHRREV_B64;
        swapOperands(Inst);
      }
      break;

    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
      Inst->eraseFromParent();
      continue;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      legalizeOperands(Inst);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst->setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it, and
    // we're just about to add the implicit use / defs of VCC, and we don't want
    // both.
    for (unsigned i = Inst->getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst->getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC)
        Inst->RemoveOperand(i);
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst->addOperand(MachineOperand::CreateImm(0));
      Inst->addOperand(MachineOperand::CreateImm(Size));

    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
      // The VALU version adds the second operand to the result, so insert an
      // extra 0 operand.
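      // i.e. (registers illustrative) s_bcnt1_i32_b32 s0, s1 becomes
      // v_bcnt_u32_b32 v0, v1, 0 so the accumulated count starts at 0.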
      Inst->addOperand(MachineOperand::CreateImm(0));
    }

    Inst->addImplicitDefUseOperands(*Inst->getParent()->getParent());

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst->getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
      Inst->RemoveOperand(2);                     // Remove old immediate.
      Inst->addOperand(MachineOperand::CreateImm(Offset));
      Inst->addOperand(MachineOperand::CreateImm(BitWidth));
    }

    // Update the destination register class.
    const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(*Inst);
    if (!NewDstRC)
      continue;

    unsigned DstReg = Inst->getOperand(0).getReg();
    unsigned NewDstReg = MRI.createVirtualRegister(NewDstRC);
    MRI.replaceRegWith(DstReg, NewDstReg);

    // Legalize the operands
    legalizeOperands(Inst);

    addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
  }
}

//===----------------------------------------------------------------------===//
// Indirect addressing callbacks
//===----------------------------------------------------------------------===//

unsigned SIInstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                               unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

const TargetRegisterClass *SIInstrInfo::getIndirectAddrRegClass() const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
                                 MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);
  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
      .addImm(0)
      .addReg(Src.getReg());

  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
      .addReg(Src.getReg())
      .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
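
// splitScalar64BitUnaryOp expands a 64-bit scalar op into two 32-bit VALU ops.
// For example (illustrative), S_NOT_B64 on an SGPR pair becomes a V_NOT_B32 of
// the sub0 half and a V_NOT_B32 of the sub1 half, recombined with a
// REG_SEQUENCE into a 64-bit VGPR pair.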
void SIInstrInfo::splitScalar64BitUnaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub0)
      .addOperand(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  BuildMI(MBB, MII, DL, InstDesc, DestSub1)
      .addOperand(SrcReg0Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // We don't need to legalizeOperands here because for a single operand, src0
  // will support any kind of input.

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}
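
// splitScalar64BitBinaryOp does the same for two-operand ops. For example
// (illustrative), S_AND_B64 of two SGPR pairs becomes a V_AND_B32 of the sub0
// halves and a V_AND_B32 of the sub1 halves, plus a REG_SEQUENCE for the
// 64-bit result.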
void SIInstrInfo::splitScalar64BitBinaryOp(
  SmallVectorImpl<MachineInstr *> &Worklist,
  MachineInstr *Inst,
  unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src0 = Inst->getOperand(1);
  MachineOperand &Src1 = Inst->getOperand(2);
  DebugLoc DL = Inst->getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr *LoHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub0)
      .addOperand(SrcReg0Sub0)
      .addOperand(SrcReg1Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr *HiHalf = BuildMI(MBB, MII, DL, InstDesc, DestSub1)
      .addOperand(SrcReg0Sub1)
      .addOperand(SrcReg1Sub1);

  unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(LoHalf);
  legalizeOperands(HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBCNT(SmallVectorImpl<MachineInstr *> &Worklist,
                                       MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  MachineOperand &Src = Inst->getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  BuildMI(MBB, MII, DL, InstDesc, MidReg)
      .addOperand(SrcRegSub0)
      .addImm(0);

  BuildMI(MBB, MII, DL, InstDesc, ResultReg)
      .addOperand(SrcRegSub1)
      .addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can be
  // an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
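
// splitScalar64BitBFE handles the S_BFE_I64 sign-extension pattern: for a
// width under 32 it emits (sketch) a V_BFE_I32 on the low half followed by a
// V_ASHRREV_I32 by 31 to materialize the sign bits of the high half.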
void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
                                      MachineInstr *Inst) const {
  MachineBasicBlock &MBB = *Inst->getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst->getDebugLoc();

  MachineOperand &Dest = Inst->getOperand(0);
  uint32_t Imm = Inst->getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
  assert(Inst->getOpcode() == AMDGPU::S_BFE_I64 &&
         BitWidth <= 32 &&
         Offset == 0 &&
         "Not implemented");

  if (BitWidth < 32) {
    unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
        .addReg(Inst->getOperand(1).getReg(), 0, AMDGPU::sub0)
        .addImm(0)
        .addImm(BitWidth);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
        .addImm(31)
        .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
        .addReg(MidRegLo)
        .addImm(AMDGPU::sub0)
        .addReg(MidRegHi)
        .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  MachineOperand &Src = Inst->getOperand(1);
  unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
      .addImm(31)
      .addReg(Src.getReg(), 0, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(Src.getReg(), 0, AMDGPU::sub0)
      .addImm(AMDGPU::sub0)
      .addReg(TmpReg)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addUsersToMoveToVALUWorklist(
  unsigned DstReg,
  MachineRegisterInfo &MRI,
  SmallVectorImpl<MachineInstr *> &Worklist) const {
  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
         E = MRI.use_end(); I != E; ++I) {
    MachineInstr &UseMI = *I->getParent();
    if (!canReadVGPR(UseMI, I.getOperandNo())) {
      Worklist.push_back(&UseMI);
    }
  }
}

const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
    const MachineInstr &Inst) const {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  switch (Inst.getOpcode()) {
  // For target instructions, getOpRegClass just returns the virtual register
  // class associated with the operand, so we need to find an equivalent VGPR
  // register class in order to move the instruction to the VALU.
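  //
  // For example, a PHI defined in SReg_64 is retargeted to VReg_64 below; if
  // the destination is already a VGPR class, there is nothing to do and we
  // return nullptr.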
  case AMDGPU::COPY:
  case AMDGPU::PHI:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
    if (RI.hasVGPRs(NewDstRC))
      return nullptr;

    NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
    if (!NewDstRC)
      return nullptr;
    return NewDstRC;
  default:
    return NewDstRC;
  }
}

// Find the one SGPR operand we are allowed to use.
unsigned SIInstrInfo::findUsedSGPR(const MachineInstr *MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI->getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  unsigned SGPRReg = findImplicitSGPRRead(*MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI->getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    unsigned Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineInstrBuilder SIInstrInfo::buildIndirectWrite(
  MachineBasicBlock *MBB,
  MachineBasicBlock::iterator I,
  unsigned ValueReg,
  unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
    getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_DST_V1))
      .addReg(IndirectBaseReg, RegState::Define)
      .addOperand(I->getOperand(0))
      .addReg(IndirectBaseReg)
      .addReg(OffsetReg)
      .addImm(0)
      .addReg(ValueReg);
}

MachineInstrBuilder SIInstrInfo::buildIndirectRead(
  MachineBasicBlock *MBB,
  MachineBasicBlock::iterator I,
  unsigned ValueReg,
  unsigned Address, unsigned OffsetReg) const {
  const DebugLoc &DL = MBB->findDebugLoc(I);
  unsigned IndirectBaseReg = AMDGPU::VGPR_32RegClass.getRegister(
    getIndirectIndexBegin(*MBB->getParent()));

  return BuildMI(*MBB, I, DL, get(AMDGPU::SI_INDIRECT_SRC_V1))
      .addOperand(I->getOperand(0))
      .addOperand(I->getOperand(1))
      .addReg(IndirectBaseReg)
      .addReg(OffsetReg)
      .addImm(0);
}

void SIInstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                           const MachineFunction &MF) const {
  int End = getIndirectIndexEnd(MF);
  int Begin = getIndirectIndexBegin(MF);

  if (End == -1)
    return;

  for (int Index = Begin; Index <= End; ++Index)
    Reserved.set(AMDGPU::VGPR_32RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 1); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_64RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 2); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_96RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 3); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_128RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 7); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_256RegClass.getRegister(Index));

  for (int Index = std::max(0, Begin - 15); Index <= End; ++Index)
    Reserved.set(AMDGPU::VReg_512RegClass.getRegister(Index));
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    RsrcDataFormat |= (1ULL << 56);

    if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      // Set MTYPE = 2
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}