//===-- SIInstrInfo.cpp - SI Instruction Information ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPUTargetMachine.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const SISubtarget &ST)
  : AMDGPUInstrInfo(ST), RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// \brief Returns true if both nodes have the same value for the given
///        operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this exec check.
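  // Note: an immediate V_MOV only reads exec implicitly, so re-emitting it at
  // the point of use is safe; the generic check rejects it solely because of
  // that implicit exec use.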
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
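    // Bail out in that case rather than trying to reason about non-constant
    // offsets.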
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg,
                                        int64_t &Offset,
                                        const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);

      BaseReg = AddrReg->getReg();
      Offset = OffsetImm->getImm();
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::addr);
      BaseReg = AddrReg->getReg();
      Offset = EltSize * Offset0;
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    const MachineOperand *AddrReg =
        getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseReg = AddrReg->getReg();
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg =
        getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseReg = SBaseReg->getReg();
    Offset = OffsetImm->getImm();
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    BaseReg = AddrReg->getReg();
    Offset = 0;
    return true;
  }

  return false;
}

bool SIInstrInfo::shouldClusterMemOps(MachineInstr &FirstLdSt,
                                      MachineInstr &SecondLdSt,
                                      unsigned NumLoads) const {
  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
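  // With the current value of 16, e.g., four 4-byte loads or two 8-byte loads
  // may still be clustered, but two 16-byte loads (32 bytes total) may not.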
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(*MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction()->getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(-1)
          .addImm(0);
      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
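        // The VReg_1 boolean lives in a VGPR holding 0 or 1 per lane, so it
        // is copied into VCC by comparing the VGPR against zero.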
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RI.getRegSizeInBits(*RC) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addReg(FalseReg)
      .addReg(TrueReg)
      .add(Cond[0]);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .add(RegOp);
      break;
    }
    case SIInstrInfo::VCCZ: {
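      // Same as VCCNZ, but with the true and false inputs swapped.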
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(TrueReg)
        .addReg(FalseReg)
        .add(RegOp);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
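      // Mark m0 as implicitly defined so the spill pseudo is known to
      // clobber it when it is later lowered to scalar stores.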
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::storeRegToStackSlot - Do not know how to"
                  " spill register");
    BuildMI(MBB, MI, DL, get(AMDGPU::KILL))
      .addReg(SrcReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg())        // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
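      // As with the save case, mark m0 as clobbered by the restore pseudo.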
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine);
    }

    return;
  }

  if (!ST.isVGPRSpillingEnabled(*MF->getFunction())) {
    LLVMContext &Ctx = MF->getFunction()->getContext();
    Ctx.emitError("SIInstrInfo::loadRegFromStackSlot - Do not know how to"
                  " restore register");
    BuildMI(MBB, MI, DL, get(AMDGPU::IMPLICIT_DEF), DestReg);

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)        // vaddr
    .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg()) // scratch_offset
    .addImm(0)                        // offset
    .addMemOperand(MMO);
}

/// \param @Offset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction()->getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {

      unsigned TIDIGXReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = TRI->getPreloadedValue(*MF, SIRegisterInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        TRI->getPreloadedValue(*MF, SIRegisterInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
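      // For now, scavenge two individual 32-bit SGPRs, one for each dword
      // loaded from the kernarg segment below.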
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_ADD_I32_e32), TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator)
      BuildMI(MBB, MBB.end(), DebugLoc(),
              get(Info->returnsVoid() ? AMDGPU::S_ENDPGM : AMDGPU::SI_RETURN_TO_EPILOG));
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return AMDGPUInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term: {
    // This is only a terminator to get the correct spill code placement
    // during register allocation.
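    // Lowering just drops the _term suffix and reuses the ordinary scalar
    // opcode.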
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  }
  case AMDGPU::S_XOR_B64_term: {
    // This is only a terminator to get the correct spill code placement
    // during register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;
  }
  case AMDGPU::S_ANDN2_B64_term: {
    // This is only a terminator to get the correct spill code placement
    // during register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;
  }
  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    unsigned VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
        BuildMI(MBB, MI, DL, MovRelDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .add(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
        MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
      MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    llvm::finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  unsigned Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
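    // Give up in that case; a null result tells the caller the commute
    // failed.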
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!MI.isCommutable())
    return false;

  unsigned Opc = MI.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal
    // so there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately
  // after s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  } else {
    // Backwards branch.
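    // Same sequence as the forward case, but subtract the distance with
    // borrow through the high half (S_SUB_U32 defines SCC for S_SUBB_U32).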
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
    .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short
  // branch into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegister(&AMDGPU::SReg_64RegClass,
                                       MachineBasicBlock::iterator(GetPC), 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);

  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  MachineBasicBlock *CondBB = nullptr;

  if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    CondBB = I->getOperand(1).getMBB();
    Cond.push_back(I->getOperand(0));
  } else {
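    // Otherwise this should be a plain s_cbranch_*; map the opcode back to
    // its predicate.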
    BranchPredicate Pred = getBranchPredicate(I->getOpcode());
    if (Pred == INVALID_BR)
      return true;

    CondBB = I->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(Pred));
    Cond.push_back(I->getOperand(1)); // Save the branch register.
  }
  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  if (I == MBB.end())
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

// Copy the flags onto the implicit condition register operand.
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
      .add(Cond[0])
      .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
        .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);

    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  assert(TBB && FBB);

  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  MachineOperand &CondReg = CondBr->getOperand(1);
  CondReg.setIsUndef(Cond[1].isUndef());
  CondReg.setIsKill(Cond[1].isKill());

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

bool SIInstrInfo::reverseBranchCondition(
  SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.size() != 2) {
    return true;
  }

  if (Cond[0].isImm()) {
    Cond[0].setImm(-Cond[0].getImm());
    return false;
  }

  return true;
}

bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                  ArrayRef<MachineOperand> Cond,
                                  unsigned TrueReg, unsigned FalseReg,
                                  int &CondCycles,
                                  int &TrueCycles, int &FalseCycles) const {
  switch (Cond[0].getImm()) {
  case VCCNZ:
  case VCCZ: {
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    assert(MRI.getRegClass(FalseReg) == RC);

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???

    // Limit to equal cost for branch vs. N v_cndmask_b32s.
    return !RI.isSGPRClass(RC) && NumInsts <= 6;
  }
  case SCC_TRUE:
  case SCC_FALSE: {
    // FIXME: We could insert for VGPRs if we could replace the original
    // compare with a vector one.
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    assert(MRI.getRegClass(FalseReg) == RC);

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;

    // Multiples of 8 can do s_cselect_b64
    if (NumInsts % 2 == 0)
      NumInsts /= 2;

    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
    return RI.isSGPRClass(RC);
  }
  default:
    return false;
  }
}

void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, const DebugLoc &DL,
                               unsigned DstReg, ArrayRef<MachineOperand> Cond,
                               unsigned TrueReg, unsigned FalseReg) const {
  BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
  if (Pred == VCCZ || Pred == SCC_FALSE) {
    Pred = static_cast<BranchPredicate>(-Pred);
    std::swap(TrueReg, FalseReg);
  }

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  unsigned DstSize = RI.getRegSizeInBits(*DstRC);

  if (DstSize == 32) {
    unsigned SelOp = Pred == SCC_TRUE ?
      AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32;

    // Instruction's operands are backwards from what is expected.
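    // For V_CNDMASK_B32, src1 is selected when the condition bit is set, so
    // the false value must come first.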
    MachineInstr *Select =
      BuildMI(MBB, I, DL, get(SelOp), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg);

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  if (DstSize == 64 && Pred == SCC_TRUE) {
    MachineInstr *Select =
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg);

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
    AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
    AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
  const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
  const int16_t *SubIndices = Sub0_15;
  int NElts = DstSize / 32;

  // 64-bit select is only available for SALU.
  if (Pred == SCC_TRUE) {
    SelOp = AMDGPU::S_CSELECT_B64;
    EltRC = &AMDGPU::SGPR_64RegClass;
    SubIndices = Sub0_15_64;

    assert(NElts % 2 == 0);
    NElts /= 2;
  }

  MachineInstrBuilder MIB = BuildMI(
    MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);

  I = MIB->getIterator();

  SmallVector<unsigned, 8> Regs;
  for (int Idx = 0; Idx != NElts; ++Idx) {
    unsigned DstElt = MRI.createVirtualRegister(EltRC);
    Regs.push_back(DstElt);

    unsigned SubIdx = SubIndices[Idx];

    MachineInstr *Select =
      BuildMI(MBB, I, DL, get(SelOp), DstElt)
        .addReg(FalseReg, 0, SubIdx)
        .addReg(TrueReg, 0, SubIdx);
    preserveCondRegFlags(Select->getOperand(3), Cond[1]);

    MIB.addReg(DstElt)
       .addImm(SubIdx);
  }
}

bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
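    // Compare the real operand count against what the descriptor expects
    // (explicit operands plus implicit uses); any extras imply an indexed
    // access.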
1749 unsigned NumOps = MI.getDesc().getNumOperands() + 1750 MI.getDesc().getNumImplicitUses(); 1751 1752 return MI.getNumOperands() == NumOps; 1753 } 1754 case AMDGPU::S_MOV_B32: 1755 case AMDGPU::S_MOV_B64: 1756 case AMDGPU::COPY: 1757 return true; 1758 default: 1759 return false; 1760 } 1761 } 1762 1763 static void removeModOperands(MachineInstr &MI) { 1764 unsigned Opc = MI.getOpcode(); 1765 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1766 AMDGPU::OpName::src0_modifiers); 1767 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1768 AMDGPU::OpName::src1_modifiers); 1769 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1770 AMDGPU::OpName::src2_modifiers); 1771 1772 MI.RemoveOperand(Src2ModIdx); 1773 MI.RemoveOperand(Src1ModIdx); 1774 MI.RemoveOperand(Src0ModIdx); 1775 } 1776 1777 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 1778 unsigned Reg, MachineRegisterInfo *MRI) const { 1779 if (!MRI->hasOneNonDBGUse(Reg)) 1780 return false; 1781 1782 unsigned Opc = UseMI.getOpcode(); 1783 if (Opc == AMDGPU::COPY) { 1784 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg()); 1785 switch (DefMI.getOpcode()) { 1786 default: 1787 return false; 1788 case AMDGPU::S_MOV_B64: 1789 // TODO: We could fold 64-bit immediates, but this gets complicated 1790 // when there are sub-registers. 1791 return false; 1792 1793 case AMDGPU::V_MOV_B32_e32: 1794 case AMDGPU::S_MOV_B32: 1795 break; 1796 } 1797 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1798 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 1799 assert(ImmOp); 1800 // FIXME: We could handle FrameIndex values here. 1801 if (!ImmOp->isImm()) { 1802 return false; 1803 } 1804 UseMI.setDesc(get(NewOpc)); 1805 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm()); 1806 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 1807 return true; 1808 } 1809 1810 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 1811 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) { 1812 // Don't fold if we are using source or output modifiers. The new VOP2 1813 // instructions don't have them. 1814 if (hasAnyModifiersSet(UseMI)) 1815 return false; 1816 1817 const MachineOperand &ImmOp = DefMI.getOperand(1); 1818 1819 // If this is a free constant, there's no reason to do this. 1820 // TODO: We could fold this here instead of letting SIFoldOperands do it 1821 // later. 1822 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 1823 1824 // Any src operand can be used for the legality check. 1825 if (isInlineConstant(UseMI, *Src0, ImmOp)) 1826 return false; 1827 1828 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64; 1829 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 1830 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 1831 1832 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 1833 // We should only expect these to be on src0 due to canonicalizations. 1834 if (Src0->isReg() && Src0->getReg() == Reg) { 1835 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 1836 return false; 1837 1838 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 1839 return false; 1840 1841 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 1842 1843 const int64_t Imm = DefMI.getOperand(1).getImm(); 1844 1845 // FIXME: This would be a lot easier if we could return a new instruction 1846 // instead of having to modify in place.
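// Roughly, the rewrite below (virtual registers hypothetical) turns
//   %k = V_MOV_B32_e32 <imm>
//   %d = V_MAD_F32 %k, %v1, %v2
// into
//   %d = V_MADMK_F32 %v1, <imm>, %v2
// because v_madmk_{f16,f32} expects its literal in the second source slot;
// source modifier operands are omitted from this sketch.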
1847 1848 // Remove these first since they are at the end. 1849 UseMI.RemoveOperand( 1850 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 1851 UseMI.RemoveOperand( 1852 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 1853 1854 unsigned Src1Reg = Src1->getReg(); 1855 unsigned Src1SubReg = Src1->getSubReg(); 1856 Src0->setReg(Src1Reg); 1857 Src0->setSubReg(Src1SubReg); 1858 Src0->setIsKill(Src1->isKill()); 1859 1860 if (Opc == AMDGPU::V_MAC_F32_e64 || 1861 Opc == AMDGPU::V_MAC_F16_e64) 1862 UseMI.untieRegOperand( 1863 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 1864 1865 Src1->ChangeToImmediate(Imm); 1866 1867 removeModOperands(UseMI); 1868 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16)); 1869 1870 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 1871 if (DeleteDef) 1872 DefMI.eraseFromParent(); 1873 1874 return true; 1875 } 1876 1877 // Added part is the constant: Use v_madak_{f16, f32}. 1878 if (Src2->isReg() && Src2->getReg() == Reg) { 1879 // Not allowed to use constant bus for another operand. 1880 // We can however allow an inline immediate as src0. 1881 if (!Src0->isImm() && 1882 (Src0->isReg() && RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))) 1883 return false; 1884 1885 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 1886 return false; 1887 1888 const int64_t Imm = DefMI.getOperand(1).getImm(); 1889 1890 // FIXME: This would be a lot easier if we could return a new instruction 1891 // instead of having to modify in place. 1892 1893 // Remove these first since they are at the end. 1894 UseMI.RemoveOperand( 1895 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 1896 UseMI.RemoveOperand( 1897 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 1898 1899 if (Opc == AMDGPU::V_MAC_F32_e64 || 1900 Opc == AMDGPU::V_MAC_F16_e64) 1901 UseMI.untieRegOperand( 1902 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 1903 1904 // ChangingToImmediate adds Src2 back to the instruction. 1905 Src2->ChangeToImmediate(Imm); 1906 1907 // These come before src2. 1908 removeModOperands(UseMI); 1909 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16)); 1910 1911 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 1912 if (DeleteDef) 1913 DefMI.eraseFromParent(); 1914 1915 return true; 1916 } 1917 } 1918 1919 return false; 1920 } 1921 1922 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 1923 int WidthB, int OffsetB) { 1924 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 1925 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 1926 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 1927 return LowOffset + LowWidth <= HighOffset; 1928 } 1929 1930 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa, 1931 MachineInstr &MIb) const { 1932 unsigned BaseReg0, BaseReg1; 1933 int64_t Offset0, Offset1; 1934 1935 if (getMemOpBaseRegImmOfs(MIa, BaseReg0, Offset0, &RI) && 1936 getMemOpBaseRegImmOfs(MIb, BaseReg1, Offset1, &RI)) { 1937 1938 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 1939 // FIXME: Handle ds_read2 / ds_write2. 
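// (The single <base, offset, width> summary used here cannot describe the two
// slots a ds_read2/ds_write2 touches, so we conservatively return false.)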
1940 return false; 1941 } 1942 unsigned Width0 = (*MIa.memoperands_begin())->getSize(); 1943 unsigned Width1 = (*MIb.memoperands_begin())->getSize(); 1944 if (BaseReg0 == BaseReg1 && 1945 offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { 1946 return true; 1947 } 1948 } 1949 1950 return false; 1951 } 1952 1953 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa, 1954 MachineInstr &MIb, 1955 AliasAnalysis *AA) const { 1956 assert((MIa.mayLoad() || MIa.mayStore()) && 1957 "MIa must load from or modify a memory location"); 1958 assert((MIb.mayLoad() || MIb.mayStore()) && 1959 "MIb must load from or modify a memory location"); 1960 1961 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 1962 return false; 1963 1964 // XXX - Can we relax this between address spaces? 1965 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 1966 return false; 1967 1968 if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) { 1969 const MachineMemOperand *MMOa = *MIa.memoperands_begin(); 1970 const MachineMemOperand *MMOb = *MIb.memoperands_begin(); 1971 if (MMOa->getValue() && MMOb->getValue()) { 1972 MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo()); 1973 MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo()); 1974 if (!AA->alias(LocA, LocB)) 1975 return true; 1976 } 1977 } 1978 1979 // TODO: Should we check the address space from the MachineMemOperand? That 1980 // would allow us to distinguish objects we know don't alias based on the 1981 // underlying address space, even if it was lowered to a different one, 1982 // e.g. private accesses lowered to use MUBUF instructions on a scratch 1983 // buffer. 1984 if (isDS(MIa)) { 1985 if (isDS(MIb)) 1986 return checkInstOffsetsDoNotOverlap(MIa, MIb); 1987 1988 return !isFLAT(MIb); 1989 } 1990 1991 if (isMUBUF(MIa) || isMTBUF(MIa)) { 1992 if (isMUBUF(MIb) || isMTBUF(MIb)) 1993 return checkInstOffsetsDoNotOverlap(MIa, MIb); 1994 1995 return !isFLAT(MIb) && !isSMRD(MIb); 1996 } 1997 1998 if (isSMRD(MIa)) { 1999 if (isSMRD(MIb)) 2000 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2001 2002 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 2003 } 2004 2005 if (isFLAT(MIa)) { 2006 if (isFLAT(MIb)) 2007 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2008 2009 return false; 2010 } 2011 2012 return false; 2013 } 2014 2015 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2016 MachineInstr &MI, 2017 LiveVariables *LV) const { 2018 bool IsF16 = false; 2019 2020 switch (MI.getOpcode()) { 2021 default: 2022 return nullptr; 2023 case AMDGPU::V_MAC_F16_e64: 2024 IsF16 = true; 2025 case AMDGPU::V_MAC_F32_e64: 2026 break; 2027 case AMDGPU::V_MAC_F16_e32: 2028 IsF16 = true; 2029 case AMDGPU::V_MAC_F32_e32: { 2030 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2031 AMDGPU::OpName::src0); 2032 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2033 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2034 return nullptr; 2035 break; 2036 } 2037 } 2038 2039 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2040 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2041 const MachineOperand *Src0Mods = 2042 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2043 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2044 const MachineOperand *Src1Mods = 2045 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2046 const MachineOperand *Src2 = getNamedOperand(MI,
AMDGPU::OpName::src2); 2047 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2048 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2049 2050 return BuildMI(*MBB, MI, MI.getDebugLoc(), 2051 get(IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32)) 2052 .add(*Dst) 2053 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2054 .add(*Src0) 2055 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2056 .add(*Src1) 2057 .addImm(0) // Src mods 2058 .add(*Src2) 2059 .addImm(Clamp ? Clamp->getImm() : 0) 2060 .addImm(Omod ? Omod->getImm() : 0); 2061 } 2062 2063 // It's not generally safe to move VALU instructions across these since it will 2064 // start using the register as a base index rather than directly. 2065 // XXX - Why isn't hasSideEffects sufficient for these? 2066 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2067 switch (MI.getOpcode()) { 2068 case AMDGPU::S_SET_GPR_IDX_ON: 2069 case AMDGPU::S_SET_GPR_IDX_MODE: 2070 case AMDGPU::S_SET_GPR_IDX_OFF: 2071 return true; 2072 default: 2073 return false; 2074 } 2075 } 2076 2077 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2078 const MachineBasicBlock *MBB, 2079 const MachineFunction &MF) const { 2080 // XXX - Do we want the SP check in the base implementation? 2081 2082 // Target-independent instructions do not have an implicit-use of EXEC, even 2083 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2084 // boundaries prevents incorrect movements of such instructions. 2085 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2086 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2087 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2088 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2089 changesVGPRIndexingMode(MI); 2090 } 2091 2092 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2093 switch (Imm.getBitWidth()) { 2094 case 32: 2095 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2096 ST.hasInv2PiInlineImm()); 2097 case 64: 2098 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2099 ST.hasInv2PiInlineImm()); 2100 case 16: 2101 return ST.has16BitInsts() && 2102 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 2103 ST.hasInv2PiInlineImm()); 2104 default: 2105 llvm_unreachable("invalid bitwidth"); 2106 } 2107 } 2108 2109 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 2110 uint8_t OperandType) const { 2111 if (!MO.isImm() || OperandType < MCOI::OPERAND_FIRST_TARGET) 2112 return false; 2113 2114 // MachineOperand provides no way to tell the true operand size, since it only 2115 // records a 64-bit value. We need to know the size to determine if a 32-bit 2116 // floating point immediate bit pattern is legal for an integer immediate. It 2117 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 
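// For example, the bit pattern 0x3f800000 is the inline constant 1.0f for a
// 32-bit FP operand, but interpreted as a 64-bit integer the same value
// (1065353216) falls well outside the -16..64 inline range.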
2118 2119 int64_t Imm = MO.getImm(); 2120 switch (OperandType) { 2121 case AMDGPU::OPERAND_REG_IMM_INT32: 2122 case AMDGPU::OPERAND_REG_IMM_FP32: 2123 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2124 case AMDGPU::OPERAND_REG_INLINE_C_FP32: { 2125 int32_t Trunc = static_cast<int32_t>(Imm); 2126 return Trunc == Imm && 2127 AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 2128 } 2129 case AMDGPU::OPERAND_REG_IMM_INT64: 2130 case AMDGPU::OPERAND_REG_IMM_FP64: 2131 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2132 case AMDGPU::OPERAND_REG_INLINE_C_FP64: { 2133 return AMDGPU::isInlinableLiteral64(MO.getImm(), 2134 ST.hasInv2PiInlineImm()); 2135 } 2136 case AMDGPU::OPERAND_REG_IMM_INT16: 2137 case AMDGPU::OPERAND_REG_IMM_FP16: 2138 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2139 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2140 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 2141 // A few special case instructions have 16-bit operands on subtargets 2142 // where 16-bit instructions are not legal. 2143 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 2144 // constants in these cases 2145 int16_t Trunc = static_cast<int16_t>(Imm); 2146 return ST.has16BitInsts() && 2147 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2148 } 2149 2150 return false; 2151 } 2152 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 2153 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { 2154 uint32_t Trunc = static_cast<uint32_t>(Imm); 2155 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 2156 } 2157 default: 2158 llvm_unreachable("invalid bitwidth"); 2159 } 2160 } 2161 2162 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 2163 const MCOperandInfo &OpInfo) const { 2164 switch (MO.getType()) { 2165 case MachineOperand::MO_Register: 2166 return false; 2167 case MachineOperand::MO_Immediate: 2168 return !isInlineConstant(MO, OpInfo); 2169 case MachineOperand::MO_FrameIndex: 2170 case MachineOperand::MO_MachineBasicBlock: 2171 case MachineOperand::MO_ExternalSymbol: 2172 case MachineOperand::MO_GlobalAddress: 2173 case MachineOperand::MO_MCSymbol: 2174 return true; 2175 default: 2176 llvm_unreachable("unexpected operand type"); 2177 } 2178 } 2179 2180 static bool compareMachineOp(const MachineOperand &Op0, 2181 const MachineOperand &Op1) { 2182 if (Op0.getType() != Op1.getType()) 2183 return false; 2184 2185 switch (Op0.getType()) { 2186 case MachineOperand::MO_Register: 2187 return Op0.getReg() == Op1.getReg(); 2188 case MachineOperand::MO_Immediate: 2189 return Op0.getImm() == Op1.getImm(); 2190 default: 2191 llvm_unreachable("Didn't expect to be comparing these operand types"); 2192 } 2193 } 2194 2195 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 2196 const MachineOperand &MO) const { 2197 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; 2198 2199 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 2200 2201 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 2202 return true; 2203 2204 if (OpInfo.RegClass < 0) 2205 return false; 2206 2207 if (MO.isImm() && isInlineConstant(MO, OpInfo)) 2208 return RI.opCanUseInlineConstant(OpInfo.OperandType); 2209 2210 return RI.opCanUseLiteralConstant(OpInfo.OperandType); 2211 } 2212 2213 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 2214 int Op32 = AMDGPU::getVOPe32(Opcode); 2215 if (Op32 == -1) 2216 return false; 2217 2218 return pseudoToMCOpcode(Op32) != -1; 2219 } 2220 2221 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 2222 // The src0_modifier operand 
is present on all instructions 2223 // that have modifiers. 2224 2225 return AMDGPU::getNamedOperandIdx(Opcode, 2226 AMDGPU::OpName::src0_modifiers) != -1; 2227 } 2228 2229 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 2230 unsigned OpName) const { 2231 const MachineOperand *Mods = getNamedOperand(MI, OpName); 2232 return Mods && Mods->getImm(); 2233 } 2234 2235 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 2236 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 2237 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 2238 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 2239 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 2240 hasModifiersSet(MI, AMDGPU::OpName::omod); 2241 } 2242 2243 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 2244 const MachineOperand &MO, 2245 const MCOperandInfo &OpInfo) const { 2246 // Literal constants use the constant bus. 2247 //if (isLiteralConstantLike(MO, OpInfo)) 2248 // return true; 2249 if (MO.isImm()) 2250 return !isInlineConstant(MO, OpInfo); 2251 2252 if (!MO.isReg()) 2253 return true; // Misc other operands like FrameIndex 2254 2255 if (!MO.isUse()) 2256 return false; 2257 2258 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 2259 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 2260 2261 // FLAT_SCR is just an SGPR pair. 2262 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) 2263 return true; 2264 2265 // EXEC register uses the constant bus. 2266 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) 2267 return true; 2268 2269 // SGPRs use the constant bus 2270 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || 2271 (!MO.isImplicit() && 2272 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || 2273 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); 2274 } 2275 2276 static unsigned findImplicitSGPRRead(const MachineInstr &MI) { 2277 for (const MachineOperand &MO : MI.implicit_operands()) { 2278 // We only care about reads. 
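// (e.g. V_ADDC_U32_e32 implicitly reads VCC, and that read counts against
// the constant bus limit the callers are tracking.)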
2279 if (MO.isDef()) 2280 continue; 2281 2282 switch (MO.getReg()) { 2283 case AMDGPU::VCC: 2284 case AMDGPU::M0: 2285 case AMDGPU::FLAT_SCR: 2286 return MO.getReg(); 2287 2288 default: 2289 break; 2290 } 2291 } 2292 2293 return AMDGPU::NoRegister; 2294 } 2295 2296 static bool shouldReadExec(const MachineInstr &MI) { 2297 if (SIInstrInfo::isVALU(MI)) { 2298 switch (MI.getOpcode()) { 2299 case AMDGPU::V_READLANE_B32: 2300 case AMDGPU::V_READLANE_B32_si: 2301 case AMDGPU::V_READLANE_B32_vi: 2302 case AMDGPU::V_WRITELANE_B32: 2303 case AMDGPU::V_WRITELANE_B32_si: 2304 case AMDGPU::V_WRITELANE_B32_vi: 2305 return false; 2306 } 2307 2308 return true; 2309 } 2310 2311 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 2312 SIInstrInfo::isSALU(MI) || 2313 SIInstrInfo::isSMRD(MI)) 2314 return false; 2315 2316 return true; 2317 } 2318 2319 static bool isSubRegOf(const SIRegisterInfo &TRI, 2320 const MachineOperand &SuperVec, 2321 const MachineOperand &SubReg) { 2322 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) 2323 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 2324 2325 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 2326 SubReg.getReg() == SuperVec.getReg(); 2327 } 2328 2329 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 2330 StringRef &ErrInfo) const { 2331 uint16_t Opcode = MI.getOpcode(); 2332 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 2333 return true; 2334 2335 const MachineFunction *MF = MI.getParent()->getParent(); 2336 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2337 2338 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 2339 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 2340 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 2341 2342 // Make sure the number of operands is correct. 2343 const MCInstrDesc &Desc = get(Opcode); 2344 if (!Desc.isVariadic() && 2345 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 2346 ErrInfo = "Instruction has wrong number of operands."; 2347 return false; 2348 } 2349 2350 if (MI.isInlineAsm()) { 2351 // Verify register classes for inlineasm constraints. 2352 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 2353 I != E; ++I) { 2354 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 2355 if (!RC) 2356 continue; 2357 2358 const MachineOperand &Op = MI.getOperand(I); 2359 if (!Op.isReg()) 2360 continue; 2361 2362 unsigned Reg = Op.getReg(); 2363 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) { 2364 ErrInfo = "inlineasm operand has incorrect register class."; 2365 return false; 2366 } 2367 } 2368 2369 return true; 2370 } 2371 2372 // Make sure the register classes are correct. 2373 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 2374 if (MI.getOperand(i).isFPImm()) { 2375 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 2376 "all fp values to integers."; 2377 return false; 2378 } 2379 2380 int RegClass = Desc.OpInfo[i].RegClass; 2381 2382 switch (Desc.OpInfo[i].OperandType) { 2383 case MCOI::OPERAND_REGISTER: 2384 if (MI.getOperand(i).isImm()) { 2385 ErrInfo = "Illegal immediate value for operand."; 2386 return false; 2387 } 2388 break; 2389 case AMDGPU::OPERAND_REG_IMM_INT32: 2390 case AMDGPU::OPERAND_REG_IMM_FP32: 2391 break; 2392 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2393 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 2394 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2395 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2396 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2397 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2398 const MachineOperand &MO = MI.getOperand(i); 2399 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 2400 ErrInfo = "Illegal immediate value for operand."; 2401 return false; 2402 } 2403 break; 2404 } 2405 case MCOI::OPERAND_IMMEDIATE: 2406 case AMDGPU::OPERAND_KIMM32: 2407 // Check if this operand is an immediate. 2408 // FrameIndex operands will be replaced by immediates, so they are 2409 // allowed. 2410 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 2411 ErrInfo = "Expected immediate, but got non-immediate"; 2412 return false; 2413 } 2414 LLVM_FALLTHROUGH; 2415 default: 2416 continue; 2417 } 2418 2419 if (!MI.getOperand(i).isReg()) 2420 continue; 2421 2422 if (RegClass != -1) { 2423 unsigned Reg = MI.getOperand(i).getReg(); 2424 if (Reg == AMDGPU::NoRegister || 2425 TargetRegisterInfo::isVirtualRegister(Reg)) 2426 continue; 2427 2428 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 2429 if (!RC->contains(Reg)) { 2430 ErrInfo = "Operand has incorrect register class."; 2431 return false; 2432 } 2433 } 2434 } 2435 2436 // Verify VOP* 2437 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI)) { 2438 // Only look at the true operands. Only a real operand can use the constant 2439 // bus, and we don't want to check pseudo-operands like the source modifier 2440 // flags. 2441 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 2442 2443 unsigned ConstantBusCount = 0; 2444 2445 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 2446 ++ConstantBusCount; 2447 2448 unsigned SGPRUsed = findImplicitSGPRRead(MI); 2449 if (SGPRUsed != AMDGPU::NoRegister) 2450 ++ConstantBusCount; 2451 2452 for (int OpIdx : OpIndices) { 2453 if (OpIdx == -1) 2454 break; 2455 const MachineOperand &MO = MI.getOperand(OpIdx); 2456 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 2457 if (MO.isReg()) { 2458 if (MO.getReg() != SGPRUsed) 2459 ++ConstantBusCount; 2460 SGPRUsed = MO.getReg(); 2461 } else { 2462 ++ConstantBusCount; 2463 } 2464 } 2465 } 2466 if (ConstantBusCount > 1) { 2467 ErrInfo = "VOP* instruction uses the constant bus more than once"; 2468 return false; 2469 } 2470 } 2471 2472 // Verify misc. restrictions on specific instructions. 
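// (e.g. for v_div_scale_{f32|f64} the three sources must look like (a, a, b)
// or (a, b, a); the check below enforces src0 == src1 or src0 == src2.)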
2473 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 2474 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 2475 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 2476 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 2477 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 2478 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 2479 if (!compareMachineOp(Src0, Src1) && 2480 !compareMachineOp(Src0, Src2)) { 2481 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 2482 return false; 2483 } 2484 } 2485 } 2486 2487 if (isSOPK(MI)) { 2488 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm(); 2489 if (sopkIsZext(MI)) { 2490 if (!isUInt<16>(Imm)) { 2491 ErrInfo = "invalid immediate for SOPK instruction"; 2492 return false; 2493 } 2494 } else { 2495 if (!isInt<16>(Imm)) { 2496 ErrInfo = "invalid immediate for SOPK instruction"; 2497 return false; 2498 } 2499 } 2500 } 2501 2502 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 2503 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 2504 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 2505 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 2506 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 2507 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 2508 2509 const unsigned StaticNumOps = Desc.getNumOperands() + 2510 Desc.getNumImplicitUses(); 2511 const unsigned NumImplicitOps = IsDst ? 2 : 1; 2512 2513 // Allow additional implicit operands. This allows a fixup done by the post 2514 // RA scheduler where the main implicit operand is killed and implicit-defs 2515 // are added for sub-registers that remain live after this instruction. 2516 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 2517 ErrInfo = "missing implicit register operands"; 2518 return false; 2519 } 2520 2521 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2522 if (IsDst) { 2523 if (!Dst->isUse()) { 2524 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 2525 return false; 2526 } 2527 2528 unsigned UseOpIdx; 2529 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 2530 UseOpIdx != StaticNumOps + 1) { 2531 ErrInfo = "movrel implicit operands should be tied"; 2532 return false; 2533 } 2534 } 2535 2536 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 2537 const MachineOperand &ImpUse 2538 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 2539 if (!ImpUse.isReg() || !ImpUse.isUse() || 2540 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 2541 ErrInfo = "src0 should be subreg of implicit vector use"; 2542 return false; 2543 } 2544 } 2545 2546 // Make sure we aren't losing exec uses in the td files. This mostly requires 2547 // being careful when using let Uses to try to add other use registers. 2548 if (shouldReadExec(MI)) { 2549 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 2550 ErrInfo = "VALU instruction does not implicitly read exec mask"; 2551 return false; 2552 } 2553 } 2554 2555 if (isSMRD(MI)) { 2556 if (MI.mayStore()) { 2557 // The register offset form of scalar stores may only use m0 as the 2558 // soffset register. 
2559 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 2560 if (Soff && Soff->getReg() != AMDGPU::M0) { 2561 ErrInfo = "scalar stores must use m0 as offset register"; 2562 return false; 2563 } 2564 } 2565 } 2566 2567 if (isFLAT(MI) && !MF->getSubtarget<SISubtarget>().hasFlatInstOffsets()) { 2568 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 2569 if (Offset->getImm() != 0) { 2570 ErrInfo = "subtarget does not support offsets in flat instructions"; 2571 return false; 2572 } 2573 } 2574 2575 return true; 2576 } 2577 2578 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) { 2579 switch (MI.getOpcode()) { 2580 default: return AMDGPU::INSTRUCTION_LIST_END; 2581 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 2582 case AMDGPU::COPY: return AMDGPU::COPY; 2583 case AMDGPU::PHI: return AMDGPU::PHI; 2584 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 2585 case AMDGPU::S_MOV_B32: 2586 return MI.getOperand(1).isReg() ? 2587 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 2588 case AMDGPU::S_ADD_I32: 2589 case AMDGPU::S_ADD_U32: return AMDGPU::V_ADD_I32_e32; 2590 case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32; 2591 case AMDGPU::S_SUB_I32: 2592 case AMDGPU::S_SUB_U32: return AMDGPU::V_SUB_I32_e32; 2593 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 2594 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; 2595 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 2596 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 2597 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 2598 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 2599 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 2600 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 2601 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 2602 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 2603 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 2604 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 2605 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 2606 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 2607 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 2608 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 2609 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 2610 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 2611 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 2612 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 2613 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 2614 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 2615 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 2616 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 2617 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 2618 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 2619 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 2620 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 2621 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 2622 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 2623 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 2624 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 2625 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 2626 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 2627 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 2628 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 2629 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 2630 case 
AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 2631 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 2632 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 2633 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 2634 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 2635 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 2636 } 2637 } 2638 2639 bool SIInstrInfo::isSALUOpSupportedOnVALU(const MachineInstr &MI) const { 2640 return getVALUOp(MI) != AMDGPU::INSTRUCTION_LIST_END; 2641 } 2642 2643 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 2644 unsigned OpNo) const { 2645 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 2646 const MCInstrDesc &Desc = get(MI.getOpcode()); 2647 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 2648 Desc.OpInfo[OpNo].RegClass == -1) { 2649 unsigned Reg = MI.getOperand(OpNo).getReg(); 2650 2651 if (TargetRegisterInfo::isVirtualRegister(Reg)) 2652 return MRI.getRegClass(Reg); 2653 return RI.getPhysRegClass(Reg); 2654 } 2655 2656 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 2657 return RI.getRegClass(RCID); 2658 } 2659 2660 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { 2661 switch (MI.getOpcode()) { 2662 case AMDGPU::COPY: 2663 case AMDGPU::REG_SEQUENCE: 2664 case AMDGPU::PHI: 2665 case AMDGPU::INSERT_SUBREG: 2666 return RI.hasVGPRs(getOpRegClass(MI, 0)); 2667 default: 2668 return RI.hasVGPRs(getOpRegClass(MI, OpNo)); 2669 } 2670 } 2671 2672 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 2673 MachineBasicBlock::iterator I = MI; 2674 MachineBasicBlock *MBB = MI.getParent(); 2675 MachineOperand &MO = MI.getOperand(OpIdx); 2676 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 2677 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 2678 const TargetRegisterClass *RC = RI.getRegClass(RCID); 2679 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 2680 if (MO.isReg()) 2681 Opcode = AMDGPU::COPY; 2682 else if (RI.isSGPRClass(RC)) 2683 Opcode = AMDGPU::S_MOV_B32; 2684 2685 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 2686 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 2687 VRC = &AMDGPU::VReg_64RegClass; 2688 else 2689 VRC = &AMDGPU::VGPR_32RegClass; 2690 2691 unsigned Reg = MRI.createVirtualRegister(VRC); 2692 DebugLoc DL = MBB->findDebugLoc(I); 2693 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 2694 MO.ChangeToRegister(Reg, false); 2695 } 2696 2697 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 2698 MachineRegisterInfo &MRI, 2699 MachineOperand &SuperReg, 2700 const TargetRegisterClass *SuperRC, 2701 unsigned SubIdx, 2702 const TargetRegisterClass *SubRC) 2703 const { 2704 MachineBasicBlock *MBB = MI->getParent(); 2705 DebugLoc DL = MI->getDebugLoc(); 2706 unsigned SubReg = MRI.createVirtualRegister(SubRC); 2707 2708 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 2709 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 2710 .addReg(SuperReg.getReg(), 0, SubIdx); 2711 return SubReg; 2712 } 2713 2714 // Just in case the super register is itself a sub-register, copy it to a new 2715 // value so we don't need to worry about merging its subreg index with the 2716 // SubIdx passed to this function. The register coalescer should be able to 2717 // eliminate this extra copy. 
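// For instance (registers hypothetical), extracting sub0 from %super:sub2_sub3
// is emitted as:
//   %tmp = COPY %super:sub2_sub3
//   %sub = COPY %tmp:sub0
// rather than composing the two subregister indices directly.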
2718 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); 2719 2720 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 2721 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 2722 2723 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 2724 .addReg(NewSuperReg, 0, SubIdx); 2725 2726 return SubReg; 2727 } 2728 2729 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 2730 MachineBasicBlock::iterator MII, 2731 MachineRegisterInfo &MRI, 2732 MachineOperand &Op, 2733 const TargetRegisterClass *SuperRC, 2734 unsigned SubIdx, 2735 const TargetRegisterClass *SubRC) const { 2736 if (Op.isImm()) { 2737 if (SubIdx == AMDGPU::sub0) 2738 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 2739 if (SubIdx == AMDGPU::sub1) 2740 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 2741 2742 llvm_unreachable("Unhandled register index for immediate"); 2743 } 2744 2745 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 2746 SubIdx, SubRC); 2747 return MachineOperand::CreateReg(SubReg, false); 2748 } 2749 2750 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 2751 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 2752 assert(Inst.getNumExplicitOperands() == 3); 2753 MachineOperand Op1 = Inst.getOperand(1); 2754 Inst.RemoveOperand(1); 2755 Inst.addOperand(Op1); 2756 } 2757 2758 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 2759 const MCOperandInfo &OpInfo, 2760 const MachineOperand &MO) const { 2761 if (!MO.isReg()) 2762 return false; 2763 2764 unsigned Reg = MO.getReg(); 2765 const TargetRegisterClass *RC = 2766 TargetRegisterInfo::isVirtualRegister(Reg) ? 2767 MRI.getRegClass(Reg) : 2768 RI.getPhysRegClass(Reg); 2769 2770 const SIRegisterInfo *TRI = 2771 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 2772 RC = TRI->getSubRegClass(RC, MO.getSubReg()); 2773 2774 // In order to be legal, the common sub-class must be equal to the 2775 // class of the current operand. For example: 2776 // 2777 // v_mov_b32 s0 ; Operand defined as vsrc_b32 2778 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL 2779 // 2780 // s_sendmsg 0, s0 ; Operand defined as m0reg 2781 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL 2782 2783 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; 2784 } 2785 2786 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 2787 const MCOperandInfo &OpInfo, 2788 const MachineOperand &MO) const { 2789 if (MO.isReg()) 2790 return isLegalRegOperand(MRI, OpInfo, MO); 2791 2792 // Handle non-register types that are treated like immediates. 2793 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 2794 return true; 2795 } 2796 2797 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 2798 const MachineOperand *MO) const { 2799 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 2800 const MCInstrDesc &InstDesc = MI.getDesc(); 2801 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 2802 const TargetRegisterClass *DefinedRC = 2803 OpInfo.RegClass != -1 ? 
RI.getRegClass(OpInfo.RegClass) : nullptr; 2804 if (!MO) 2805 MO = &MI.getOperand(OpIdx); 2806 2807 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 2808 2809 RegSubRegPair SGPRUsed; 2810 if (MO->isReg()) 2811 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg()); 2812 2813 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 2814 if (i == OpIdx) 2815 continue; 2816 const MachineOperand &Op = MI.getOperand(i); 2817 if (Op.isReg()) { 2818 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && 2819 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 2820 return false; 2821 } 2822 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 2823 return false; 2824 } 2825 } 2826 } 2827 2828 if (MO->isReg()) { 2829 assert(DefinedRC); 2830 return isLegalRegOperand(MRI, OpInfo, *MO); 2831 } 2832 2833 // Handle non-register types that are treated like immediates. 2834 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); 2835 2836 if (!DefinedRC) { 2837 // This operand expects an immediate. 2838 return true; 2839 } 2840 2841 return isImmOperandLegal(MI, OpIdx, *MO); 2842 } 2843 2844 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 2845 MachineInstr &MI) const { 2846 unsigned Opc = MI.getOpcode(); 2847 const MCInstrDesc &InstrDesc = get(Opc); 2848 2849 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2850 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2851 2852 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 2853 // we need to only have one constant bus use. 2854 // 2855 // Note we do not need to worry about literal constants here. They are 2856 // disabled for the operand type for instructions because they will always 2857 // violate the one constant bus use rule. 2858 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 2859 if (HasImplicitSGPR) { 2860 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2861 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2862 2863 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) 2864 legalizeOpWithMove(MI, Src0Idx); 2865 } 2866 2867 // VOP2 src0 instructions support all operand types, so we don't need to check 2868 // their legality. If src1 is already legal, we don't need to do anything. 2869 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 2870 return; 2871 2872 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 2873 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 2874 // select is uniform. 2875 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 2876 RI.isVGPR(MRI, Src1.getReg())) { 2877 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 2878 const DebugLoc &DL = MI.getDebugLoc(); 2879 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 2880 .add(Src1); 2881 Src1.ChangeToRegister(Reg, false); 2882 return; 2883 } 2884 2885 // We do not use commuteInstruction here because it is too aggressive and will 2886 // commute if it is possible. We only want to commute here if it improves 2887 // legality. This can be called a fairly large number of times so don't waste 2888 // compile time pointlessly swapping and checking legality again. 
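// For example (registers hypothetical), "V_ADD_I32_e32 %d, %v0, %s0" has an
// SGPR in src1, which VOP2 forbids, but the commuted form
// "V_ADD_I32_e32 %d, %s0, %v0" is legal because src0 accepts SGPRs and
// immediates.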
2889 if (HasImplicitSGPR || !MI.isCommutable()) { 2890 legalizeOpWithMove(MI, Src1Idx); 2891 return; 2892 } 2893 2894 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2895 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2896 2897 // If src0 can be used as src1, commuting will make the operands legal. 2898 // Otherwise we have to give up and insert a move. 2899 // 2900 // TODO: Other immediate-like operand kinds could be commuted if there was a 2901 // MachineOperand::ChangeTo* for them. 2902 if ((!Src1.isImm() && !Src1.isReg()) || 2903 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 2904 legalizeOpWithMove(MI, Src1Idx); 2905 return; 2906 } 2907 2908 int CommutedOpc = commuteOpcode(MI); 2909 if (CommutedOpc == -1) { 2910 legalizeOpWithMove(MI, Src1Idx); 2911 return; 2912 } 2913 2914 MI.setDesc(get(CommutedOpc)); 2915 2916 unsigned Src0Reg = Src0.getReg(); 2917 unsigned Src0SubReg = Src0.getSubReg(); 2918 bool Src0Kill = Src0.isKill(); 2919 2920 if (Src1.isImm()) 2921 Src0.ChangeToImmediate(Src1.getImm()); 2922 else if (Src1.isReg()) { 2923 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 2924 Src0.setSubReg(Src1.getSubReg()); 2925 } else 2926 llvm_unreachable("Should only have register or immediate operands"); 2927 2928 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 2929 Src1.setSubReg(Src0SubReg); 2930 } 2931 2932 // Legalize VOP3 operands. Because all operand types are supported for any 2933 // operand, and since literal constants are not allowed and should never be 2934 // seen, we only need to worry about inserting copies if we use multiple SGPR 2935 // operands. 2936 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 2937 MachineInstr &MI) const { 2938 unsigned Opc = MI.getOpcode(); 2939 2940 int VOP3Idx[3] = { 2941 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 2942 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 2943 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 2944 }; 2945 2946 // Find the one SGPR operand we are allowed to use. 2947 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 2948 2949 for (unsigned i = 0; i < 3; ++i) { 2950 int Idx = VOP3Idx[i]; 2951 if (Idx == -1) 2952 break; 2953 MachineOperand &MO = MI.getOperand(Idx); 2954 2955 // We should never see a VOP3 instruction with an illegal immediate operand. 2956 if (!MO.isReg()) 2957 continue; 2958 2959 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 2960 continue; // VGPRs are legal 2961 2962 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { 2963 SGPRReg = MO.getReg(); 2964 // We can use one SGPR in each VOP3 instruction. 2965 continue; 2966 } 2967 2968 // If we make it this far, then the operand is not legal and we must 2969 // legalize it. 
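// e.g. a VOP3 with two different SGPR sources keeps the first SGPR and copies
// each additional one into a fresh VGPR below.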
2970 legalizeOpWithMove(MI, Idx); 2971 } 2972 } 2973 2974 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, 2975 MachineRegisterInfo &MRI) const { 2976 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 2977 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 2978 unsigned DstReg = MRI.createVirtualRegister(SRC); 2979 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 2980 2981 SmallVector<unsigned, 8> SRegs; 2982 for (unsigned i = 0; i < SubRegs; ++i) { 2983 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 2984 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 2985 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 2986 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 2987 SRegs.push_back(SGPR); 2988 } 2989 2990 MachineInstrBuilder MIB = 2991 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 2992 get(AMDGPU::REG_SEQUENCE), DstReg); 2993 for (unsigned i = 0; i < SubRegs; ++i) { 2994 MIB.addReg(SRegs[i]); 2995 MIB.addImm(RI.getSubRegFromChannel(i)); 2996 } 2997 return DstReg; 2998 } 2999 3000 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 3001 MachineInstr &MI) const { 3002 3003 // If the pointer is stored in VGPRs, then we need to move it to 3004 // SGPRs using v_readfirstlane. This is safe because we only select 3005 // loads with uniform pointers to SMRD instructions, so we know the 3006 // pointer value is uniform. 3007 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 3008 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 3009 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 3010 SBase->setReg(SGPR); 3011 } 3012 } 3013 3014 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 3015 MachineBasicBlock::iterator I, 3016 const TargetRegisterClass *DstRC, 3017 MachineOperand &Op, 3018 MachineRegisterInfo &MRI, 3019 const DebugLoc &DL) const { 3020 3021 unsigned OpReg = Op.getReg(); 3022 unsigned OpSubReg = Op.getSubReg(); 3023 3024 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 3025 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 3026 3027 // Check if operand is already the correct register class. 3028 if (DstRC == OpRC) 3029 return; 3030 3031 unsigned DstReg = MRI.createVirtualRegister(DstRC); 3032 MachineInstr *Copy = 3033 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 3034 3035 Op.setReg(DstReg); 3036 Op.setSubReg(0); 3037 3038 MachineInstr *Def = MRI.getVRegDef(OpReg); 3039 if (!Def) 3040 return; 3041 3042 // Try to eliminate the copy if it is copying an immediate value. 3043 if (Def->isMoveImmediate()) 3044 FoldImmediate(*Copy, *Def, OpReg, &MRI); 3045 } 3046 3047 void SIInstrInfo::legalizeOperands(MachineInstr &MI) const { 3048 MachineFunction &MF = *MI.getParent()->getParent(); 3049 MachineRegisterInfo &MRI = MF.getRegInfo(); 3050 3051 // Legalize VOP2 3052 if (isVOP2(MI) || isVOPC(MI)) { 3053 legalizeOperandsVOP2(MRI, MI); 3054 return; 3055 } 3056 3057 // Legalize VOP3 3058 if (isVOP3(MI)) { 3059 legalizeOperandsVOP3(MRI, MI); 3060 return; 3061 } 3062 3063 // Legalize SMRD 3064 if (isSMRD(MI)) { 3065 legalizeOperandsSMRD(MRI, MI); 3066 return; 3067 } 3068 3069 // Legalize REG_SEQUENCE and PHI 3070 // The register class of the operands must be the same type as the register 3071 // class of the output.
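// For example, a PHI merging an SALU-defined value with a VALU-defined one
// must have all of its inputs, and its result, placed in VGPR classes, since
// the VGPR->SGPR direction cannot be copied.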
3072 if (MI.getOpcode() == AMDGPU::PHI) { 3073 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; 3074 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 3075 if (!MI.getOperand(i).isReg() || 3076 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) 3077 continue; 3078 const TargetRegisterClass *OpRC = 3079 MRI.getRegClass(MI.getOperand(i).getReg()); 3080 if (RI.hasVGPRs(OpRC)) { 3081 VRC = OpRC; 3082 } else { 3083 SRC = OpRC; 3084 } 3085 } 3086 3087 // If any of the operands are VGPR registers, then they all must be VGPRs; 3088 // otherwise we will create illegal VGPR->SGPR copies when legalizing 3089 // them. 3090 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { 3091 if (!VRC) { 3092 assert(SRC); 3093 VRC = RI.getEquivalentVGPRClass(SRC); 3094 } 3095 RC = VRC; 3096 } else { 3097 RC = SRC; 3098 } 3099 3100 // Update all the operands so they have the same type. 3101 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3102 MachineOperand &Op = MI.getOperand(I); 3103 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3104 continue; 3105 3106 // MI is a PHI instruction. 3107 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 3108 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 3109 3110 // Avoid creating no-op copies with the same src and dst reg class. These 3111 // confuse some of the machine passes. 3112 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 3113 } 3114 } 3115 3116 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 3117 // VGPR dest type and SGPR sources, insert copies so all operands are 3118 // VGPRs. This seems to help operand folding / the register coalescer. 3119 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 3120 MachineBasicBlock *MBB = MI.getParent(); 3121 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 3122 if (RI.hasVGPRs(DstRC)) { 3123 // Update all the operands so they are VGPR register classes. These may 3124 // not be the same register class because REG_SEQUENCE supports mixing 3125 // subregister index types e.g. sub0_sub1 + sub2 + sub3 3126 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3127 MachineOperand &Op = MI.getOperand(I); 3128 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3129 continue; 3130 3131 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 3132 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 3133 if (VRC == OpRC) 3134 continue; 3135 3136 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 3137 Op.setIsKill(); 3138 } 3139 } 3140 3141 return; 3142 } 3143 3144 // Legalize INSERT_SUBREG 3145 // src0 must have the same register class as dst 3146 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 3147 unsigned Dst = MI.getOperand(0).getReg(); 3148 unsigned Src0 = MI.getOperand(1).getReg(); 3149 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 3150 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 3151 if (DstRC != Src0RC) { 3152 MachineBasicBlock *MBB = MI.getParent(); 3153 MachineOperand &Op = MI.getOperand(1); 3154 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 3155 } 3156 return; 3157 } 3158 3159 // Legalize MIMG and MUBUF/MTBUF for shaders. 3160 // 3161 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 3162 // scratch memory access. In both cases, the legalization never involves 3163 // conversion to the addr64 form.
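// (For these, a misplaced rsrc/samp operand is instead made uniform by
// reading it back per 32-bit channel with v_readfirstlane; see
// readlaneVGPRToSGPR above.)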
3164 if (isMIMG(MI) || 3165 (AMDGPU::isShader(MF.getFunction()->getCallingConv()) && 3166 (isMUBUF(MI) || isMTBUF(MI)))) { 3167 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 3168 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { 3169 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); 3170 SRsrc->setReg(SGPR); 3171 } 3172 3173 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 3174 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { 3175 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); 3176 SSamp->setReg(SGPR); 3177 } 3178 return; 3179 } 3180 3181 // Legalize MUBUF* instructions by converting to addr64 form. 3182 // FIXME: If we start using the non-addr64 instructions for compute, we 3183 // may need to legalize them as above. This especially applies to the 3184 // buffer_load_format_* variants and variants with idxen (or bothen). 3185 int SRsrcIdx = 3186 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 3187 if (SRsrcIdx != -1) { 3188 // We have an MUBUF instruction 3189 MachineOperand *SRsrc = &MI.getOperand(SRsrcIdx); 3190 unsigned SRsrcRC = get(MI.getOpcode()).OpInfo[SRsrcIdx].RegClass; 3191 if (RI.getCommonSubClass(MRI.getRegClass(SRsrc->getReg()), 3192 RI.getRegClass(SRsrcRC))) { 3193 // The operands are legal. 3194 // FIXME: We may need to legalize operands besides srsrc. 3195 return; 3196 } 3197 3198 MachineBasicBlock &MBB = *MI.getParent(); 3199 3200 // Extract the ptr from the resource descriptor. 3201 unsigned SRsrcPtr = buildExtractSubReg(MI, MRI, *SRsrc, 3202 &AMDGPU::VReg_128RegClass, AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 3203 3204 // Create an empty resource descriptor 3205 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3206 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3207 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3208 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 3209 uint64_t RsrcDataFormat = getDefaultRsrcDataFormat(); 3210 3211 // Zero64 = 0 3212 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B64), Zero64) 3213 .addImm(0); 3214 3215 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 3216 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 3217 .addImm(RsrcDataFormat & 0xFFFFFFFF); 3218 3219 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 3220 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 3221 .addImm(RsrcDataFormat >> 32); 3222 3223 // NewSRsrc = {Zero64, SRsrcFormat} 3224 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewSRsrc) 3225 .addReg(Zero64) 3226 .addImm(AMDGPU::sub0_sub1) 3227 .addReg(SRsrcFormatLo) 3228 .addImm(AMDGPU::sub2) 3229 .addReg(SRsrcFormatHi) 3230 .addImm(AMDGPU::sub3); 3231 3232 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 3233 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 3234 if (VAddr) { 3235 // This is already an ADDR64 instruction so we need to add the pointer 3236 // extracted from the resource descriptor to the current value of VAddr.
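// The 64-bit add below is a lo/hi carry chain: V_ADD_I32_e32 defines VCC with
// the carry-out of the low halves, and V_ADDC_U32_e32 consumes VCC as the
// carry-in while adding the high halves.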
3237 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3238 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3239 3240 // NewVaddrLo = SRsrcPtr:sub0 + VAddr:sub0 3241 DebugLoc DL = MI.getDebugLoc(); 3242 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) 3243 .addReg(SRsrcPtr, 0, AMDGPU::sub0) 3244 .addReg(VAddr->getReg(), 0, AMDGPU::sub0); 3245 3246 // NewVaddrHi = SRsrcPtr:sub1 + VAddr:sub1 3247 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) 3248 .addReg(SRsrcPtr, 0, AMDGPU::sub1) 3249 .addReg(VAddr->getReg(), 0, AMDGPU::sub1); 3250 3251 // NewVaddr = {NewVaddrHi, NewVaddrLo} 3252 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) 3253 .addReg(NewVAddrLo) 3254 .addImm(AMDGPU::sub0) 3255 .addReg(NewVAddrHi) 3256 .addImm(AMDGPU::sub1); 3257 } else { 3258 // This instruction is the _OFFSET variant, so we need to convert it to 3259 // ADDR64. 3260 assert(MBB.getParent()->getSubtarget<SISubtarget>().getGeneration() 3261 < SISubtarget::VOLCANIC_ISLANDS && 3262 "FIXME: Need to emit flat atomics here"); 3263 3264 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 3265 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3266 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 3267 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 3268 3269 // Atomics with return have an additional tied operand and are 3270 // missing some of the special bits. 3271 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 3272 MachineInstr *Addr64; 3273 3274 if (!VDataIn) { 3275 // Regular buffer load / store. 3276 MachineInstrBuilder MIB = 3277 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 3278 .add(*VData) 3279 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. 3280 // This will be replaced later 3281 // with the new value of vaddr. 3282 .add(*SRsrc) 3283 .add(*SOffset) 3284 .add(*Offset); 3285 3286 // Atomics do not have this operand. 3287 if (const MachineOperand *GLC = 3288 getNamedOperand(MI, AMDGPU::OpName::glc)) { 3289 MIB.addImm(GLC->getImm()); 3290 } 3291 3292 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); 3293 3294 if (const MachineOperand *TFE = 3295 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 3296 MIB.addImm(TFE->getImm()); 3297 } 3298 3299 MIB.setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 3300 Addr64 = MIB; 3301 } else { 3302 // Atomics with return. 3303 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 3304 .add(*VData) 3305 .add(*VDataIn) 3306 .addReg(AMDGPU::NoRegister) // Dummy value for vaddr. 3307 // This will be replaced later 3308 // with the new value of vaddr.
3309 .add(*SRsrc) 3310 .add(*SOffset) 3311 .add(*Offset) 3312 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 3313 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); 3314 } 3315 3316 MI.removeFromParent(); 3317 3318 // NewVaddr = {NewVaddrHi, NewVaddrLo} 3319 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 3320 NewVAddr) 3321 .addReg(SRsrcPtr, 0, AMDGPU::sub0) 3322 .addImm(AMDGPU::sub0) 3323 .addReg(SRsrcPtr, 0, AMDGPU::sub1) 3324 .addImm(AMDGPU::sub1); 3325 3326 VAddr = getNamedOperand(*Addr64, AMDGPU::OpName::vaddr); 3327 SRsrc = getNamedOperand(*Addr64, AMDGPU::OpName::srsrc); 3328 } 3329 3330 // Update the instruction to use NewVaddr 3331 VAddr->setReg(NewVAddr); 3332 // Update the instruction to use NewSRsrc 3333 SRsrc->setReg(NewSRsrc); 3334 } 3335 } 3336 3337 void SIInstrInfo::moveToVALU(MachineInstr &TopInst) const { 3338 SmallVector<MachineInstr *, 128> Worklist; 3339 Worklist.push_back(&TopInst); 3340 3341 while (!Worklist.empty()) { 3342 MachineInstr &Inst = *Worklist.pop_back_val(); 3343 MachineBasicBlock *MBB = Inst.getParent(); 3344 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 3345 3346 unsigned Opcode = Inst.getOpcode(); 3347 unsigned NewOpcode = getVALUOp(Inst); 3348 3349 // Handle some special cases 3350 switch (Opcode) { 3351 default: 3352 break; 3353 case AMDGPU::S_AND_B64: 3354 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64); 3355 Inst.eraseFromParent(); 3356 continue; 3357 3358 case AMDGPU::S_OR_B64: 3359 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64); 3360 Inst.eraseFromParent(); 3361 continue; 3362 3363 case AMDGPU::S_XOR_B64: 3364 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64); 3365 Inst.eraseFromParent(); 3366 continue; 3367 3368 case AMDGPU::S_NOT_B64: 3369 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32); 3370 Inst.eraseFromParent(); 3371 continue; 3372 3373 case AMDGPU::S_BCNT1_I32_B64: 3374 splitScalar64BitBCNT(Worklist, Inst); 3375 Inst.eraseFromParent(); 3376 continue; 3377 3378 case AMDGPU::S_BFE_I64: { 3379 splitScalar64BitBFE(Worklist, Inst); 3380 Inst.eraseFromParent(); 3381 continue; 3382 } 3383 3384 case AMDGPU::S_LSHL_B32: 3385 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3386 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 3387 swapOperands(Inst); 3388 } 3389 break; 3390 case AMDGPU::S_ASHR_I32: 3391 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3392 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 3393 swapOperands(Inst); 3394 } 3395 break; 3396 case AMDGPU::S_LSHR_B32: 3397 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3398 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 3399 swapOperands(Inst); 3400 } 3401 break; 3402 case AMDGPU::S_LSHL_B64: 3403 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3404 NewOpcode = AMDGPU::V_LSHLREV_B64; 3405 swapOperands(Inst); 3406 } 3407 break; 3408 case AMDGPU::S_ASHR_I64: 3409 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3410 NewOpcode = AMDGPU::V_ASHRREV_I64; 3411 swapOperands(Inst); 3412 } 3413 break; 3414 case AMDGPU::S_LSHR_B64: 3415 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) { 3416 NewOpcode = AMDGPU::V_LSHRREV_B64; 3417 swapOperands(Inst); 3418 } 3419 break; 3420 3421 case AMDGPU::S_ABS_I32: 3422 lowerScalarAbs(Worklist, Inst); 3423 Inst.eraseFromParent(); 3424 continue; 3425 3426 case AMDGPU::S_CBRANCH_SCC0: 3427 case AMDGPU::S_CBRANCH_SCC1: 3428 // Clear unused bits of vcc 3429 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 3430 
AMDGPU::VCC)
3431 .addReg(AMDGPU::EXEC)
3432 .addReg(AMDGPU::VCC);
3433 break;
3434
3435 case AMDGPU::S_BFE_U64:
3436 case AMDGPU::S_BFM_B64:
3437 llvm_unreachable("Moving this op to VALU not implemented");
3438
3439 case AMDGPU::S_PACK_LL_B32_B16:
3440 case AMDGPU::S_PACK_LH_B32_B16:
3441 case AMDGPU::S_PACK_HH_B32_B16: {
3442 movePackToVALU(Worklist, MRI, Inst);
3443 Inst.eraseFromParent();
3444 continue;
3445 }
3446 }
3447
3448 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
3449 // We cannot move this instruction to the VALU, so we should try to
3450 // legalize its operands instead.
3451 legalizeOperands(Inst);
3452 continue;
3453 }
3454
3455 // Use the new VALU opcode.
3456 const MCInstrDesc &NewDesc = get(NewOpcode);
3457 Inst.setDesc(NewDesc);
3458
3459 // Remove any references to SCC. Vector instructions can't read from it,
3460 // and we're about to add the implicit use / defs of VCC, so we don't want
3461 // both.
3462 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
3463 MachineOperand &Op = Inst.getOperand(i);
3464 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
3465 Inst.RemoveOperand(i);
3466 addSCCDefUsersToVALUWorklist(Inst, Worklist);
3467 }
3468 }
3469
3470 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
3471 // We are converting these to a BFE, so we need to add the missing
3472 // operands for the size and offset.
3473 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
3474 Inst.addOperand(MachineOperand::CreateImm(0));
3475 Inst.addOperand(MachineOperand::CreateImm(Size));
3476
3477 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
3478 // The VALU version adds the second operand to the result, so insert an
3479 // extra 0 operand.
3480 Inst.addOperand(MachineOperand::CreateImm(0));
3481 }
3482
3483 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
3484
3485 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
3486 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
3487 // If we need to move this to VGPRs, we need to unpack the second operand
3488 // back into the 2 separate ones for bit offset and width.
3489 assert(OffsetWidthOp.isImm() &&
3490 "Scalar BFE is only implemented for constant width and offset");
3491 uint32_t Imm = OffsetWidthOp.getImm();
3492
3493 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3494 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3495 Inst.RemoveOperand(2); // Remove old immediate.
3496 Inst.addOperand(MachineOperand::CreateImm(Offset));
3497 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
3498 }
3499
3500 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
3501 unsigned NewDstReg = AMDGPU::NoRegister;
3502 if (HasDst) {
3503 unsigned DstReg = Inst.getOperand(0).getReg();
3504 if (TargetRegisterInfo::isPhysicalRegister(DstReg))
3505 continue;
3506
3507 // Update the destination register class.
3508 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
3509 if (!NewDstRC)
3510 continue;
3511
3512 if (Inst.isCopy() &&
3513 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) &&
3514 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
3515 // Instead of creating a copy where src and dst are the same register
3516 // class, we just replace all uses of dst with src. These kinds of
3517 // copies interfere with the heuristics MachineSink uses to decide
3518 // whether or not to split a critical edge,
since the pass assumes
3519 // that copies will end up as machine instructions and not be
3520 // eliminated.
3521 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
3522 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
3523 MRI.clearKillFlags(Inst.getOperand(1).getReg());
3524 Inst.getOperand(0).setReg(DstReg);
3525 continue;
3526 }
3527
3528 NewDstReg = MRI.createVirtualRegister(NewDstRC);
3529 MRI.replaceRegWith(DstReg, NewDstReg);
3530 }
3531
3532 // Legalize the operands.
3533 legalizeOperands(Inst);
3534
3535 if (HasDst)
3536 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
3537 }
3538 }
3539
3540 void SIInstrInfo::lowerScalarAbs(SmallVectorImpl<MachineInstr *> &Worklist,
3541 MachineInstr &Inst) const {
3542 MachineBasicBlock &MBB = *Inst.getParent();
3543 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3544 MachineBasicBlock::iterator MII = Inst;
3545 DebugLoc DL = Inst.getDebugLoc();
3546
3547 MachineOperand &Dest = Inst.getOperand(0);
3548 MachineOperand &Src = Inst.getOperand(1);
3549 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3550 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3551
3552 BuildMI(MBB, MII, DL, get(AMDGPU::V_SUB_I32_e32), TmpReg)
3553 .addImm(0)
3554 .addReg(Src.getReg());
3555
3556 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
3557 .addReg(Src.getReg())
3558 .addReg(TmpReg);
3559
3560 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3561 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3562 }
3563
3564 void SIInstrInfo::splitScalar64BitUnaryOp(
3565 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3566 unsigned Opcode) const {
3567 MachineBasicBlock &MBB = *Inst.getParent();
3568 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3569
3570 MachineOperand &Dest = Inst.getOperand(0);
3571 MachineOperand &Src0 = Inst.getOperand(1);
3572 DebugLoc DL = Inst.getDebugLoc();
3573
3574 MachineBasicBlock::iterator MII = Inst;
3575
3576 const MCInstrDesc &InstDesc = get(Opcode);
3577 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3578 MRI.getRegClass(Src0.getReg()) :
3579 &AMDGPU::SGPR_32RegClass;
3580
3581 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3582
3583 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3584 AMDGPU::sub0, Src0SubRC);
3585
3586 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3587 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3588 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3589
3590 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3591 BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);
3592
3593 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3594 AMDGPU::sub1, Src0SubRC);
3595
3596 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3597 BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);
3598
3599 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3600 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3601 .addReg(DestSub0)
3602 .addImm(AMDGPU::sub0)
3603 .addReg(DestSub1)
3604 .addImm(AMDGPU::sub1);
3605
3606 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3607
3608 // We don't need to legalizeOperands here because for a single operand, src0
3609 // will support any kind of input.
3610
3611 // Move all users of this moved value.
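// Summary of the expansion above for, e.g., S_NOT_B64 (register names are
// hypothetical vregs, used only for illustration):
//   %lo  = V_NOT_B32_e32 %src0.sub0
//   %hi  = V_NOT_B32_e32 %src0.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
// The call below then requeues any user of %dst that cannot read VGPRs.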
3612 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3613 }
3614
3615 void SIInstrInfo::splitScalar64BitBinaryOp(
3616 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst,
3617 unsigned Opcode) const {
3618 MachineBasicBlock &MBB = *Inst.getParent();
3619 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3620
3621 MachineOperand &Dest = Inst.getOperand(0);
3622 MachineOperand &Src0 = Inst.getOperand(1);
3623 MachineOperand &Src1 = Inst.getOperand(2);
3624 DebugLoc DL = Inst.getDebugLoc();
3625
3626 MachineBasicBlock::iterator MII = Inst;
3627
3628 const MCInstrDesc &InstDesc = get(Opcode);
3629 const TargetRegisterClass *Src0RC = Src0.isReg() ?
3630 MRI.getRegClass(Src0.getReg()) :
3631 &AMDGPU::SGPR_32RegClass;
3632
3633 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
3634 const TargetRegisterClass *Src1RC = Src1.isReg() ?
3635 MRI.getRegClass(Src1.getReg()) :
3636 &AMDGPU::SGPR_32RegClass;
3637
3638 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
3639
3640 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3641 AMDGPU::sub0, Src0SubRC);
3642 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3643 AMDGPU::sub0, Src1SubRC);
3644
3645 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
3646 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
3647 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
3648
3649 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
3650 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
3651 .add(SrcReg0Sub0)
3652 .add(SrcReg1Sub0);
3653
3654 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
3655 AMDGPU::sub1, Src0SubRC);
3656 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
3657 AMDGPU::sub1, Src1SubRC);
3658
3659 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
3660 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
3661 .add(SrcReg0Sub1)
3662 .add(SrcReg1Sub1);
3663
3664 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC);
3665 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
3666 .addReg(DestSub0)
3667 .addImm(AMDGPU::sub0)
3668 .addReg(DestSub1)
3669 .addImm(AMDGPU::sub1);
3670
3671 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
3672
3673 // Try to legalize the operands in case we need to swap the order to keep it
3674 // valid.
3675 legalizeOperands(LoHalf);
3676 legalizeOperands(HiHalf);
3677
3678 // Move all users of this moved value.
3679 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
3680 }
3681
3682 void SIInstrInfo::splitScalar64BitBCNT(
3683 SmallVectorImpl<MachineInstr *> &Worklist, MachineInstr &Inst) const {
3684 MachineBasicBlock &MBB = *Inst.getParent();
3685 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3686
3687 MachineBasicBlock::iterator MII = Inst;
3688 DebugLoc DL = Inst.getDebugLoc();
3689
3690 MachineOperand &Dest = Inst.getOperand(0);
3691 MachineOperand &Src = Inst.getOperand(1);
3692
3693 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
3694 const TargetRegisterClass *SrcRC = Src.isReg() ?
3695 MRI.getRegClass(Src.getReg()) :
3696 &AMDGPU::SGPR_32RegClass;
3697
3698 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3699 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3700
3701 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
3702
3703 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3704 AMDGPU::sub0, SrcSubRC);
3705 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
3706 AMDGPU::sub1, SrcSubRC);
3707
3708 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
3709
3710 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
3711
3712 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3713
3714 // We don't need to legalize operands here. src0 for either instruction can be
3715 // an SGPR, and the second input is unused or determined here.
3716 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3717 }
3718
3719 void SIInstrInfo::splitScalar64BitBFE(SmallVectorImpl<MachineInstr *> &Worklist,
3720 MachineInstr &Inst) const {
3721 MachineBasicBlock &MBB = *Inst.getParent();
3722 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
3723 MachineBasicBlock::iterator MII = Inst;
3724 DebugLoc DL = Inst.getDebugLoc();
3725
3726 MachineOperand &Dest = Inst.getOperand(0);
3727 uint32_t Imm = Inst.getOperand(2).getImm();
3728 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
3729 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
3730
3731 (void) Offset;
3732
3733 // Only sext_inreg cases handled.
3734 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
3735 Offset == 0 && "Not implemented");
3736
3737 if (BitWidth < 32) {
3738 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3739 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3740 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3741
3742 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
3743 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
3744 .addImm(0)
3745 .addImm(BitWidth);
3746
3747 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
3748 .addImm(31)
3749 .addReg(MidRegLo);
3750
3751 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3752 .addReg(MidRegLo)
3753 .addImm(AMDGPU::sub0)
3754 .addReg(MidRegHi)
3755 .addImm(AMDGPU::sub1);
3756
3757 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3758 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3759 return;
3760 }
3761
3762 MachineOperand &Src = Inst.getOperand(1);
3763 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3764 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
3765
3766 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
3767 .addImm(31)
3768 .addReg(Src.getReg(), 0, AMDGPU::sub0);
3769
3770 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
3771 .addReg(Src.getReg(), 0, AMDGPU::sub0)
3772 .addImm(AMDGPU::sub0)
3773 .addReg(TmpReg)
3774 .addImm(AMDGPU::sub1);
3775
3776 MRI.replaceRegWith(Dest.getReg(), ResultReg);
3777 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
3778 }
3779
3780 void SIInstrInfo::addUsersToMoveToVALUWorklist(
3781 unsigned DstReg,
3782 MachineRegisterInfo &MRI,
3783 SmallVectorImpl<MachineInstr *> &Worklist) const {
3784 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
3785 E = MRI.use_end(); I != E;) {
3786 MachineInstr &UseMI = *I->getParent();
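// If this use cannot accept a VGPR operand, the user itself has to be moved
// to the VALU; it is queued once, and the iterator then skips over the rest
// of that instruction's uses of DstReg.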
3787 if (!canReadVGPR(UseMI, I.getOperandNo())) { 3788 Worklist.push_back(&UseMI); 3789 3790 do { 3791 ++I; 3792 } while (I != E && I->getParent() == &UseMI); 3793 } else { 3794 ++I; 3795 } 3796 } 3797 } 3798 3799 void SIInstrInfo::movePackToVALU(SmallVectorImpl<MachineInstr *> &Worklist, 3800 MachineRegisterInfo &MRI, 3801 MachineInstr &Inst) const { 3802 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3803 MachineBasicBlock *MBB = Inst.getParent(); 3804 MachineOperand &Src0 = Inst.getOperand(1); 3805 MachineOperand &Src1 = Inst.getOperand(2); 3806 const DebugLoc &DL = Inst.getDebugLoc(); 3807 3808 switch (Inst.getOpcode()) { 3809 case AMDGPU::S_PACK_LL_B32_B16: { 3810 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3811 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3812 3813 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 3814 // 0. 3815 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 3816 .addImm(0xffff); 3817 3818 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 3819 .addReg(ImmReg, RegState::Kill) 3820 .add(Src0); 3821 3822 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 3823 .add(Src1) 3824 .addImm(16) 3825 .addReg(TmpReg, RegState::Kill); 3826 break; 3827 } 3828 case AMDGPU::S_PACK_LH_B32_B16: { 3829 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3830 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 3831 .addImm(0xffff); 3832 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 3833 .addReg(ImmReg, RegState::Kill) 3834 .add(Src0) 3835 .add(Src1); 3836 break; 3837 } 3838 case AMDGPU::S_PACK_HH_B32_B16: { 3839 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3840 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3841 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 3842 .addImm(16) 3843 .add(Src0); 3844 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 3845 .addImm(0xffff0000); 3846 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 3847 .add(Src1) 3848 .addReg(ImmReg, RegState::Kill) 3849 .addReg(TmpReg, RegState::Kill); 3850 break; 3851 } 3852 default: 3853 llvm_unreachable("unhandled s_pack_* instruction"); 3854 } 3855 3856 MachineOperand &Dest = Inst.getOperand(0); 3857 MRI.replaceRegWith(Dest.getReg(), ResultReg); 3858 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 3859 } 3860 3861 void SIInstrInfo::addSCCDefUsersToVALUWorklist( 3862 MachineInstr &SCCDefInst, SmallVectorImpl<MachineInstr *> &Worklist) const { 3863 // This assumes that all the users of SCC are in the same block 3864 // as the SCC def. 3865 for (MachineInstr &MI : 3866 llvm::make_range(MachineBasicBlock::iterator(SCCDefInst), 3867 SCCDefInst.getParent()->end())) { 3868 // Exit if we find another SCC def. 3869 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC) != -1) 3870 return; 3871 3872 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC) != -1) 3873 Worklist.push_back(&MI); 3874 } 3875 } 3876 3877 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 3878 const MachineInstr &Inst) const { 3879 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 3880 3881 switch (Inst.getOpcode()) { 3882 // For target instructions, getOpRegClass just returns the virtual register 3883 // class associated with the operand, so we need to find an equivalent VGPR 3884 // register class in order to move the instruction to the VALU. 
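// Illustrative mapping for the cases below (using the usual AMDGPU register
// classes): an SReg_64 destination of, say, a REG_SEQUENCE maps to VReg_64,
// while a destination already in a VGPR class yields nullptr, meaning there
// is nothing to rewrite.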
3885 case AMDGPU::COPY:
3886 case AMDGPU::PHI:
3887 case AMDGPU::REG_SEQUENCE:
3888 case AMDGPU::INSERT_SUBREG:
3889 if (RI.hasVGPRs(NewDstRC))
3890 return nullptr;
3891
3892 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
3893 if (!NewDstRC)
3894 return nullptr;
3895 return NewDstRC;
3896 default:
3897 return NewDstRC;
3898 }
3899 }
3900
3901 // Find the one SGPR operand we are allowed to use.
3902 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
3903 int OpIndices[3]) const {
3904 const MCInstrDesc &Desc = MI.getDesc();
3905
3906 // Find the one SGPR operand we are allowed to use.
3907 //
3908 // First we need to consider the instruction's operand requirements before
3909 // legalizing. Some operands are required to be SGPRs, such as implicit uses
3910 // of VCC, but we are still bound by the constant bus requirement to only use
3911 // one.
3912 //
3913 // If the operand's class is an SGPR, we can never move it.
3914
3915 unsigned SGPRReg = findImplicitSGPRRead(MI);
3916 if (SGPRReg != AMDGPU::NoRegister)
3917 return SGPRReg;
3918
3919 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
3920 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3921
3922 for (unsigned i = 0; i < 3; ++i) {
3923 int Idx = OpIndices[i];
3924 if (Idx == -1)
3925 break;
3926
3927 const MachineOperand &MO = MI.getOperand(Idx);
3928 if (!MO.isReg())
3929 continue;
3930
3931 // Is this operand statically required to be an SGPR based on the operand
3932 // constraints?
3933 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
3934 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
3935 if (IsRequiredSGPR)
3936 return MO.getReg();
3937
3938 // If this could be a VGPR or an SGPR, check the dynamic register class.
3939 unsigned Reg = MO.getReg();
3940 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
3941 if (RI.isSGPRClass(RegRC))
3942 UsedSGPRs[i] = Reg;
3943 }
3944
3945 // We don't have a required SGPR operand, so we have a bit more freedom in
3946 // selecting operands to move.
3947
3948 // Try to select the most used SGPR. If an SGPR is equal to one of the
3949 // others, we choose that.
3950 //
3951 // e.g.
3952 // V_FMA_F32 v0, s0, s0, s0 -> No moves
3953 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
3954
3955 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
3956 // prefer those.
3957
3958 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
3959 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
3960 SGPRReg = UsedSGPRs[0];
3961 }
3962
3963 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
3964 if (UsedSGPRs[1] == UsedSGPRs[2])
3965 SGPRReg = UsedSGPRs[1];
3966 }
3967
3968 return SGPRReg;
3969 }
3970
3971 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
3972 unsigned OperandName) const {
3973 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
3974 if (Idx == -1)
3975 return nullptr;
3976
3977 return &MI.getOperand(Idx);
3978 }
3979
3980 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
3981 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
3982 if (ST.isAmdHsaOS()) {
3983 // Set ATC = 1. GFX9 doesn't have this bit.
3984 if (ST.getGeneration() <= SISubtarget::VOLCANIC_ISLANDS)
3985 RsrcDataFormat |= (1ULL << 56);
3986
3987 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
3988 // Note that this disables TC L2 and therefore decreases performance.
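// (A note on the encoding, inferred from the shift below: MTYPE is a
// two-bit field whose low bit is descriptor bit 59, so 2ULL << 59 writes
// the value MTYPE_UC into bits [60:59].)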
3989 if (ST.getGeneration() == SISubtarget::VOLCANIC_ISLANDS)
3990 RsrcDataFormat |= (2ULL << 59);
3991 }
3992
3993 return RsrcDataFormat;
3994 }
3995
3996 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
3997 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
3998 AMDGPU::RSRC_TID_ENABLE |
3999 0xffffffff; // Size.
4000
4001 // GFX9 doesn't have ELEMENT_SIZE.
4002 if (ST.getGeneration() <= SISubtarget::VOLCANIC_ISLANDS) {
4003 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
4004 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
4005 }
4006
4007 // IndexStride = 64.
4008 Rsrc23 |= UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
4009
4010 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
4011 // Clear them unless we want a huge stride.
4012 if (ST.getGeneration() >= SISubtarget::VOLCANIC_ISLANDS)
4013 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
4014
4015 return Rsrc23;
4016 }
4017
4018 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
4019 unsigned Opc = MI.getOpcode();
4020
4021 return isSMRD(Opc);
4022 }
4023
4024 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
4025 unsigned Opc = MI.getOpcode();
4026
4027 return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
4028 }
4029
4030 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
4031 int &FrameIndex) const {
4032 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
4033 if (!Addr || !Addr->isFI())
4034 return AMDGPU::NoRegister;
4035
4036 assert(!MI.memoperands_empty() &&
4037 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUASI.PRIVATE_ADDRESS);
4038
4039 FrameIndex = Addr->getIndex();
4040 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
4041 }
4042
4043 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
4044 int &FrameIndex) const {
4045 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
4046 assert(Addr && Addr->isFI());
4047 FrameIndex = Addr->getIndex();
4048 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
4049 }
4050
4051 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
4052 int &FrameIndex) const {
4053
4054 if (!MI.mayLoad())
4055 return AMDGPU::NoRegister;
4056
4057 if (isMUBUF(MI) || isVGPRSpill(MI))
4058 return isStackAccess(MI, FrameIndex);
4059
4060 if (isSGPRSpill(MI))
4061 return isSGPRStackAccess(MI, FrameIndex);
4062
4063 return AMDGPU::NoRegister;
4064 }
4065
4066 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
4067 int &FrameIndex) const {
4068 if (!MI.mayStore())
4069 return AMDGPU::NoRegister;
4070
4071 if (isMUBUF(MI) || isVGPRSpill(MI))
4072 return isStackAccess(MI, FrameIndex);
4073
4074 if (isSGPRSpill(MI))
4075 return isSGPRStackAccess(MI, FrameIndex);
4076
4077 return AMDGPU::NoRegister;
4078 }
4079
4080 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
4081 unsigned Opc = MI.getOpcode();
4082 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
4083 unsigned DescSize = Desc.getSize();
4084
4085 // If we have a definitive size, we can use it. Otherwise we need to inspect
4086 // the operands to know the size.
4087 //
4088 // FIXME: Instructions that have a base 32-bit encoding report their size as
4089 // 4, even though they are really 8 bytes if they have a literal operand.
4090 if (DescSize != 0 && DescSize != 4)
4091 return DescSize;
4092
4093 // 4-byte instructions may have a 32-bit literal encoded after them. Check
4094 // operands that could ever be literals.
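// For example (operand values are hypothetical): a VALU or SALU instruction
// whose sources are all registers or inline constants stays at 4 bytes,
// while one that encodes a literal such as 0x12345678 in src0 or src1 is
// reported as 8 bytes by the checks below.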
4095 if (isVALU(MI) || isSALU(MI)) { 4096 if (isFixedSize(MI)) 4097 return DescSize; 4098 4099 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4100 if (Src0Idx == -1) 4101 return 4; // No operands. 4102 4103 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx])) 4104 return 8; 4105 4106 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4107 if (Src1Idx == -1) 4108 return 4; 4109 4110 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) 4111 return 8; 4112 4113 return 4; 4114 } 4115 4116 if (DescSize == 4) 4117 return 4; 4118 4119 switch (Opc) { 4120 case TargetOpcode::IMPLICIT_DEF: 4121 case TargetOpcode::KILL: 4122 case TargetOpcode::DBG_VALUE: 4123 case TargetOpcode::BUNDLE: 4124 case TargetOpcode::EH_LABEL: 4125 return 0; 4126 case TargetOpcode::INLINEASM: { 4127 const MachineFunction *MF = MI.getParent()->getParent(); 4128 const char *AsmStr = MI.getOperand(0).getSymbolName(); 4129 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo()); 4130 } 4131 default: 4132 llvm_unreachable("unable to find instruction size"); 4133 } 4134 } 4135 4136 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 4137 if (!isFLAT(MI)) 4138 return false; 4139 4140 if (MI.memoperands_empty()) 4141 return true; 4142 4143 for (const MachineMemOperand *MMO : MI.memoperands()) { 4144 if (MMO->getAddrSpace() == AMDGPUASI.FLAT_ADDRESS) 4145 return true; 4146 } 4147 return false; 4148 } 4149 4150 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 4151 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 4152 } 4153 4154 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 4155 MachineBasicBlock *IfEnd) const { 4156 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 4157 assert(TI != IfEntry->end()); 4158 4159 MachineInstr *Branch = &(*TI); 4160 MachineFunction *MF = IfEntry->getParent(); 4161 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 4162 4163 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 4164 unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4165 MachineInstr *SIIF = 4166 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 4167 .add(Branch->getOperand(0)) 4168 .add(Branch->getOperand(1)); 4169 MachineInstr *SIEND = 4170 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 4171 .addReg(DstReg); 4172 4173 IfEntry->erase(TI); 4174 IfEntry->insert(IfEntry->end(), SIIF); 4175 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 4176 } 4177 } 4178 4179 void SIInstrInfo::convertNonUniformLoopRegion( 4180 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 4181 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 4182 // We expect 2 terminators, one conditional and one unconditional. 
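// After the rewrite below, the loop has roughly this shape (register names
// are hypothetical):
//   header:  %mask = PHI [ 0, <preheader> ], [ %break, <latch> ]
//   latch:   %break = SI_IF_BREAK %mask, <cond>
//            SI_LOOP %break, <header>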
4183 assert(TI != LoopEnd->end()); 4184 4185 MachineInstr *Branch = &(*TI); 4186 MachineFunction *MF = LoopEnd->getParent(); 4187 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 4188 4189 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 4190 4191 unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4192 unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4193 MachineInstrBuilder HeaderPHIBuilder = 4194 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 4195 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 4196 E = LoopEntry->pred_end(); 4197 PI != E; ++PI) { 4198 if (*PI == LoopEnd) { 4199 HeaderPHIBuilder.addReg(BackEdgeReg); 4200 } else { 4201 MachineBasicBlock *PMBB = *PI; 4202 unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4203 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 4204 ZeroReg, 0); 4205 HeaderPHIBuilder.addReg(ZeroReg); 4206 } 4207 HeaderPHIBuilder.addMBB(*PI); 4208 } 4209 MachineInstr *HeaderPhi = HeaderPHIBuilder; 4210 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 4211 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 4212 .addReg(DstReg) 4213 .add(Branch->getOperand(0)); 4214 MachineInstr *SILOOP = 4215 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 4216 .addReg(BackEdgeReg) 4217 .addMBB(LoopEntry); 4218 4219 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 4220 LoopEnd->erase(TI); 4221 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 4222 LoopEnd->insert(LoopEnd->end(), SILOOP); 4223 } 4224 } 4225 4226 ArrayRef<std::pair<int, const char *>> 4227 SIInstrInfo::getSerializableTargetIndices() const { 4228 static const std::pair<int, const char *> TargetIndices[] = { 4229 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 4230 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 4231 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 4232 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 4233 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 4234 return makeArrayRef(TargetIndices); 4235 } 4236 4237 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 4238 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 4239 ScheduleHazardRecognizer * 4240 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 4241 const ScheduleDAG *DAG) const { 4242 return new GCNHazardRecognizer(DAG->MF); 4243 } 4244 4245 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 4246 /// pass. 4247 ScheduleHazardRecognizer * 4248 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 4249 return new GCNHazardRecognizer(MF); 4250 } 4251 4252 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 4253 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 4254 MI.modifiesRegister(AMDGPU::EXEC, &RI); 4255 } 4256 4257 MachineInstrBuilder 4258 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 4259 MachineBasicBlock::iterator I, 4260 const DebugLoc &DL, 4261 unsigned DestReg) const { 4262 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4263 4264 unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4265 4266 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 4267 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 4268 } 4269