//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests for
// long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));
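// Usage note for the option above (illustrative, not from the original
// source): tests typically shrink this range, e.g. passing something like
// "-amdgpu-s-branch-bits=4" to llc, so that long-branch expansion can be
// exercised with small basic blocks.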

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
                                          MachineOperand *&BaseOp,
                                          int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = OffsetImm->getImm();
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
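    // For example (illustrative, not from the original source): a
    // ds_read2_b32 with offset0 = 4 and offset1 = 5 is reported as a single
    // access at byte offset 4 * EltSize from the base address register.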
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = EltSize * Offset0;
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = AddrReg;
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseOp = SBaseReg;
    Offset = OffsetImm->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isFLAT(LdSt)) {
    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (VAddr) {
      // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;

      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }

    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  const MachineOperand &BaseOp1,
                                  const MachineInstr &MI2,
                                  const MachineOperand &BaseOp2) {
  // Support only base operands with base registers.
  // Note: this could be extended to support FI operands.
  if (!BaseOp1.isReg() || !BaseOp2.isReg())
    return false;

  if (BaseOp1.isIdenticalTo(BaseOp2))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  const MachineFunction &MF = *MI1.getParent()->getParent();
  const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
  Base1 = GetUnderlyingObject(Base1, DL);
  Base2 = GetUnderlyingObject(Base2, DL);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
                                      MachineOperand &BaseOp2,
                                      unsigned NumLoads) const {
  MachineInstr &FirstLdSt = *BaseOp1.getParent();
  MachineInstr &SecondLdSt = *BaseOp2.getParent();

  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
    return false;

  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    const unsigned MaxGlobalLoadCluster = 6;
    if (NumLoads > MaxGlobalLoadCluster)
      return false;

    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    if (!FirstDst)
      FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
    if (!SecondDst)
      SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}
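// Illustrative example of the LoadClusterThreshold logic above (not from the
// original source): with 64-bit destinations (8 bytes each), two clustered
// loads account for 2 * 8 = 16 bytes and stay within the limit, while a third
// such load would exceed it and stop further clustering.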
// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
        .addImm(-1)
        .addImm(0);
      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RI.getRegSizeInBits(*RC) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    // Use the sub-register index from the split parts, matching the analogous
    // loop in copyPhysReg above.
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addReg(FalseReg)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(TrueReg)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end up
    // needing them, and need to ensure that the reserved registers are
    // correctly handled.

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg())        // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
    PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)        // vaddr
    .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
    .addReg(MFI->getFrameOffsetReg()) // scratch_offset
    .addImm(0)                        // offset
    .addMemOperand(MMO);
}

/// \param @Offset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  DebugLoc DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    DebugLoc DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      unsigned TIDIGXReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg);
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}
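// Illustrative example for insertWaitStates above (not from the original
// source): a request for 10 wait states emits "s_nop 7" (eight wait states)
// followed by "s_nop 1" (two more), since each s_nop N provides N + 1 wait
// states.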

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator)
      BuildMI(MBB, MBB.end(), DebugLoc(),
              get(Info->returnsVoid() ? AMDGPU::S_ENDPGM : AMDGPU::SI_RETURN_TO_EPILOG));
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC);
    MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
                                 MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    expandPostRAPseudo(*Copy);
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
      .addReg(AMDGPU::EXEC);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    unsigned VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
        BuildMI(MBB, MI, DL, MovRelDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .add(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
        MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
      MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::EXIT_WWM: {
    // This only gets its own opcode so that SIFixWWMLiveness can tell when WWM
    // is exited.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  }
  case TargetOpcode::BUNDLE: {
    if (!MI.mayLoad())
      return false;

    // If it is a load it must be a memory clause
    for (MachineBasicBlock::instr_iterator I = MI.getIterator();
         I->isBundledWithSucc(); ++I) {
      I->unbundleFromSucc();
      for (MachineOperand &MO : I->operands())
        if (MO.isReg())
          MO.setIsInternalRead(false);
    }

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  unsigned Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
}

bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())
    return false;

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}
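// Illustrative note on isBranchOffsetInRange above (not from the original
// source): with the default of 16 offset bits, a branch can reach roughly
// +/-2^15 dwords (about +/-128 KiB) from the instruction following it;
// anything farther is expanded by insertIndirectBranch below.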

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal so
    // there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately after
  // s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  } else {
    // Backwards branch.
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
    .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short branch
  // into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(
    AMDGPU::SReg_64RegClass,
    MachineBasicBlock::iterator(GetPC), false, 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);

  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  MachineBasicBlock *CondBB = nullptr;

  if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    CondBB = I->getOperand(1).getMBB();
    Cond.push_back(I->getOperand(0));
  } else {
    BranchPredicate Pred = getBranchPredicate(I->getOpcode());
    if (Pred == INVALID_BR)
      return true;

    CondBB = I->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(Pred));
    Cond.push_back(I->getOperand(1)); // Save the branch register.
  }
  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  auto E = MBB.end();
  if (I == E)
    return false;

  // Skip over the instructions that are artificially terminators for special
  // exec management.
  while (I != E && !I->isBranch() && !I->isReturn() &&
         I->getOpcode() != AMDGPU::SI_MASK_BRANCH) {
    switch (I->getOpcode()) {
    case AMDGPU::SI_MASK_BRANCH:
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
      break;
    case AMDGPU::SI_IF:
    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      // FIXME: It's messy that these need to be considered here at all.
      return true;
    default:
      llvm_unreachable("unexpected non-branch terminator inst");
    }

    ++I;
  }

  if (I == E)
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

// Copy the flags onto the implicit condition register operand.
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
      .add(Cond[0])
      .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
1774 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 1775 1776 if (BytesAdded) 1777 *BytesAdded = 4; 1778 return 1; 1779 } 1780 1781 assert(TBB && FBB); 1782 1783 MachineInstr *CondBr = 1784 BuildMI(&MBB, DL, get(Opcode)) 1785 .addMBB(TBB); 1786 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 1787 .addMBB(FBB); 1788 1789 MachineOperand &CondReg = CondBr->getOperand(1); 1790 CondReg.setIsUndef(Cond[1].isUndef()); 1791 CondReg.setIsKill(Cond[1].isKill()); 1792 1793 if (BytesAdded) 1794 *BytesAdded = 8; 1795 1796 return 2; 1797 } 1798 1799 bool SIInstrInfo::reverseBranchCondition( 1800 SmallVectorImpl<MachineOperand> &Cond) const { 1801 if (Cond.size() != 2) { 1802 return true; 1803 } 1804 1805 if (Cond[0].isImm()) { 1806 Cond[0].setImm(-Cond[0].getImm()); 1807 return false; 1808 } 1809 1810 return true; 1811 } 1812 1813 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 1814 ArrayRef<MachineOperand> Cond, 1815 unsigned TrueReg, unsigned FalseReg, 1816 int &CondCycles, 1817 int &TrueCycles, int &FalseCycles) const { 1818 switch (Cond[0].getImm()) { 1819 case VCCNZ: 1820 case VCCZ: { 1821 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1822 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 1823 assert(MRI.getRegClass(FalseReg) == RC); 1824 1825 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 1826 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 1827 1828 // Limit to equal cost for branch vs. N v_cndmask_b32s. 1829 return !RI.isSGPRClass(RC) && NumInsts <= 6; 1830 } 1831 case SCC_TRUE: 1832 case SCC_FALSE: { 1833 // FIXME: We could insert for VGPRs if we could replace the original compare 1834 // with a vector one. 1835 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1836 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 1837 assert(MRI.getRegClass(FalseReg) == RC); 1838 1839 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 1840 1841 // Multiples of 8 can do s_cselect_b64 1842 if (NumInsts % 2 == 0) 1843 NumInsts /= 2; 1844 1845 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 1846 return RI.isSGPRClass(RC); 1847 } 1848 default: 1849 return false; 1850 } 1851 } 1852 1853 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 1854 MachineBasicBlock::iterator I, const DebugLoc &DL, 1855 unsigned DstReg, ArrayRef<MachineOperand> Cond, 1856 unsigned TrueReg, unsigned FalseReg) const { 1857 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 1858 if (Pred == VCCZ || Pred == SCC_FALSE) { 1859 Pred = static_cast<BranchPredicate>(-Pred); 1860 std::swap(TrueReg, FalseReg); 1861 } 1862 1863 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1864 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 1865 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 1866 1867 if (DstSize == 32) { 1868 unsigned SelOp = Pred == SCC_TRUE ? 1869 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; 1870 1871 // Instruction's operands are backwards from what is expected. 
1872 MachineInstr *Select = 1873 BuildMI(MBB, I, DL, get(SelOp), DstReg) 1874 .addReg(FalseReg) 1875 .addReg(TrueReg); 1876 1877 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1878 return; 1879 } 1880 1881 if (DstSize == 64 && Pred == SCC_TRUE) { 1882 MachineInstr *Select = 1883 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 1884 .addReg(FalseReg) 1885 .addReg(TrueReg); 1886 1887 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1888 return; 1889 } 1890 1891 static const int16_t Sub0_15[] = { 1892 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 1893 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 1894 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 1895 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 1896 }; 1897 1898 static const int16_t Sub0_15_64[] = { 1899 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 1900 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 1901 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 1902 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 1903 }; 1904 1905 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 1906 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 1907 const int16_t *SubIndices = Sub0_15; 1908 int NElts = DstSize / 32; 1909 1910 // 64-bit select is only avaialble for SALU. 1911 if (Pred == SCC_TRUE) { 1912 SelOp = AMDGPU::S_CSELECT_B64; 1913 EltRC = &AMDGPU::SGPR_64RegClass; 1914 SubIndices = Sub0_15_64; 1915 1916 assert(NElts % 2 == 0); 1917 NElts /= 2; 1918 } 1919 1920 MachineInstrBuilder MIB = BuildMI( 1921 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 1922 1923 I = MIB->getIterator(); 1924 1925 SmallVector<unsigned, 8> Regs; 1926 for (int Idx = 0; Idx != NElts; ++Idx) { 1927 unsigned DstElt = MRI.createVirtualRegister(EltRC); 1928 Regs.push_back(DstElt); 1929 1930 unsigned SubIdx = SubIndices[Idx]; 1931 1932 MachineInstr *Select = 1933 BuildMI(MBB, I, DL, get(SelOp), DstElt) 1934 .addReg(FalseReg, 0, SubIdx) 1935 .addReg(TrueReg, 0, SubIdx); 1936 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1937 1938 MIB.addReg(DstElt) 1939 .addImm(SubIdx); 1940 } 1941 } 1942 1943 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 1944 switch (MI.getOpcode()) { 1945 case AMDGPU::V_MOV_B32_e32: 1946 case AMDGPU::V_MOV_B32_e64: 1947 case AMDGPU::V_MOV_B64_PSEUDO: { 1948 // If there are additional implicit register operands, this may be used for 1949 // register indexing so the source register operand isn't simply copied. 
1950 unsigned NumOps = MI.getDesc().getNumOperands() + 1951 MI.getDesc().getNumImplicitUses(); 1952 1953 return MI.getNumOperands() == NumOps; 1954 } 1955 case AMDGPU::S_MOV_B32: 1956 case AMDGPU::S_MOV_B64: 1957 case AMDGPU::COPY: 1958 return true; 1959 default: 1960 return false; 1961 } 1962 } 1963 1964 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 1965 unsigned Kind) const { 1966 switch(Kind) { 1967 case PseudoSourceValue::Stack: 1968 case PseudoSourceValue::FixedStack: 1969 return AMDGPUAS::PRIVATE_ADDRESS; 1970 case PseudoSourceValue::ConstantPool: 1971 case PseudoSourceValue::GOT: 1972 case PseudoSourceValue::JumpTable: 1973 case PseudoSourceValue::GlobalValueCallEntry: 1974 case PseudoSourceValue::ExternalSymbolCallEntry: 1975 case PseudoSourceValue::TargetCustom: 1976 return AMDGPUAS::CONSTANT_ADDRESS; 1977 } 1978 return AMDGPUAS::FLAT_ADDRESS; 1979 } 1980 1981 static void removeModOperands(MachineInstr &MI) { 1982 unsigned Opc = MI.getOpcode(); 1983 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1984 AMDGPU::OpName::src0_modifiers); 1985 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1986 AMDGPU::OpName::src1_modifiers); 1987 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1988 AMDGPU::OpName::src2_modifiers); 1989 1990 MI.RemoveOperand(Src2ModIdx); 1991 MI.RemoveOperand(Src1ModIdx); 1992 MI.RemoveOperand(Src0ModIdx); 1993 } 1994 1995 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 1996 unsigned Reg, MachineRegisterInfo *MRI) const { 1997 if (!MRI->hasOneNonDBGUse(Reg)) 1998 return false; 1999 2000 switch (DefMI.getOpcode()) { 2001 default: 2002 return false; 2003 case AMDGPU::S_MOV_B64: 2004 // TODO: We could fold 64-bit immediates, but this get compilicated 2005 // when there are sub-registers. 2006 return false; 2007 2008 case AMDGPU::V_MOV_B32_e32: 2009 case AMDGPU::S_MOV_B32: 2010 break; 2011 } 2012 2013 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2014 assert(ImmOp); 2015 // FIXME: We could handle FrameIndex values here. 2016 if (!ImmOp->isImm()) 2017 return false; 2018 2019 unsigned Opc = UseMI.getOpcode(); 2020 if (Opc == AMDGPU::COPY) { 2021 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg()); 2022 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2023 UseMI.setDesc(get(NewOpc)); 2024 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm()); 2025 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2026 return true; 2027 } 2028 2029 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2030 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) { 2031 // Don't fold if we are using source or output modifiers. The new VOP2 2032 // instructions don't have them. 2033 if (hasAnyModifiersSet(UseMI)) 2034 return false; 2035 2036 // If this is a free constant, there's no reason to do this. 2037 // TODO: We could fold this here instead of letting SIFoldOperands do it 2038 // later. 2039 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2040 2041 // Any src operand can be used for the legality check. 2042 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2043 return false; 2044 2045 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64; 2046 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2047 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2048 2049 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 
2050 // We should only expect these to be on src0 due to canonicalizations. 2051 if (Src0->isReg() && Src0->getReg() == Reg) { 2052 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2053 return false; 2054 2055 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2056 return false; 2057 2058 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2059 2060 const int64_t Imm = ImmOp->getImm(); 2061 2062 // FIXME: This would be a lot easier if we could return a new instruction 2063 // instead of having to modify in place. 2064 2065 // Remove these first since they are at the end. 2066 UseMI.RemoveOperand( 2067 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2068 UseMI.RemoveOperand( 2069 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2070 2071 unsigned Src1Reg = Src1->getReg(); 2072 unsigned Src1SubReg = Src1->getSubReg(); 2073 Src0->setReg(Src1Reg); 2074 Src0->setSubReg(Src1SubReg); 2075 Src0->setIsKill(Src1->isKill()); 2076 2077 if (Opc == AMDGPU::V_MAC_F32_e64 || 2078 Opc == AMDGPU::V_MAC_F16_e64) 2079 UseMI.untieRegOperand( 2080 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2081 2082 Src1->ChangeToImmediate(Imm); 2083 2084 removeModOperands(UseMI); 2085 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16)); 2086 2087 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2088 if (DeleteDef) 2089 DefMI.eraseFromParent(); 2090 2091 return true; 2092 } 2093 2094 // Added part is the constant: Use v_madak_{f16, f32}. 2095 if (Src2->isReg() && Src2->getReg() == Reg) { 2096 // Not allowed to use constant bus for another operand. 2097 // We can however allow an inline immediate as src0. 2098 bool Src0Inlined = false; 2099 if (Src0->isReg()) { 2100 // Try to inline constant if possible. 2101 // If the Def moves immediate and the use is single 2102 // We are saving VGPR here. 2103 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2104 if (Def && Def->isMoveImmediate() && 2105 isInlineConstant(Def->getOperand(1)) && 2106 MRI->hasOneUse(Src0->getReg())) { 2107 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2108 Src0Inlined = true; 2109 } else if ((RI.isPhysicalRegister(Src0->getReg()) && 2110 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg()))) || 2111 (RI.isVirtualRegister(Src0->getReg()) && 2112 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))) 2113 return false; 2114 // VGPR is okay as Src0 - fallthrough 2115 } 2116 2117 if (Src1->isReg() && !Src0Inlined ) { 2118 // We have one slot for inlinable constant so far - try to fill it 2119 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2120 if (Def && Def->isMoveImmediate() && 2121 isInlineConstant(Def->getOperand(1)) && 2122 MRI->hasOneUse(Src1->getReg()) && 2123 commuteInstruction(UseMI)) { 2124 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2125 } else if ((RI.isPhysicalRegister(Src1->getReg()) && 2126 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2127 (RI.isVirtualRegister(Src1->getReg()) && 2128 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2129 return false; 2130 // VGPR is okay as Src1 - fallthrough 2131 } 2132 2133 const int64_t Imm = ImmOp->getImm(); 2134 2135 // FIXME: This would be a lot easier if we could return a new instruction 2136 // instead of having to modify in place. 2137 2138 // Remove these first since they are at the end. 
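      // Dropping the trailing operands first keeps the named operand indices
      // of the operands that still need to be removed valid.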
2139 UseMI.RemoveOperand( 2140 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2141 UseMI.RemoveOperand( 2142 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2143 2144 if (Opc == AMDGPU::V_MAC_F32_e64 || 2145 Opc == AMDGPU::V_MAC_F16_e64) 2146 UseMI.untieRegOperand( 2147 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2148 2149 // ChangingToImmediate adds Src2 back to the instruction. 2150 Src2->ChangeToImmediate(Imm); 2151 2152 // These come before src2. 2153 removeModOperands(UseMI); 2154 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16)); 2155 2156 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2157 if (DeleteDef) 2158 DefMI.eraseFromParent(); 2159 2160 return true; 2161 } 2162 } 2163 2164 return false; 2165 } 2166 2167 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2168 int WidthB, int OffsetB) { 2169 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2170 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2171 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 2172 return LowOffset + LowWidth <= HighOffset; 2173 } 2174 2175 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa, 2176 MachineInstr &MIb) const { 2177 MachineOperand *BaseOp0, *BaseOp1; 2178 int64_t Offset0, Offset1; 2179 2180 if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) && 2181 getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) { 2182 if (!BaseOp0->isIdenticalTo(*BaseOp1)) 2183 return false; 2184 2185 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2186 // FIXME: Handle ds_read2 / ds_write2. 2187 return false; 2188 } 2189 unsigned Width0 = (*MIa.memoperands_begin())->getSize(); 2190 unsigned Width1 = (*MIb.memoperands_begin())->getSize(); 2191 if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { 2192 return true; 2193 } 2194 } 2195 2196 return false; 2197 } 2198 2199 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa, 2200 MachineInstr &MIb, 2201 AliasAnalysis *AA) const { 2202 assert((MIa.mayLoad() || MIa.mayStore()) && 2203 "MIa must load from or modify a memory location"); 2204 assert((MIb.mayLoad() || MIb.mayStore()) && 2205 "MIb must load from or modify a memory location"); 2206 2207 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2208 return false; 2209 2210 // XXX - Can we relax this between address spaces? 2211 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2212 return false; 2213 2214 if (AA && MIa.hasOneMemOperand() && MIb.hasOneMemOperand()) { 2215 const MachineMemOperand *MMOa = *MIa.memoperands_begin(); 2216 const MachineMemOperand *MMOb = *MIb.memoperands_begin(); 2217 if (MMOa->getValue() && MMOb->getValue()) { 2218 MemoryLocation LocA(MMOa->getValue(), MMOa->getSize(), MMOa->getAAInfo()); 2219 MemoryLocation LocB(MMOb->getValue(), MMOb->getSize(), MMOb->getAAInfo()); 2220 if (!AA->alias(LocA, LocB)) 2221 return true; 2222 } 2223 } 2224 2225 // TODO: Should we check the address space from the MachineMemOperand? That 2226 // would allow us to distinguish objects we know don't alias based on the 2227 // underlying address space, even if it was lowered to a different one, 2228 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2229 // buffer. 
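  // For now, fall back on the instruction encoding class: a DS (LDS/GDS)
  // access cannot overlap a buffer, scalar, or segment-specific FLAT access,
  // while anything involving a generic FLAT access is conservatively treated
  // as aliasing unless both sides are FLAT with provably disjoint offsets.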
  if (isDS(MIa)) {
    if (isDS(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
  }

  if (isMUBUF(MIa) || isMTBUF(MIa)) {
    if (isMUBUF(MIb) || isMTBUF(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isSMRD(MIb);
  }

  if (isSMRD(MIa)) {
    if (isSMRD(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
  }

  if (isFLAT(MIa)) {
    if (isFLAT(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

// Return the immediate moved by the V_MOV_B32 that defines \p MO, or 0 if
// there is no such defining move. Note this means an immediate of 0 is never
// reported as foldable through this helper.
static int64_t getFoldableImm(const MachineOperand* MO) {
  if (!MO->isReg())
    return 0;
  const MachineFunction *MF = MO->getParent()->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  auto Def = MRI.getUniqueVRegDef(MO->getReg());
  if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
      Def->getOperand(1).isImm())
    return Def->getOperand(1).getImm();
  return 0;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineInstr &MI,
                                                 LiveVariables *LV) const {
  unsigned Opc = MI.getOpcode();
  bool IsF16 = false;
  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64;

  switch (Opc) {
  default:
    return nullptr;
  case AMDGPU::V_MAC_F16_e64:
    IsF16 = true;
    LLVM_FALLTHROUGH;
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_FMAC_F32_e64:
    break;
  case AMDGPU::V_MAC_F16_e32:
    IsF16 = true;
    LLVM_FALLTHROUGH;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e32: {
    int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::src0);
    const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
    if (!Src0->isReg() && !Src0->isImm())
      return nullptr;

    if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
      return nullptr;

    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
  const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
  const MachineOperand *Src0Mods =
    getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mods =
    getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
  const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);

  if (!IsFMA && !Src0Mods && !Src1Mods && !Clamp && !Omod &&
      // If we have an SGPR input, we will violate the constant bus restriction.
      (!Src0->isReg() ||
       !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
    if (auto Imm = getFoldableImm(Src2)) {
      return BuildMI(*MBB, MI, MI.getDebugLoc(),
                     get(IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32))
               .add(*Dst)
               .add(*Src0)
               .add(*Src1)
               .addImm(Imm);
    }
    if (auto Imm = getFoldableImm(Src1)) {
      return BuildMI(*MBB, MI, MI.getDebugLoc(),
                     get(IsF16 ?
AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32)) 2332 .add(*Dst) 2333 .add(*Src0) 2334 .addImm(Imm) 2335 .add(*Src2); 2336 } 2337 if (auto Imm = getFoldableImm(Src0)) { 2338 if (isOperandLegal(MI, AMDGPU::getNamedOperandIdx(AMDGPU::V_MADMK_F32, 2339 AMDGPU::OpName::src0), Src1)) 2340 return BuildMI(*MBB, MI, MI.getDebugLoc(), 2341 get(IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32)) 2342 .add(*Dst) 2343 .add(*Src1) 2344 .addImm(Imm) 2345 .add(*Src2); 2346 } 2347 } 2348 2349 assert((!IsFMA || !IsF16) && "fmac only expected with f32"); 2350 unsigned NewOpc = IsFMA ? AMDGPU::V_FMA_F32 : 2351 (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2352 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2353 .add(*Dst) 2354 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2355 .add(*Src0) 2356 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2357 .add(*Src1) 2358 .addImm(0) // Src mods 2359 .add(*Src2) 2360 .addImm(Clamp ? Clamp->getImm() : 0) 2361 .addImm(Omod ? Omod->getImm() : 0); 2362 } 2363 2364 // It's not generally safe to move VALU instructions across these since it will 2365 // start using the register as a base index rather than directly. 2366 // XXX - Why isn't hasSideEffects sufficient for these? 2367 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2368 switch (MI.getOpcode()) { 2369 case AMDGPU::S_SET_GPR_IDX_ON: 2370 case AMDGPU::S_SET_GPR_IDX_MODE: 2371 case AMDGPU::S_SET_GPR_IDX_OFF: 2372 return true; 2373 default: 2374 return false; 2375 } 2376 } 2377 2378 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2379 const MachineBasicBlock *MBB, 2380 const MachineFunction &MF) const { 2381 // XXX - Do we want the SP check in the base implementation? 2382 2383 // Target-independent instructions do not have an implicit-use of EXEC, even 2384 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2385 // boundaries prevents incorrect movements of such instructions. 2386 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2387 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2388 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2389 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2390 changesVGPRIndexingMode(MI); 2391 } 2392 2393 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 2394 unsigned Opcode = MI.getOpcode(); 2395 2396 if (MI.mayStore() && isSMRD(MI)) 2397 return true; // scalar store or atomic 2398 2399 // These instructions cause shader I/O that may cause hardware lockups 2400 // when executed with an empty EXEC mask. 2401 // 2402 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 2403 // EXEC = 0, but checking for that case here seems not worth it 2404 // given the typical code patterns. 2405 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 2406 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE) 2407 return true; 2408 2409 if (MI.isInlineAsm()) 2410 return true; // conservative assumption 2411 2412 // These are like SALU instructions in terms of effects, so it's questionable 2413 // whether we should return true for those. 2414 // 2415 // However, executing them with EXEC = 0 causes them to operate on undefined 2416 // data, which we avoid by returning true here. 
2417 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 2418 return true; 2419 2420 return false; 2421 } 2422 2423 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2424 switch (Imm.getBitWidth()) { 2425 case 32: 2426 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2427 ST.hasInv2PiInlineImm()); 2428 case 64: 2429 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2430 ST.hasInv2PiInlineImm()); 2431 case 16: 2432 return ST.has16BitInsts() && 2433 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 2434 ST.hasInv2PiInlineImm()); 2435 default: 2436 llvm_unreachable("invalid bitwidth"); 2437 } 2438 } 2439 2440 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 2441 uint8_t OperandType) const { 2442 if (!MO.isImm() || 2443 OperandType < AMDGPU::OPERAND_SRC_FIRST || 2444 OperandType > AMDGPU::OPERAND_SRC_LAST) 2445 return false; 2446 2447 // MachineOperand provides no way to tell the true operand size, since it only 2448 // records a 64-bit value. We need to know the size to determine if a 32-bit 2449 // floating point immediate bit pattern is legal for an integer immediate. It 2450 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 2451 2452 int64_t Imm = MO.getImm(); 2453 switch (OperandType) { 2454 case AMDGPU::OPERAND_REG_IMM_INT32: 2455 case AMDGPU::OPERAND_REG_IMM_FP32: 2456 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2457 case AMDGPU::OPERAND_REG_INLINE_C_FP32: { 2458 int32_t Trunc = static_cast<int32_t>(Imm); 2459 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 2460 } 2461 case AMDGPU::OPERAND_REG_IMM_INT64: 2462 case AMDGPU::OPERAND_REG_IMM_FP64: 2463 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2464 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2465 return AMDGPU::isInlinableLiteral64(MO.getImm(), 2466 ST.hasInv2PiInlineImm()); 2467 case AMDGPU::OPERAND_REG_IMM_INT16: 2468 case AMDGPU::OPERAND_REG_IMM_FP16: 2469 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2470 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2471 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 2472 // A few special case instructions have 16-bit operands on subtargets 2473 // where 16-bit instructions are not legal. 2474 // TODO: Do the 32-bit immediates work? 
We shouldn't really need to handle 2475 // constants in these cases 2476 int16_t Trunc = static_cast<int16_t>(Imm); 2477 return ST.has16BitInsts() && 2478 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2479 } 2480 2481 return false; 2482 } 2483 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 2484 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { 2485 if (isUInt<16>(Imm)) { 2486 int16_t Trunc = static_cast<int16_t>(Imm); 2487 return ST.has16BitInsts() && 2488 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2489 } 2490 if (!(Imm & 0xffff)) { 2491 return ST.has16BitInsts() && 2492 AMDGPU::isInlinableLiteral16(Imm >> 16, ST.hasInv2PiInlineImm()); 2493 } 2494 uint32_t Trunc = static_cast<uint32_t>(Imm); 2495 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 2496 } 2497 default: 2498 llvm_unreachable("invalid bitwidth"); 2499 } 2500 } 2501 2502 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 2503 const MCOperandInfo &OpInfo) const { 2504 switch (MO.getType()) { 2505 case MachineOperand::MO_Register: 2506 return false; 2507 case MachineOperand::MO_Immediate: 2508 return !isInlineConstant(MO, OpInfo); 2509 case MachineOperand::MO_FrameIndex: 2510 case MachineOperand::MO_MachineBasicBlock: 2511 case MachineOperand::MO_ExternalSymbol: 2512 case MachineOperand::MO_GlobalAddress: 2513 case MachineOperand::MO_MCSymbol: 2514 return true; 2515 default: 2516 llvm_unreachable("unexpected operand type"); 2517 } 2518 } 2519 2520 static bool compareMachineOp(const MachineOperand &Op0, 2521 const MachineOperand &Op1) { 2522 if (Op0.getType() != Op1.getType()) 2523 return false; 2524 2525 switch (Op0.getType()) { 2526 case MachineOperand::MO_Register: 2527 return Op0.getReg() == Op1.getReg(); 2528 case MachineOperand::MO_Immediate: 2529 return Op0.getImm() == Op1.getImm(); 2530 default: 2531 llvm_unreachable("Didn't expect to be comparing these operand types"); 2532 } 2533 } 2534 2535 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 2536 const MachineOperand &MO) const { 2537 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; 2538 2539 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 2540 2541 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 2542 return true; 2543 2544 if (OpInfo.RegClass < 0) 2545 return false; 2546 2547 if (MO.isImm() && isInlineConstant(MO, OpInfo)) 2548 return RI.opCanUseInlineConstant(OpInfo.OperandType); 2549 2550 return RI.opCanUseLiteralConstant(OpInfo.OperandType); 2551 } 2552 2553 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 2554 int Op32 = AMDGPU::getVOPe32(Opcode); 2555 if (Op32 == -1) 2556 return false; 2557 2558 return pseudoToMCOpcode(Op32) != -1; 2559 } 2560 2561 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 2562 // The src0_modifier operand is present on all instructions 2563 // that have modifiers. 

  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
  return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         hasModifiersSet(MI, AMDGPU::OpName::omod);
}

bool SIInstrInfo::canShrink(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) const {
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc. We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
    default: return false;

    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64: {
      const MachineOperand *Src1
        = getNamedOperand(MI, AMDGPU::OpName::src1);
      if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
        return false;
      // Additional verification is needed for sdst/src2.
      return true;
    }
    case AMDGPU::V_MAC_F32_e64:
    case AMDGPU::V_MAC_F16_e64:
    case AMDGPU::V_FMAC_F32_e64:
      if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
        return false;
      break;

    case AMDGPU::V_CNDMASK_B32_e64:
      break;
    }
  }

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
               hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Check output modifiers
  return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

// Set VCC operand with all flags from \p Orig, except for setting it as
// implicit.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {
  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
                                           unsigned Op32) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineInstrBuilder Inst32 =
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32));

  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
  // For VOPC instructions, this is replaced by an implicit def of vcc.
2656 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 2657 if (Op32DstIdx != -1) { 2658 // dst 2659 Inst32.add(MI.getOperand(0)); 2660 } else { 2661 assert(MI.getOperand(0).getReg() == AMDGPU::VCC && 2662 "Unexpected case"); 2663 } 2664 2665 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 2666 2667 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2668 if (Src1) 2669 Inst32.add(*Src1); 2670 2671 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2672 2673 if (Src2) { 2674 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 2675 if (Op32Src2Idx != -1) { 2676 Inst32.add(*Src2); 2677 } else { 2678 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 2679 // replaced with an implicit read of vcc. This was already added 2680 // during the initial BuildMI, so find it to preserve the flags. 2681 copyFlagsToImplicitVCC(*Inst32, *Src2); 2682 } 2683 } 2684 2685 return Inst32; 2686 } 2687 2688 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 2689 const MachineOperand &MO, 2690 const MCOperandInfo &OpInfo) const { 2691 // Literal constants use the constant bus. 2692 //if (isLiteralConstantLike(MO, OpInfo)) 2693 // return true; 2694 if (MO.isImm()) 2695 return !isInlineConstant(MO, OpInfo); 2696 2697 if (!MO.isReg()) 2698 return true; // Misc other operands like FrameIndex 2699 2700 if (!MO.isUse()) 2701 return false; 2702 2703 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 2704 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 2705 2706 // FLAT_SCR is just an SGPR pair. 2707 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) 2708 return true; 2709 2710 // EXEC register uses the constant bus. 2711 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) 2712 return true; 2713 2714 // SGPRs use the constant bus 2715 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || 2716 (!MO.isImplicit() && 2717 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || 2718 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); 2719 } 2720 2721 static unsigned findImplicitSGPRRead(const MachineInstr &MI) { 2722 for (const MachineOperand &MO : MI.implicit_operands()) { 2723 // We only care about reads. 
2724 if (MO.isDef()) 2725 continue; 2726 2727 switch (MO.getReg()) { 2728 case AMDGPU::VCC: 2729 case AMDGPU::M0: 2730 case AMDGPU::FLAT_SCR: 2731 return MO.getReg(); 2732 2733 default: 2734 break; 2735 } 2736 } 2737 2738 return AMDGPU::NoRegister; 2739 } 2740 2741 static bool shouldReadExec(const MachineInstr &MI) { 2742 if (SIInstrInfo::isVALU(MI)) { 2743 switch (MI.getOpcode()) { 2744 case AMDGPU::V_READLANE_B32: 2745 case AMDGPU::V_READLANE_B32_si: 2746 case AMDGPU::V_READLANE_B32_vi: 2747 case AMDGPU::V_WRITELANE_B32: 2748 case AMDGPU::V_WRITELANE_B32_si: 2749 case AMDGPU::V_WRITELANE_B32_vi: 2750 return false; 2751 } 2752 2753 return true; 2754 } 2755 2756 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 2757 SIInstrInfo::isSALU(MI) || 2758 SIInstrInfo::isSMRD(MI)) 2759 return false; 2760 2761 return true; 2762 } 2763 2764 static bool isSubRegOf(const SIRegisterInfo &TRI, 2765 const MachineOperand &SuperVec, 2766 const MachineOperand &SubReg) { 2767 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) 2768 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 2769 2770 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 2771 SubReg.getReg() == SuperVec.getReg(); 2772 } 2773 2774 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 2775 StringRef &ErrInfo) const { 2776 uint16_t Opcode = MI.getOpcode(); 2777 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 2778 return true; 2779 2780 const MachineFunction *MF = MI.getParent()->getParent(); 2781 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2782 2783 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 2784 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 2785 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 2786 2787 // Make sure the number of operands is correct. 2788 const MCInstrDesc &Desc = get(Opcode); 2789 if (!Desc.isVariadic() && 2790 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 2791 ErrInfo = "Instruction has wrong number of operands."; 2792 return false; 2793 } 2794 2795 if (MI.isInlineAsm()) { 2796 // Verify register classes for inlineasm constraints. 2797 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 2798 I != E; ++I) { 2799 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 2800 if (!RC) 2801 continue; 2802 2803 const MachineOperand &Op = MI.getOperand(I); 2804 if (!Op.isReg()) 2805 continue; 2806 2807 unsigned Reg = Op.getReg(); 2808 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) { 2809 ErrInfo = "inlineasm operand has incorrect register class."; 2810 return false; 2811 } 2812 } 2813 2814 return true; 2815 } 2816 2817 // Make sure the register classes are correct. 2818 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 2819 if (MI.getOperand(i).isFPImm()) { 2820 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 2821 "all fp values to integers."; 2822 return false; 2823 } 2824 2825 int RegClass = Desc.OpInfo[i].RegClass; 2826 2827 switch (Desc.OpInfo[i].OperandType) { 2828 case MCOI::OPERAND_REGISTER: 2829 if (MI.getOperand(i).isImm()) { 2830 ErrInfo = "Illegal immediate value for operand."; 2831 return false; 2832 } 2833 break; 2834 case AMDGPU::OPERAND_REG_IMM_INT32: 2835 case AMDGPU::OPERAND_REG_IMM_FP32: 2836 break; 2837 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2838 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 2839 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2840 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2841 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2842 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2843 const MachineOperand &MO = MI.getOperand(i); 2844 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 2845 ErrInfo = "Illegal immediate value for operand."; 2846 return false; 2847 } 2848 break; 2849 } 2850 case MCOI::OPERAND_IMMEDIATE: 2851 case AMDGPU::OPERAND_KIMM32: 2852 // Check if this operand is an immediate. 2853 // FrameIndex operands will be replaced by immediates, so they are 2854 // allowed. 2855 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 2856 ErrInfo = "Expected immediate, but got non-immediate"; 2857 return false; 2858 } 2859 LLVM_FALLTHROUGH; 2860 default: 2861 continue; 2862 } 2863 2864 if (!MI.getOperand(i).isReg()) 2865 continue; 2866 2867 if (RegClass != -1) { 2868 unsigned Reg = MI.getOperand(i).getReg(); 2869 if (Reg == AMDGPU::NoRegister || 2870 TargetRegisterInfo::isVirtualRegister(Reg)) 2871 continue; 2872 2873 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 2874 if (!RC->contains(Reg)) { 2875 ErrInfo = "Operand has incorrect register class."; 2876 return false; 2877 } 2878 } 2879 } 2880 2881 // Verify SDWA 2882 if (isSDWA(MI)) { 2883 if (!ST.hasSDWA()) { 2884 ErrInfo = "SDWA is not supported on this target"; 2885 return false; 2886 } 2887 2888 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 2889 2890 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 2891 2892 for (int OpIdx: OpIndicies) { 2893 if (OpIdx == -1) 2894 continue; 2895 const MachineOperand &MO = MI.getOperand(OpIdx); 2896 2897 if (!ST.hasSDWAScalar()) { 2898 // Only VGPRS on VI 2899 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 2900 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 2901 return false; 2902 } 2903 } else { 2904 // No immediates on GFX9 2905 if (!MO.isReg()) { 2906 ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9"; 2907 return false; 2908 } 2909 } 2910 } 2911 2912 if (!ST.hasSDWAOmod()) { 2913 // No omod allowed on VI 2914 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 2915 if (OMod != nullptr && 2916 (!OMod->isImm() || OMod->getImm() != 0)) { 2917 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 2918 return false; 2919 } 2920 } 2921 2922 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 2923 if (isVOPC(BasicOpcode)) { 2924 if (!ST.hasSDWASdst() && DstIdx != -1) { 2925 // Only vcc allowed as dst on VI for VOPC 2926 const MachineOperand &Dst = MI.getOperand(DstIdx); 2927 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 2928 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 2929 return false; 2930 } 2931 } else if (!ST.hasSDWAOutModsVOPC()) { 2932 // No clamp allowed on GFX9 for VOPC 2933 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2934 if (Clamp && 
(!Clamp->isImm() || Clamp->getImm() != 0)) { 2935 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 2936 return false; 2937 } 2938 2939 // No omod allowed on GFX9 for VOPC 2940 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 2941 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 2942 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 2943 return false; 2944 } 2945 } 2946 } 2947 2948 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 2949 if (DstUnused && DstUnused->isImm() && 2950 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 2951 const MachineOperand &Dst = MI.getOperand(DstIdx); 2952 if (!Dst.isReg() || !Dst.isTied()) { 2953 ErrInfo = "Dst register should have tied register"; 2954 return false; 2955 } 2956 2957 const MachineOperand &TiedMO = 2958 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 2959 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 2960 ErrInfo = 2961 "Dst register should be tied to implicit use of preserved register"; 2962 return false; 2963 } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) && 2964 Dst.getReg() != TiedMO.getReg()) { 2965 ErrInfo = "Dst register should use same physical register as preserved"; 2966 return false; 2967 } 2968 } 2969 } 2970 2971 // Verify VOP*. Ignore multiple sgpr operands on writelane. 2972 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 2973 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 2974 // Only look at the true operands. Only a real operand can use the constant 2975 // bus, and we don't want to check pseudo-operands like the source modifier 2976 // flags. 2977 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 2978 2979 unsigned ConstantBusCount = 0; 2980 unsigned LiteralCount = 0; 2981 2982 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 2983 ++ConstantBusCount; 2984 2985 unsigned SGPRUsed = findImplicitSGPRRead(MI); 2986 if (SGPRUsed != AMDGPU::NoRegister) 2987 ++ConstantBusCount; 2988 2989 for (int OpIdx : OpIndices) { 2990 if (OpIdx == -1) 2991 break; 2992 const MachineOperand &MO = MI.getOperand(OpIdx); 2993 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 2994 if (MO.isReg()) { 2995 if (MO.getReg() != SGPRUsed) 2996 ++ConstantBusCount; 2997 SGPRUsed = MO.getReg(); 2998 } else { 2999 ++ConstantBusCount; 3000 ++LiteralCount; 3001 } 3002 } 3003 } 3004 if (ConstantBusCount > 1) { 3005 ErrInfo = "VOP* instruction uses the constant bus more than once"; 3006 return false; 3007 } 3008 3009 if (isVOP3(MI) && LiteralCount) { 3010 ErrInfo = "VOP3 instruction uses literal"; 3011 return false; 3012 } 3013 } 3014 3015 // Verify misc. restrictions on specific instructions. 
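  // v_div_scale's src0 must match one of the other two sources; enforce that
  // here when all three source operands are registers.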
3016 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3017 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3018 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3019 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3020 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3021 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3022 if (!compareMachineOp(Src0, Src1) && 3023 !compareMachineOp(Src0, Src2)) { 3024 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3025 return false; 3026 } 3027 } 3028 } 3029 3030 if (isSOPK(MI)) { 3031 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm(); 3032 if (sopkIsZext(MI)) { 3033 if (!isUInt<16>(Imm)) { 3034 ErrInfo = "invalid immediate for SOPK instruction"; 3035 return false; 3036 } 3037 } else { 3038 if (!isInt<16>(Imm)) { 3039 ErrInfo = "invalid immediate for SOPK instruction"; 3040 return false; 3041 } 3042 } 3043 } 3044 3045 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3046 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3047 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3048 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3049 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3050 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3051 3052 const unsigned StaticNumOps = Desc.getNumOperands() + 3053 Desc.getNumImplicitUses(); 3054 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3055 3056 // Allow additional implicit operands. This allows a fixup done by the post 3057 // RA scheduler where the main implicit operand is killed and implicit-defs 3058 // are added for sub-registers that remain live after this instruction. 3059 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3060 ErrInfo = "missing implicit register operands"; 3061 return false; 3062 } 3063 3064 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3065 if (IsDst) { 3066 if (!Dst->isUse()) { 3067 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3068 return false; 3069 } 3070 3071 unsigned UseOpIdx; 3072 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3073 UseOpIdx != StaticNumOps + 1) { 3074 ErrInfo = "movrel implicit operands should be tied"; 3075 return false; 3076 } 3077 } 3078 3079 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3080 const MachineOperand &ImpUse 3081 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3082 if (!ImpUse.isReg() || !ImpUse.isUse() || 3083 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3084 ErrInfo = "src0 should be subreg of implicit vector use"; 3085 return false; 3086 } 3087 } 3088 3089 // Make sure we aren't losing exec uses in the td files. This mostly requires 3090 // being careful when using let Uses to try to add other use registers. 3091 if (shouldReadExec(MI)) { 3092 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3093 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3094 return false; 3095 } 3096 } 3097 3098 if (isSMRD(MI)) { 3099 if (MI.mayStore()) { 3100 // The register offset form of scalar stores may only use m0 as the 3101 // soffset register. 
3102 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3103 if (Soff && Soff->getReg() != AMDGPU::M0) { 3104 ErrInfo = "scalar stores must use m0 as offset register"; 3105 return false; 3106 } 3107 } 3108 } 3109 3110 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { 3111 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3112 if (Offset->getImm() != 0) { 3113 ErrInfo = "subtarget does not support offsets in flat instructions"; 3114 return false; 3115 } 3116 } 3117 3118 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 3119 if (DppCt) { 3120 using namespace AMDGPU::DPP; 3121 3122 unsigned DC = DppCt->getImm(); 3123 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 3124 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 3125 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 3126 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 3127 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 3128 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST)) { 3129 ErrInfo = "Invalid dpp_ctrl value"; 3130 return false; 3131 } 3132 } 3133 3134 return true; 3135 } 3136 3137 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 3138 switch (MI.getOpcode()) { 3139 default: return AMDGPU::INSTRUCTION_LIST_END; 3140 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 3141 case AMDGPU::COPY: return AMDGPU::COPY; 3142 case AMDGPU::PHI: return AMDGPU::PHI; 3143 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 3144 case AMDGPU::WQM: return AMDGPU::WQM; 3145 case AMDGPU::WWM: return AMDGPU::WWM; 3146 case AMDGPU::S_MOV_B32: 3147 return MI.getOperand(1).isReg() ? 3148 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 3149 case AMDGPU::S_ADD_I32: 3150 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32; 3151 case AMDGPU::S_ADDC_U32: 3152 return AMDGPU::V_ADDC_U32_e32; 3153 case AMDGPU::S_SUB_I32: 3154 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32; 3155 // FIXME: These are not consistently handled, and selected when the carry is 3156 // used. 
3157 case AMDGPU::S_ADD_U32: 3158 return AMDGPU::V_ADD_I32_e32; 3159 case AMDGPU::S_SUB_U32: 3160 return AMDGPU::V_SUB_I32_e32; 3161 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 3162 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; 3163 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 3164 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 3165 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 3166 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 3167 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 3168 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 3169 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 3170 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 3171 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 3172 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 3173 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 3174 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 3175 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 3176 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 3177 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 3178 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 3179 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 3180 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 3181 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 3182 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 3183 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 3184 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 3185 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 3186 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 3187 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 3188 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 3189 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 3190 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 3191 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 3192 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 3193 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 3194 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 3195 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 3196 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 3197 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 3198 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 3199 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 3200 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 3201 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 3202 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 3203 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 3204 } 3205 } 3206 3207 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 3208 unsigned OpNo) const { 3209 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3210 const MCInstrDesc &Desc = get(MI.getOpcode()); 3211 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 3212 Desc.OpInfo[OpNo].RegClass == -1) { 3213 unsigned Reg = MI.getOperand(OpNo).getReg(); 3214 3215 if (TargetRegisterInfo::isVirtualRegister(Reg)) 3216 return MRI.getRegClass(Reg); 3217 return RI.getPhysRegClass(Reg); 3218 } 3219 3220 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 3221 return RI.getRegClass(RCID); 3222 } 3223 3224 bool SIInstrInfo::canReadVGPR(const MachineInstr &MI, unsigned OpNo) const { 3225 switch (MI.getOpcode()) { 3226 
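  // For copy-like pseudos the answer depends on the register class of the
  // result, not on the queried source operand.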
case AMDGPU::COPY: 3227 case AMDGPU::REG_SEQUENCE: 3228 case AMDGPU::PHI: 3229 case AMDGPU::INSERT_SUBREG: 3230 return RI.hasVGPRs(getOpRegClass(MI, 0)); 3231 default: 3232 return RI.hasVGPRs(getOpRegClass(MI, OpNo)); 3233 } 3234 } 3235 3236 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 3237 MachineBasicBlock::iterator I = MI; 3238 MachineBasicBlock *MBB = MI.getParent(); 3239 MachineOperand &MO = MI.getOperand(OpIdx); 3240 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 3241 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 3242 const TargetRegisterClass *RC = RI.getRegClass(RCID); 3243 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 3244 if (MO.isReg()) 3245 Opcode = AMDGPU::COPY; 3246 else if (RI.isSGPRClass(RC)) 3247 Opcode = AMDGPU::S_MOV_B32; 3248 3249 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 3250 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 3251 VRC = &AMDGPU::VReg_64RegClass; 3252 else 3253 VRC = &AMDGPU::VGPR_32RegClass; 3254 3255 unsigned Reg = MRI.createVirtualRegister(VRC); 3256 DebugLoc DL = MBB->findDebugLoc(I); 3257 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 3258 MO.ChangeToRegister(Reg, false); 3259 } 3260 3261 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 3262 MachineRegisterInfo &MRI, 3263 MachineOperand &SuperReg, 3264 const TargetRegisterClass *SuperRC, 3265 unsigned SubIdx, 3266 const TargetRegisterClass *SubRC) 3267 const { 3268 MachineBasicBlock *MBB = MI->getParent(); 3269 DebugLoc DL = MI->getDebugLoc(); 3270 unsigned SubReg = MRI.createVirtualRegister(SubRC); 3271 3272 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 3273 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3274 .addReg(SuperReg.getReg(), 0, SubIdx); 3275 return SubReg; 3276 } 3277 3278 // Just in case the super register is itself a sub-register, copy it to a new 3279 // value so we don't need to worry about merging its subreg index with the 3280 // SubIdx passed to this function. The register coalescer should be able to 3281 // eliminate this extra copy. 
3282 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); 3283 3284 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 3285 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 3286 3287 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3288 .addReg(NewSuperReg, 0, SubIdx); 3289 3290 return SubReg; 3291 } 3292 3293 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 3294 MachineBasicBlock::iterator MII, 3295 MachineRegisterInfo &MRI, 3296 MachineOperand &Op, 3297 const TargetRegisterClass *SuperRC, 3298 unsigned SubIdx, 3299 const TargetRegisterClass *SubRC) const { 3300 if (Op.isImm()) { 3301 if (SubIdx == AMDGPU::sub0) 3302 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 3303 if (SubIdx == AMDGPU::sub1) 3304 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 3305 3306 llvm_unreachable("Unhandled register index for immediate"); 3307 } 3308 3309 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 3310 SubIdx, SubRC); 3311 return MachineOperand::CreateReg(SubReg, false); 3312 } 3313 3314 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 3315 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 3316 assert(Inst.getNumExplicitOperands() == 3); 3317 MachineOperand Op1 = Inst.getOperand(1); 3318 Inst.RemoveOperand(1); 3319 Inst.addOperand(Op1); 3320 } 3321 3322 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 3323 const MCOperandInfo &OpInfo, 3324 const MachineOperand &MO) const { 3325 if (!MO.isReg()) 3326 return false; 3327 3328 unsigned Reg = MO.getReg(); 3329 const TargetRegisterClass *RC = 3330 TargetRegisterInfo::isVirtualRegister(Reg) ? 3331 MRI.getRegClass(Reg) : 3332 RI.getPhysRegClass(Reg); 3333 3334 const SIRegisterInfo *TRI = 3335 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 3336 RC = TRI->getSubRegClass(RC, MO.getSubReg()); 3337 3338 // In order to be legal, the common sub-class must be equal to the 3339 // class of the current operand. For example: 3340 // 3341 // v_mov_b32 s0 ; Operand defined as vsrc_b32 3342 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL 3343 // 3344 // s_sendmsg 0, s0 ; Operand defined as m0reg 3345 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL 3346 3347 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; 3348 } 3349 3350 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 3351 const MCOperandInfo &OpInfo, 3352 const MachineOperand &MO) const { 3353 if (MO.isReg()) 3354 return isLegalRegOperand(MRI, OpInfo, MO); 3355 3356 // Handle non-register types that are treated like immediates. 3357 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 3358 return true; 3359 } 3360 3361 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 3362 const MachineOperand *MO) const { 3363 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3364 const MCInstrDesc &InstDesc = MI.getDesc(); 3365 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 3366 const TargetRegisterClass *DefinedRC = 3367 OpInfo.RegClass != -1 ? 
RI.getRegClass(OpInfo.RegClass) : nullptr; 3368 if (!MO) 3369 MO = &MI.getOperand(OpIdx); 3370 3371 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 3372 3373 RegSubRegPair SGPRUsed; 3374 if (MO->isReg()) 3375 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg()); 3376 3377 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 3378 if (i == OpIdx) 3379 continue; 3380 const MachineOperand &Op = MI.getOperand(i); 3381 if (Op.isReg()) { 3382 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && 3383 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 3384 return false; 3385 } 3386 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 3387 return false; 3388 } 3389 } 3390 } 3391 3392 if (MO->isReg()) { 3393 assert(DefinedRC); 3394 return isLegalRegOperand(MRI, OpInfo, *MO); 3395 } 3396 3397 // Handle non-register types that are treated like immediates. 3398 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); 3399 3400 if (!DefinedRC) { 3401 // This operand expects an immediate. 3402 return true; 3403 } 3404 3405 return isImmOperandLegal(MI, OpIdx, *MO); 3406 } 3407 3408 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 3409 MachineInstr &MI) const { 3410 unsigned Opc = MI.getOpcode(); 3411 const MCInstrDesc &InstrDesc = get(Opc); 3412 3413 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 3414 MachineOperand &Src1 = MI.getOperand(Src1Idx); 3415 3416 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 3417 // we need to only have one constant bus use. 3418 // 3419 // Note we do not need to worry about literal constants here. They are 3420 // disabled for the operand type for instructions because they will always 3421 // violate the one constant bus use rule. 3422 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 3423 if (HasImplicitSGPR) { 3424 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3425 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3426 3427 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) 3428 legalizeOpWithMove(MI, Src0Idx); 3429 } 3430 3431 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 3432 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 3433 // src0/src1 with V_READFIRSTLANE. 3434 if (Opc == AMDGPU::V_WRITELANE_B32) { 3435 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3436 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3437 const DebugLoc &DL = MI.getDebugLoc(); 3438 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 3439 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3440 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3441 .add(Src0); 3442 Src0.ChangeToRegister(Reg, false); 3443 } 3444 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 3445 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3446 const DebugLoc &DL = MI.getDebugLoc(); 3447 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3448 .add(Src1); 3449 Src1.ChangeToRegister(Reg, false); 3450 } 3451 return; 3452 } 3453 3454 // VOP2 src0 instructions support all operand types, so we don't need to check 3455 // their legality. If src1 is already legal, we don't need to do anything. 3456 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 3457 return; 3458 3459 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 3460 // lane select. 
Fix up using V_READFIRSTLANE, since we assume that the lane 3461 // select is uniform. 3462 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 3463 RI.isVGPR(MRI, Src1.getReg())) { 3464 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3465 const DebugLoc &DL = MI.getDebugLoc(); 3466 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3467 .add(Src1); 3468 Src1.ChangeToRegister(Reg, false); 3469 return; 3470 } 3471 3472 // We do not use commuteInstruction here because it is too aggressive and will 3473 // commute if it is possible. We only want to commute here if it improves 3474 // legality. This can be called a fairly large number of times so don't waste 3475 // compile time pointlessly swapping and checking legality again. 3476 if (HasImplicitSGPR || !MI.isCommutable()) { 3477 legalizeOpWithMove(MI, Src1Idx); 3478 return; 3479 } 3480 3481 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3482 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3483 3484 // If src0 can be used as src1, commuting will make the operands legal. 3485 // Otherwise we have to give up and insert a move. 3486 // 3487 // TODO: Other immediate-like operand kinds could be commuted if there was a 3488 // MachineOperand::ChangeTo* for them. 3489 if ((!Src1.isImm() && !Src1.isReg()) || 3490 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 3491 legalizeOpWithMove(MI, Src1Idx); 3492 return; 3493 } 3494 3495 int CommutedOpc = commuteOpcode(MI); 3496 if (CommutedOpc == -1) { 3497 legalizeOpWithMove(MI, Src1Idx); 3498 return; 3499 } 3500 3501 MI.setDesc(get(CommutedOpc)); 3502 3503 unsigned Src0Reg = Src0.getReg(); 3504 unsigned Src0SubReg = Src0.getSubReg(); 3505 bool Src0Kill = Src0.isKill(); 3506 3507 if (Src1.isImm()) 3508 Src0.ChangeToImmediate(Src1.getImm()); 3509 else if (Src1.isReg()) { 3510 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 3511 Src0.setSubReg(Src1.getSubReg()); 3512 } else 3513 llvm_unreachable("Should only have register or immediate operands"); 3514 3515 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 3516 Src1.setSubReg(Src0SubReg); 3517 } 3518 3519 // Legalize VOP3 operands. Because all operand types are supported for any 3520 // operand, and since literal constants are not allowed and should never be 3521 // seen, we only need to worry about inserting copies if we use multiple SGPR 3522 // operands. 3523 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 3524 MachineInstr &MI) const { 3525 unsigned Opc = MI.getOpcode(); 3526 3527 int VOP3Idx[3] = { 3528 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 3529 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 3530 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 3531 }; 3532 3533 // Find the one SGPR operand we are allowed to use. 3534 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 3535 3536 for (unsigned i = 0; i < 3; ++i) { 3537 int Idx = VOP3Idx[i]; 3538 if (Idx == -1) 3539 break; 3540 MachineOperand &MO = MI.getOperand(Idx); 3541 3542 // We should never see a VOP3 instruction with an illegal immediate operand. 3543 if (!MO.isReg()) 3544 continue; 3545 3546 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 3547 continue; // VGPRs are legal 3548 3549 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { 3550 SGPRReg = MO.getReg(); 3551 // We can use one SGPR in each VOP3 instruction. 3552 continue; 3553 } 3554 3555 // If we make it this far, then the operand is not legal and we must 3556 // legalize it. 
3557 legalizeOpWithMove(MI, Idx); 3558 } 3559 } 3560 3561 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, 3562 MachineRegisterInfo &MRI) const { 3563 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 3564 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 3565 unsigned DstReg = MRI.createVirtualRegister(SRC); 3566 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 3567 3568 if (SubRegs == 1) { 3569 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3570 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 3571 .addReg(SrcReg); 3572 return DstReg; 3573 } 3574 3575 SmallVector<unsigned, 8> SRegs; 3576 for (unsigned i = 0; i < SubRegs; ++i) { 3577 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3578 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3579 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 3580 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 3581 SRegs.push_back(SGPR); 3582 } 3583 3584 MachineInstrBuilder MIB = 3585 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3586 get(AMDGPU::REG_SEQUENCE), DstReg); 3587 for (unsigned i = 0; i < SubRegs; ++i) { 3588 MIB.addReg(SRegs[i]); 3589 MIB.addImm(RI.getSubRegFromChannel(i)); 3590 } 3591 return DstReg; 3592 } 3593 3594 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 3595 MachineInstr &MI) const { 3596 3597 // If the pointer is stored in VGPRs, then we need to move it to 3598 // SGPRs using v_readfirstlane. This is safe because we only select 3599 // loads with uniform pointers to SMRD instructions, so we know the 3600 // pointer value is uniform. 3601 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 3602 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 3603 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 3604 SBase->setReg(SGPR); 3605 } 3606 } 3607 3608 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 3609 MachineBasicBlock::iterator I, 3610 const TargetRegisterClass *DstRC, 3611 MachineOperand &Op, 3612 MachineRegisterInfo &MRI, 3613 const DebugLoc &DL) const { 3614 unsigned OpReg = Op.getReg(); 3615 unsigned OpSubReg = Op.getSubReg(); 3616 3617 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 3618 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 3619 3620 // Check if operand is already the correct register class. 3621 if (DstRC == OpRC) 3622 return; 3623 3624 unsigned DstReg = MRI.createVirtualRegister(DstRC); 3625 MachineInstr *Copy = 3626 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 3627 3628 Op.setReg(DstReg); 3629 Op.setSubReg(0); 3630 3631 MachineInstr *Def = MRI.getVRegDef(OpReg); 3632 if (!Def) 3633 return; 3634 3635 // Try to eliminate the copy if it is copying an immediate value. 3636 if (Def->isMoveImmediate()) 3637 FoldImmediate(*Copy, *Def, OpReg, &MRI); 3638 } 3639 3640 // Emit the actual waterfall loop, executing the wrapped instruction for each 3641 // unique value of \p Rsrc across all lanes. In the best case we execute 1 3642 // iteration, in the worst case we execute 64 (once per lane).
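// Each trip through the loop uses V_READFIRSTLANE_B32 to pick one candidate
// descriptor out of VRsrc, compares it against every lane's descriptor with
// V_CMP_EQ_U64, and restricts EXEC to the matching lanes with
// S_AND_SAVEEXEC_B64 while the wrapped instruction runs; S_XOR_B64_term then
// retires those lanes and S_CBRANCH_EXECNZ loops back while any remain.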
3643 static void 3644 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 3645 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 3646 const DebugLoc &DL, MachineOperand &Rsrc) { 3647 MachineBasicBlock::iterator I = LoopBB.begin(); 3648 3649 unsigned VRsrc = Rsrc.getReg(); 3650 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 3651 3652 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3653 unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3654 unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3655 unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3656 unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3657 unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3658 unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3659 unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3660 unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 3661 3662 // Beginning of the loop, read the next Rsrc variant. 3663 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) 3664 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0); 3665 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1) 3666 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1); 3667 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2) 3668 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2); 3669 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3) 3670 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3); 3671 3672 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc) 3673 .addReg(SRsrcSub0) 3674 .addImm(AMDGPU::sub0) 3675 .addReg(SRsrcSub1) 3676 .addImm(AMDGPU::sub1) 3677 .addReg(SRsrcSub2) 3678 .addImm(AMDGPU::sub2) 3679 .addReg(SRsrcSub3) 3680 .addImm(AMDGPU::sub3); 3681 3682 // Update Rsrc operand to use the SGPR Rsrc. 3683 Rsrc.setReg(SRsrc); 3684 Rsrc.setIsKill(true); 3685 3686 // Identify all lanes with identical Rsrc operands in their VGPRs. 3687 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) 3688 .addReg(SRsrc, 0, AMDGPU::sub0_sub1) 3689 .addReg(VRsrc, 0, AMDGPU::sub0_sub1); 3690 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) 3691 .addReg(SRsrc, 0, AMDGPU::sub2_sub3) 3692 .addReg(VRsrc, 0, AMDGPU::sub2_sub3); 3693 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond) 3694 .addReg(CondReg0) 3695 .addReg(CondReg1); 3696 3697 MRI.setSimpleHint(SaveExec, AndCond); 3698 3699 // Update EXEC to matching lanes, saving original to SaveExec. 3700 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec) 3701 .addReg(AndCond, RegState::Kill); 3702 3703 // The original instruction is here; we insert the terminators after it. 3704 I = LoopBB.end(); 3705 3706 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 3707 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) 3708 .addReg(AMDGPU::EXEC) 3709 .addReg(SaveExec); 3710 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 3711 } 3712 3713 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 3714 // with SGPRs by iterating over all unique values across all lanes. 
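// The enclosing block is split: \p MI is moved into a new self-looping block,
// EXEC is saved to an SGPR pair before entering the loop and restored at the
// start of the remainder block, and the MachineDominatorTree (if provided) is
// updated for the two new blocks.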
3715 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 3716 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 3717 MachineBasicBlock &MBB = *MI.getParent(); 3718 MachineFunction &MF = *MBB.getParent(); 3719 MachineRegisterInfo &MRI = MF.getRegInfo(); 3720 MachineBasicBlock::iterator I(&MI); 3721 const DebugLoc &DL = MI.getDebugLoc(); 3722 3723 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3724 3725 // Save the EXEC mask 3726 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec) 3727 .addReg(AMDGPU::EXEC); 3728 3729 // Killed uses in the instruction we are waterfalling around will be 3730 // incorrect due to the added control-flow. 3731 for (auto &MO : MI.uses()) { 3732 if (MO.isReg() && MO.isUse()) { 3733 MRI.clearKillFlags(MO.getReg()); 3734 } 3735 } 3736 3737 // To insert the loop we need to split the block. Move everything after this 3738 // point to a new block, and insert a new empty block between the two. 3739 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 3740 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 3741 MachineFunction::iterator MBBI(MBB); 3742 ++MBBI; 3743 3744 MF.insert(MBBI, LoopBB); 3745 MF.insert(MBBI, RemainderBB); 3746 3747 LoopBB->addSuccessor(LoopBB); 3748 LoopBB->addSuccessor(RemainderBB); 3749 3750 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 3751 MachineBasicBlock::iterator J = I++; 3752 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 3753 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 3754 LoopBB->splice(LoopBB->begin(), &MBB, J); 3755 3756 MBB.addSuccessor(LoopBB); 3757 3758 // Update dominators. We know that MBB immediately dominates LoopBB, that 3759 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 3760 // dominates all of the successors transferred to it from MBB that MBB used 3761 // to dominate. 3762 if (MDT) { 3763 MDT->addNewBlock(LoopBB, &MBB); 3764 MDT->addNewBlock(RemainderBB, LoopBB); 3765 for (auto &Succ : RemainderBB->successors()) { 3766 if (MDT->dominates(&MBB, Succ)) { 3767 MDT->changeImmediateDominator(Succ, RemainderBB); 3768 } 3769 } 3770 } 3771 3772 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 3773 3774 // Restore the EXEC mask 3775 MachineBasicBlock::iterator First = RemainderBB->begin(); 3776 BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 3777 .addReg(SaveExec); 3778 } 3779 3780 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 3781 static std::tuple<unsigned, unsigned> 3782 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 3783 MachineBasicBlock &MBB = *MI.getParent(); 3784 MachineFunction &MF = *MBB.getParent(); 3785 MachineRegisterInfo &MRI = MF.getRegInfo(); 3786 3787 // Extract the ptr from the resource descriptor. 
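// The 64-bit pointer sits in the low two dwords of the 128-bit descriptor, so
// sub0_sub1 of the VGPR Rsrc is pulled out as a 64-bit VGPR pair.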
3788 unsigned RsrcPtr = 3789 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, 3790 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 3791 3792 // Create an empty resource descriptor 3793 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3794 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3795 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3796 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 3797 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); 3798 3799 // Zero64 = 0 3800 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) 3801 .addImm(0); 3802 3803 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 3804 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 3805 .addImm(RsrcDataFormat & 0xFFFFFFFF); 3806 3807 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 3808 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 3809 .addImm(RsrcDataFormat >> 32); 3810 3811 // NewSRsrc = {Zero64, SRsrcFormat} 3812 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) 3813 .addReg(Zero64) 3814 .addImm(AMDGPU::sub0_sub1) 3815 .addReg(SRsrcFormatLo) 3816 .addImm(AMDGPU::sub2) 3817 .addReg(SRsrcFormatHi) 3818 .addImm(AMDGPU::sub3); 3819 3820 return std::make_tuple(RsrcPtr, NewSRsrc); 3821 } 3822 3823 void SIInstrInfo::legalizeOperands(MachineInstr &MI, 3824 MachineDominatorTree *MDT) const { 3825 MachineFunction &MF = *MI.getParent()->getParent(); 3826 MachineRegisterInfo &MRI = MF.getRegInfo(); 3827 3828 // Legalize VOP2 3829 if (isVOP2(MI) || isVOPC(MI)) { 3830 legalizeOperandsVOP2(MRI, MI); 3831 return; 3832 } 3833 3834 // Legalize VOP3 3835 if (isVOP3(MI)) { 3836 legalizeOperandsVOP3(MRI, MI); 3837 return; 3838 } 3839 3840 // Legalize SMRD 3841 if (isSMRD(MI)) { 3842 legalizeOperandsSMRD(MRI, MI); 3843 return; 3844 } 3845 3846 // Legalize REG_SEQUENCE and PHI 3847 // The register class of the operands must be the same type as the register 3848 // class of the output. 3849 if (MI.getOpcode() == AMDGPU::PHI) { 3850 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; 3851 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 3852 if (!MI.getOperand(i).isReg() || 3853 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) 3854 continue; 3855 const TargetRegisterClass *OpRC = 3856 MRI.getRegClass(MI.getOperand(i).getReg()); 3857 if (RI.hasVGPRs(OpRC)) { 3858 VRC = OpRC; 3859 } else { 3860 SRC = OpRC; 3861 } 3862 } 3863 3864 // If any of the operands are VGPR registers, then they all must be VGPRs, 3865 // otherwise we will create illegal VGPR->SGPR copies when legalizing 3866 // them. 3867 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { 3868 if (!VRC) { 3869 assert(SRC); 3870 VRC = RI.getEquivalentVGPRClass(SRC); 3871 } 3872 RC = VRC; 3873 } else { 3874 RC = SRC; 3875 } 3876 3877 // Update all the operands so they have the same type. 3878 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3879 MachineOperand &Op = MI.getOperand(I); 3880 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3881 continue; 3882 3883 // MI is a PHI instruction. 3884 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 3885 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 3886 3887 // Avoid creating no-op copies with the same src and dst reg class. These 3888 // confuse some of the machine passes.
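// legalizeGenericOperand already returns early when the operand's class
// matches the requested one, so no copy is emitted in that case.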
3889 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 3890 } 3891 } 3892 3893 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 3894 // VGPR dest type and SGPR sources, insert copies so all operands are 3895 // VGPRs. This seems to help operand folding / the register coalescer. 3896 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 3897 MachineBasicBlock *MBB = MI.getParent(); 3898 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 3899 if (RI.hasVGPRs(DstRC)) { 3900 // Update all the operands so they are VGPR register classes. These may 3901 // not be the same register class because REG_SEQUENCE supports mixing 3902 // subregister index types e.g. sub0_sub1 + sub2 + sub3 3903 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3904 MachineOperand &Op = MI.getOperand(I); 3905 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3906 continue; 3907 3908 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 3909 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 3910 if (VRC == OpRC) 3911 continue; 3912 3913 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 3914 Op.setIsKill(); 3915 } 3916 } 3917 3918 return; 3919 } 3920 3921 // Legalize INSERT_SUBREG 3922 // src0 must have the same register class as dst 3923 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 3924 unsigned Dst = MI.getOperand(0).getReg(); 3925 unsigned Src0 = MI.getOperand(1).getReg(); 3926 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 3927 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 3928 if (DstRC != Src0RC) { 3929 MachineBasicBlock *MBB = MI.getParent(); 3930 MachineOperand &Op = MI.getOperand(1); 3931 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 3932 } 3933 return; 3934 } 3935 3936 // Legalize SI_INIT_M0 3937 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 3938 MachineOperand &Src = MI.getOperand(0); 3939 if (Src.isReg() && RI.hasVGPRs(MRI.getRegClass(Src.getReg()))) 3940 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 3941 return; 3942 } 3943 3944 // Legalize MIMG and MUBUF/MTBUF for shaders. 3945 // 3946 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 3947 // scratch memory access. In both cases, the legalization never involves 3948 // conversion to the addr64 form. 3949 if (isMIMG(MI) || 3950 (AMDGPU::isShader(MF.getFunction().getCallingConv()) && 3951 (isMUBUF(MI) || isMTBUF(MI)))) { 3952 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 3953 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { 3954 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); 3955 SRsrc->setReg(SGPR); 3956 } 3957 3958 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 3959 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { 3960 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); 3961 SSamp->setReg(SGPR); 3962 } 3963 return; 3964 } 3965 3966 // Legalize MUBUF* instructions. 3967 int RsrcIdx = 3968 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 3969 if (RsrcIdx != -1) { 3970 // We have an MUBUF instruction 3971 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx); 3972 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass; 3973 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), 3974 RI.getRegClass(RsrcRC))) { 3975 // The operands are legal. 3976 // FIXME: We may need to legalize operands besides srsrc.
3977 return; 3978 } 3979 3980 // Legalize a VGPR Rsrc. 3981 // 3982 // If the instruction is _ADDR64, we can avoid a waterfall by extracting 3983 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using 3984 // a zero-value SRsrc. 3985 // 3986 // If the instruction is _OFFSET (both idxen and offen disabled), and we 3987 // support ADDR64 instructions, we can convert to ADDR64 and do the same as 3988 // above. 3989 // 3990 // Otherwise we are on non-ADDR64 hardware, and/or we have 3991 // idxen/offen/bothen and we fall back to a waterfall loop. 3992 3993 MachineBasicBlock &MBB = *MI.getParent(); 3994 3995 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 3996 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) { 3997 // This is already an ADDR64 instruction so we need to add the pointer 3998 // extracted from the resource descriptor to the current value of VAddr. 3999 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4000 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4001 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4002 4003 unsigned RsrcPtr, NewSRsrc; 4004 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 4005 4006 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0 4007 DebugLoc DL = MI.getDebugLoc(); 4008 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) 4009 .addReg(RsrcPtr, 0, AMDGPU::sub0) 4010 .addReg(VAddr->getReg(), 0, AMDGPU::sub0); 4011 4012 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1 4013 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) 4014 .addReg(RsrcPtr, 0, AMDGPU::sub1) 4015 .addReg(VAddr->getReg(), 0, AMDGPU::sub1); 4016 4017 // NewVaddr = {NewVaddrHi, NewVaddrLo} 4018 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) 4019 .addReg(NewVAddrLo) 4020 .addImm(AMDGPU::sub0) 4021 .addReg(NewVAddrHi) 4022 .addImm(AMDGPU::sub1); 4023 4024 VAddr->setReg(NewVAddr); 4025 Rsrc->setReg(NewSRsrc); 4026 } else if (!VAddr && ST.hasAddr64()) { 4027 // This instruction is the _OFFSET variant, so we need to convert it to 4028 // ADDR64. 4029 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration() 4030 < AMDGPUSubtarget::VOLCANIC_ISLANDS && 4031 "FIXME: Need to emit flat atomics here"); 4032 4033 unsigned RsrcPtr, NewSRsrc; 4034 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 4035 4036 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4037 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 4038 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4039 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 4040 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 4041 4042 // Atomics with return have an additional tied operand and are 4043 // missing some of the special bits. 4044 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 4045 MachineInstr *Addr64; 4046 4047 if (!VDataIn) { 4048 // Regular buffer load / store. 4049 MachineInstrBuilder MIB = 4050 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 4051 .add(*VData) 4052 .addReg(NewVAddr) 4053 .addReg(NewSRsrc) 4054 .add(*SOffset) 4055 .add(*Offset); 4056 4057 // Atomics do not have this operand.
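// glc and tfe are only added when the corresponding operands are present on
// the original instruction; slc exists on both the regular and atomic forms
// and is always carried over.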
4058 if (const MachineOperand *GLC = 4059 getNamedOperand(MI, AMDGPU::OpName::glc)) { 4060 MIB.addImm(GLC->getImm()); 4061 } 4062 4063 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); 4064 4065 if (const MachineOperand *TFE = 4066 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 4067 MIB.addImm(TFE->getImm()); 4068 } 4069 4070 MIB.cloneMemRefs(MI); 4071 Addr64 = MIB; 4072 } else { 4073 // Atomics with return. 4074 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 4075 .add(*VData) 4076 .add(*VDataIn) 4077 .addReg(NewVAddr) 4078 .addReg(NewSRsrc) 4079 .add(*SOffset) 4080 .add(*Offset) 4081 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 4082 .cloneMemRefs(MI); 4083 } 4084 4085 MI.removeFromParent(); 4086 4087 // NewVaddr = {NewVaddrHi, NewVaddrLo} 4088 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 4089 NewVAddr) 4090 .addReg(RsrcPtr, 0, AMDGPU::sub0) 4091 .addImm(AMDGPU::sub0) 4092 .addReg(RsrcPtr, 0, AMDGPU::sub1) 4093 .addImm(AMDGPU::sub1); 4094 } else { 4095 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 4096 // to SGPRs. 4097 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 4098 } 4099 } 4100 } 4101 4102 void SIInstrInfo::moveToVALU(MachineInstr &TopInst, 4103 MachineDominatorTree *MDT) const { 4104 SetVectorType Worklist; 4105 Worklist.insert(&TopInst); 4106 4107 while (!Worklist.empty()) { 4108 MachineInstr &Inst = *Worklist.pop_back_val(); 4109 MachineBasicBlock *MBB = Inst.getParent(); 4110 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4111 4112 unsigned Opcode = Inst.getOpcode(); 4113 unsigned NewOpcode = getVALUOp(Inst); 4114 4115 // Handle some special cases 4116 switch (Opcode) { 4117 default: 4118 break; 4119 case AMDGPU::S_ADD_U64_PSEUDO: 4120 case AMDGPU::S_SUB_U64_PSEUDO: 4121 splitScalar64BitAddSub(Worklist, Inst, MDT); 4122 Inst.eraseFromParent(); 4123 continue; 4124 case AMDGPU::S_ADD_I32: 4125 case AMDGPU::S_SUB_I32: 4126 // FIXME: The u32 versions currently selected use the carry. 
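// moveScalarAddSub only succeeds on subtargets with the no-carry add/sub
// variants (see below); otherwise we fall through to the default handling.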
4127 if (moveScalarAddSub(Worklist, Inst, MDT)) 4128 continue; 4129 4130 // Default handling 4131 break; 4132 case AMDGPU::S_AND_B64: 4133 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_AND_B32_e64, MDT); 4134 Inst.eraseFromParent(); 4135 continue; 4136 4137 case AMDGPU::S_OR_B64: 4138 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_OR_B32_e64, MDT); 4139 Inst.eraseFromParent(); 4140 continue; 4141 4142 case AMDGPU::S_XOR_B64: 4143 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::V_XOR_B32_e64, MDT); 4144 Inst.eraseFromParent(); 4145 continue; 4146 4147 case AMDGPU::S_NOT_B64: 4148 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::V_NOT_B32_e32); 4149 Inst.eraseFromParent(); 4150 continue; 4151 4152 case AMDGPU::S_BCNT1_I32_B64: 4153 splitScalar64BitBCNT(Worklist, Inst); 4154 Inst.eraseFromParent(); 4155 continue; 4156 4157 case AMDGPU::S_BFE_I64: 4158 splitScalar64BitBFE(Worklist, Inst); 4159 Inst.eraseFromParent(); 4160 continue; 4161 4162 case AMDGPU::S_LSHL_B32: 4163 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4164 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 4165 swapOperands(Inst); 4166 } 4167 break; 4168 case AMDGPU::S_ASHR_I32: 4169 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4170 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 4171 swapOperands(Inst); 4172 } 4173 break; 4174 case AMDGPU::S_LSHR_B32: 4175 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4176 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 4177 swapOperands(Inst); 4178 } 4179 break; 4180 case AMDGPU::S_LSHL_B64: 4181 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4182 NewOpcode = AMDGPU::V_LSHLREV_B64; 4183 swapOperands(Inst); 4184 } 4185 break; 4186 case AMDGPU::S_ASHR_I64: 4187 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4188 NewOpcode = AMDGPU::V_ASHRREV_I64; 4189 swapOperands(Inst); 4190 } 4191 break; 4192 case AMDGPU::S_LSHR_B64: 4193 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4194 NewOpcode = AMDGPU::V_LSHRREV_B64; 4195 swapOperands(Inst); 4196 } 4197 break; 4198 4199 case AMDGPU::S_ABS_I32: 4200 lowerScalarAbs(Worklist, Inst); 4201 Inst.eraseFromParent(); 4202 continue; 4203 4204 case AMDGPU::S_CBRANCH_SCC0: 4205 case AMDGPU::S_CBRANCH_SCC1: 4206 // Clear unused bits of vcc 4207 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 4208 AMDGPU::VCC) 4209 .addReg(AMDGPU::EXEC) 4210 .addReg(AMDGPU::VCC); 4211 break; 4212 4213 case AMDGPU::S_BFE_U64: 4214 case AMDGPU::S_BFM_B64: 4215 llvm_unreachable("Moving this op to VALU not implemented"); 4216 4217 case AMDGPU::S_PACK_LL_B32_B16: 4218 case AMDGPU::S_PACK_LH_B32_B16: 4219 case AMDGPU::S_PACK_HH_B32_B16: 4220 movePackToVALU(Worklist, MRI, Inst); 4221 Inst.eraseFromParent(); 4222 continue; 4223 4224 case AMDGPU::S_XNOR_B32: 4225 lowerScalarXnor(Worklist, Inst); 4226 Inst.eraseFromParent(); 4227 continue; 4228 4229 case AMDGPU::S_XNOR_B64: 4230 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 4231 Inst.eraseFromParent(); 4232 continue; 4233 4234 case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR: 4235 case AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR: 4236 case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR: 4237 case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR: 4238 case AMDGPU::S_BUFFER_LOAD_DWORDX16_SGPR: { 4239 unsigned VDst; 4240 unsigned NewOpcode; 4241 4242 switch(Opcode) { 4243 case AMDGPU::S_BUFFER_LOAD_DWORD_SGPR: 4244 NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_OFFEN; 4245 VDst = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4246 break; 4247 case 
AMDGPU::S_BUFFER_LOAD_DWORDX2_SGPR: 4248 NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_OFFEN; 4249 VDst = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4250 break; 4251 case AMDGPU::S_BUFFER_LOAD_DWORDX4_SGPR: 4252 NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN; 4253 VDst = MRI.createVirtualRegister(&AMDGPU::VReg_128RegClass); 4254 break; 4255 case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR: 4256 case AMDGPU::S_BUFFER_LOAD_DWORDX16_SGPR: 4257 splitScalarBuffer(Worklist, Inst); 4258 Inst.eraseFromParent(); 4259 continue; 4260 } 4261 4262 const MachineOperand *VAddr = getNamedOperand(Inst, AMDGPU::OpName::soff); 4263 auto Add = MRI.getUniqueVRegDef(VAddr->getReg()); 4264 unsigned Offset = 0; 4265 4266 // FIXME: This isn't safe because the addressing mode doesn't work 4267 // correctly if vaddr is negative. 4268 // 4269 // FIXME: Should probably be done somewhere else, maybe SIFoldOperands. 4270 // 4271 // See if we can extract an immediate offset by recognizing one of these: 4272 // V_ADD_I32_e32 dst, imm, src1 4273 // V_ADD_I32_e32 dst, (S_MOV_B32 imm), src1 4274 // V_ADD will be removed by "Remove dead machine instructions". 4275 if (Add && 4276 (Add->getOpcode() == AMDGPU::V_ADD_I32_e32 || 4277 Add->getOpcode() == AMDGPU::V_ADD_U32_e32 || 4278 Add->getOpcode() == AMDGPU::V_ADD_U32_e64)) { 4279 static const unsigned SrcNames[2] = { 4280 AMDGPU::OpName::src0, 4281 AMDGPU::OpName::src1, 4282 }; 4283 4284 // Find a literal offset in one of source operands. 4285 for (int i = 0; i < 2; i++) { 4286 const MachineOperand *Src = 4287 getNamedOperand(*Add, SrcNames[i]); 4288 4289 if (Src->isReg()) { 4290 MachineInstr *Def = MRI.getUniqueVRegDef(Src->getReg()); 4291 if (Def) { 4292 if (Def->isMoveImmediate()) 4293 Src = &Def->getOperand(1); 4294 else if (Def->isCopy()) { 4295 auto Mov = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4296 if (Mov && Mov->isMoveImmediate()) { 4297 Src = &Mov->getOperand(1); 4298 } 4299 } 4300 } 4301 } 4302 4303 if (Src) { 4304 if (Src->isImm()) 4305 Offset = Src->getImm(); 4306 else if (Src->isCImm()) 4307 Offset = Src->getCImm()->getZExtValue(); 4308 } 4309 4310 if (Offset && isLegalMUBUFImmOffset(Offset)) { 4311 VAddr = getNamedOperand(*Add, SrcNames[!i]); 4312 break; 4313 } 4314 4315 Offset = 0; 4316 } 4317 } 4318 4319 MachineInstr *NewInstr = 4320 BuildMI(*MBB, Inst, Inst.getDebugLoc(), 4321 get(NewOpcode), VDst) 4322 .add(*VAddr) // vaddr 4323 .add(*getNamedOperand(Inst, AMDGPU::OpName::sbase)) // srsrc 4324 .addImm(0) // soffset 4325 .addImm(Offset) // offset 4326 .addImm(getNamedOperand(Inst, AMDGPU::OpName::glc)->getImm()) 4327 .addImm(0) // slc 4328 .addImm(0) // tfe 4329 .cloneMemRefs(Inst) 4330 .getInstr(); 4331 4332 MRI.replaceRegWith(getNamedOperand(Inst, AMDGPU::OpName::sdst)->getReg(), 4333 VDst); 4334 addUsersToMoveToVALUWorklist(VDst, MRI, Worklist); 4335 Inst.eraseFromParent(); 4336 4337 // Legalize all operands other than the offset. Notably, convert the srsrc 4338 // into SGPRs using v_readfirstlane if needed. 4339 legalizeOperands(*NewInstr, MDT); 4340 continue; 4341 } 4342 } 4343 4344 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 4345 // We cannot move this instruction to the VALU, so we should try to 4346 // legalize its operands instead. 4347 legalizeOperands(Inst, MDT); 4348 continue; 4349 } 4350 4351 // Use the new VALU Opcode. 4352 const MCInstrDesc &NewDesc = get(NewOpcode); 4353 Inst.setDesc(NewDesc); 4354 4355 // Remove any references to SCC. 
Vector instructions can't read from it, and 4356 // We're just about to add the implicit use / defs of VCC, and we don't want 4357 // both. 4358 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 4359 MachineOperand &Op = Inst.getOperand(i); 4360 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 4361 Inst.RemoveOperand(i); 4362 addSCCDefUsersToVALUWorklist(Inst, Worklist); 4363 } 4364 } 4365 4366 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 4367 // We are converting these to a BFE, so we need to add the missing 4368 // operands for the size and offset. 4369 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; 4370 Inst.addOperand(MachineOperand::CreateImm(0)); 4371 Inst.addOperand(MachineOperand::CreateImm(Size)); 4372 4373 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 4374 // The VALU version adds the second operand to the result, so insert an 4375 // extra 0 operand. 4376 Inst.addOperand(MachineOperand::CreateImm(0)); 4377 } 4378 4379 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 4380 4381 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 4382 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 4383 // If we need to move this to VGPRs, we need to unpack the second operand 4384 // back into the 2 separate ones for bit offset and width. 4385 assert(OffsetWidthOp.isImm() && 4386 "Scalar BFE is only implemented for constant width and offset"); 4387 uint32_t Imm = OffsetWidthOp.getImm(); 4388 4389 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 4390 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 4391 Inst.RemoveOperand(2); // Remove old immediate. 4392 Inst.addOperand(MachineOperand::CreateImm(Offset)); 4393 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 4394 } 4395 4396 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 4397 unsigned NewDstReg = AMDGPU::NoRegister; 4398 if (HasDst) { 4399 unsigned DstReg = Inst.getOperand(0).getReg(); 4400 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 4401 continue; 4402 4403 // Update the destination register class. 4404 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 4405 if (!NewDstRC) 4406 continue; 4407 4408 if (Inst.isCopy() && 4409 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) && 4410 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 4411 // Instead of creating a copy where src and dst are the same register 4412 // class, we just replace all uses of dst with src. These kinds of 4413 // copies interfere with the heuristics MachineSink uses to decide 4414 // whether or not to split a critical edge. Since the pass assumes 4415 // that copies will end up as machine instructions and not be 4416 // eliminated. 4417 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 4418 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 4419 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 4420 Inst.getOperand(0).setReg(DstReg); 4421 4422 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 4423 // these are deleted later, but at -O0 it would leave a suspicious 4424 // looking illegal copy of an undef register. 
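// Strip every operand except the destination and retarget the instruction to
// IMPLICIT_DEF so nothing is left referring to the removed copy source.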
4425 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 4426 Inst.RemoveOperand(I); 4427 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 4428 continue; 4429 } 4430 4431 NewDstReg = MRI.createVirtualRegister(NewDstRC); 4432 MRI.replaceRegWith(DstReg, NewDstReg); 4433 } 4434 4435 // Legalize the operands 4436 legalizeOperands(Inst, MDT); 4437 4438 if (HasDst) 4439 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 4440 } 4441 } 4442 4443 // Add/sub require special handling to deal with carry outs. 4444 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 4445 MachineDominatorTree *MDT) const { 4446 if (ST.hasAddNoCarry()) { 4447 // Assume there is no user of scc since we don't select this in that case. 4448 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 4449 // is used. 4450 4451 MachineBasicBlock &MBB = *Inst.getParent(); 4452 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4453 4454 unsigned OldDstReg = Inst.getOperand(0).getReg(); 4455 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4456 4457 unsigned Opc = Inst.getOpcode(); 4458 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 4459 4460 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 4461 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 4462 4463 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 4464 Inst.RemoveOperand(3); 4465 4466 Inst.setDesc(get(NewOpc)); 4467 Inst.addImplicitDefUseOperands(*MBB.getParent()); 4468 MRI.replaceRegWith(OldDstReg, ResultReg); 4469 legalizeOperands(Inst, MDT); 4470 4471 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4472 return true; 4473 } 4474 4475 return false; 4476 } 4477 4478 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 4479 MachineInstr &Inst) const { 4480 MachineBasicBlock &MBB = *Inst.getParent(); 4481 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4482 MachineBasicBlock::iterator MII = Inst; 4483 DebugLoc DL = Inst.getDebugLoc(); 4484 4485 MachineOperand &Dest = Inst.getOperand(0); 4486 MachineOperand &Src = Inst.getOperand(1); 4487 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4488 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4489 4490 unsigned SubOp = ST.hasAddNoCarry() ? 
4491 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32; 4492 4493 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 4494 .addImm(0) 4495 .addReg(Src.getReg()); 4496 4497 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 4498 .addReg(Src.getReg()) 4499 .addReg(TmpReg); 4500 4501 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4502 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4503 } 4504 4505 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 4506 MachineInstr &Inst) const { 4507 MachineBasicBlock &MBB = *Inst.getParent(); 4508 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4509 MachineBasicBlock::iterator MII = Inst; 4510 const DebugLoc &DL = Inst.getDebugLoc(); 4511 4512 MachineOperand &Dest = Inst.getOperand(0); 4513 MachineOperand &Src0 = Inst.getOperand(1); 4514 MachineOperand &Src1 = Inst.getOperand(2); 4515 4516 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 4517 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 4518 4519 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4520 if (ST.hasDLInsts()) { 4521 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 4522 .add(Src0) 4523 .add(Src1); 4524 } else { 4525 unsigned Xor = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4526 BuildMI(MBB, MII, DL, get(AMDGPU::V_XOR_B32_e64), Xor) 4527 .add(Src0) 4528 .add(Src1); 4529 4530 BuildMI(MBB, MII, DL, get(AMDGPU::V_NOT_B32_e64), NewDest) 4531 .addReg(Xor); 4532 } 4533 4534 MRI.replaceRegWith(Dest.getReg(), NewDest); 4535 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 4536 } 4537 4538 void SIInstrInfo::splitScalar64BitUnaryOp( 4539 SetVectorType &Worklist, MachineInstr &Inst, 4540 unsigned Opcode) const { 4541 MachineBasicBlock &MBB = *Inst.getParent(); 4542 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4543 4544 MachineOperand &Dest = Inst.getOperand(0); 4545 MachineOperand &Src0 = Inst.getOperand(1); 4546 DebugLoc DL = Inst.getDebugLoc(); 4547 4548 MachineBasicBlock::iterator MII = Inst; 4549 4550 const MCInstrDesc &InstDesc = get(Opcode); 4551 const TargetRegisterClass *Src0RC = Src0.isReg() ? 4552 MRI.getRegClass(Src0.getReg()) : 4553 &AMDGPU::SGPR_32RegClass; 4554 4555 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4556 4557 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4558 AMDGPU::sub0, Src0SubRC); 4559 4560 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4561 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 4562 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 4563 4564 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 4565 BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 4566 4567 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4568 AMDGPU::sub1, Src0SubRC); 4569 4570 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 4571 BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 4572 4573 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 4574 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4575 .addReg(DestSub0) 4576 .addImm(AMDGPU::sub0) 4577 .addReg(DestSub1) 4578 .addImm(AMDGPU::sub1); 4579 4580 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4581 4582 // We don't need to legalizeOperands here because for a single operand, src0 4583 // will support any kind of input. 4584 4585 // Move all users of this moved value. 
4586 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4587 } 4588 4589 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 4590 MachineInstr &Inst, 4591 MachineDominatorTree *MDT) const { 4592 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 4593 4594 MachineBasicBlock &MBB = *Inst.getParent(); 4595 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4596 4597 unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4598 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4599 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4600 4601 unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 4602 unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 4603 4604 MachineOperand &Dest = Inst.getOperand(0); 4605 MachineOperand &Src0 = Inst.getOperand(1); 4606 MachineOperand &Src1 = Inst.getOperand(2); 4607 const DebugLoc &DL = Inst.getDebugLoc(); 4608 MachineBasicBlock::iterator MII = Inst; 4609 4610 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 4611 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 4612 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4613 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 4614 4615 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4616 AMDGPU::sub0, Src0SubRC); 4617 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4618 AMDGPU::sub0, Src1SubRC); 4619 4620 4621 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4622 AMDGPU::sub1, Src0SubRC); 4623 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4624 AMDGPU::sub1, Src1SubRC); 4625 4626 unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64; 4627 MachineInstr *LoHalf = 4628 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) 4629 .addReg(CarryReg, RegState::Define) 4630 .add(SrcReg0Sub0) 4631 .add(SrcReg1Sub0); 4632 4633 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 4634 MachineInstr *HiHalf = 4635 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) 4636 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 4637 .add(SrcReg0Sub1) 4638 .add(SrcReg1Sub1) 4639 .addReg(CarryReg, RegState::Kill); 4640 4641 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4642 .addReg(DestSub0) 4643 .addImm(AMDGPU::sub0) 4644 .addReg(DestSub1) 4645 .addImm(AMDGPU::sub1); 4646 4647 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4648 4649 // Try to legalize the operands in case we need to swap the order to keep it 4650 // valid. 4651 legalizeOperands(*LoHalf, MDT); 4652 legalizeOperands(*HiHalf, MDT); 4653 4654 // Move all users of this moved value. 4655 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4656 } 4657 4658 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, 4659 MachineInstr &Inst, unsigned Opcode, 4660 MachineDominatorTree *MDT) const { 4661 MachineBasicBlock &MBB = *Inst.getParent(); 4662 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4663 4664 MachineOperand &Dest = Inst.getOperand(0); 4665 MachineOperand &Src0 = Inst.getOperand(1); 4666 MachineOperand &Src1 = Inst.getOperand(2); 4667 DebugLoc DL = Inst.getDebugLoc(); 4668 4669 MachineBasicBlock::iterator MII = Inst; 4670 4671 const MCInstrDesc &InstDesc = get(Opcode); 4672 const TargetRegisterClass *Src0RC = Src0.isReg() ?
4673 MRI.getRegClass(Src0.getReg()) : 4674 &AMDGPU::SGPR_32RegClass; 4675 4676 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4677 const TargetRegisterClass *Src1RC = Src1.isReg() ? 4678 MRI.getRegClass(Src1.getReg()) : 4679 &AMDGPU::SGPR_32RegClass; 4680 4681 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 4682 4683 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4684 AMDGPU::sub0, Src0SubRC); 4685 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4686 AMDGPU::sub0, Src1SubRC); 4687 4688 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4689 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 4690 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 4691 4692 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 4693 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 4694 .add(SrcReg0Sub0) 4695 .add(SrcReg1Sub0); 4696 4697 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4698 AMDGPU::sub1, Src0SubRC); 4699 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4700 AMDGPU::sub1, Src1SubRC); 4701 4702 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 4703 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 4704 .add(SrcReg0Sub1) 4705 .add(SrcReg1Sub1); 4706 4707 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 4708 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4709 .addReg(DestSub0) 4710 .addImm(AMDGPU::sub0) 4711 .addReg(DestSub1) 4712 .addImm(AMDGPU::sub1); 4713 4714 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4715 4716 // Try to legalize the operands in case we need to swap the order to keep it 4717 // valid. 4718 legalizeOperands(LoHalf, MDT); 4719 legalizeOperands(HiHalf, MDT); 4720 4721 // Move all users of this moved value. 4722 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4723 } 4724 4725 void SIInstrInfo::splitScalar64BitBCNT( 4726 SetVectorType &Worklist, MachineInstr &Inst) const { 4727 MachineBasicBlock &MBB = *Inst.getParent(); 4728 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4729 4730 MachineBasicBlock::iterator MII = Inst; 4731 DebugLoc DL = Inst.getDebugLoc(); 4732 4733 MachineOperand &Dest = Inst.getOperand(0); 4734 MachineOperand &Src = Inst.getOperand(1); 4735 4736 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 4737 const TargetRegisterClass *SrcRC = Src.isReg() ? 4738 MRI.getRegClass(Src.getReg()) : 4739 &AMDGPU::SGPR_32RegClass; 4740 4741 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4742 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4743 4744 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 4745 4746 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 4747 AMDGPU::sub0, SrcSubRC); 4748 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 4749 AMDGPU::sub1, SrcSubRC); 4750 4751 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 4752 4753 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 4754 4755 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4756 4757 // We don't need to legalize operands here. src0 for either instruction can be 4758 // an SGPR, and the second input is unused or determined here.
4759 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4760 } 4761 4762 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 4763 MachineInstr &Inst) const { 4764 MachineBasicBlock &MBB = *Inst.getParent(); 4765 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4766 MachineBasicBlock::iterator MII = Inst; 4767 DebugLoc DL = Inst.getDebugLoc(); 4768 4769 MachineOperand &Dest = Inst.getOperand(0); 4770 uint32_t Imm = Inst.getOperand(2).getImm(); 4771 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 4772 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 4773 4774 (void) Offset; 4775 4776 // Only sext_inreg cases handled. 4777 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 4778 Offset == 0 && "Not implemented"); 4779 4780 if (BitWidth < 32) { 4781 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4782 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4783 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4784 4785 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 4786 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 4787 .addImm(0) 4788 .addImm(BitWidth); 4789 4790 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 4791 .addImm(31) 4792 .addReg(MidRegLo); 4793 4794 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 4795 .addReg(MidRegLo) 4796 .addImm(AMDGPU::sub0) 4797 .addReg(MidRegHi) 4798 .addImm(AMDGPU::sub1); 4799 4800 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4801 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4802 return; 4803 } 4804 4805 MachineOperand &Src = Inst.getOperand(1); 4806 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4807 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4808 4809 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 4810 .addImm(31) 4811 .addReg(Src.getReg(), 0, AMDGPU::sub0); 4812 4813 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 4814 .addReg(Src.getReg(), 0, AMDGPU::sub0) 4815 .addImm(AMDGPU::sub0) 4816 .addReg(TmpReg) 4817 .addImm(AMDGPU::sub1); 4818 4819 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4820 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4821 } 4822 4823 void SIInstrInfo::splitScalarBuffer(SetVectorType &Worklist, 4824 MachineInstr &Inst) const { 4825 MachineBasicBlock &MBB = *Inst.getParent(); 4826 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4827 4828 MachineBasicBlock::iterator MII = Inst; 4829 auto &DL = Inst.getDebugLoc(); 4830 4831 MachineOperand &Dest = *getNamedOperand(Inst, AMDGPU::OpName::sdst);; 4832 MachineOperand &Rsrc = *getNamedOperand(Inst, AMDGPU::OpName::sbase); 4833 MachineOperand &Offset = *getNamedOperand(Inst, AMDGPU::OpName::soff); 4834 MachineOperand &Glc = *getNamedOperand(Inst, AMDGPU::OpName::glc); 4835 4836 unsigned Opcode = Inst.getOpcode(); 4837 unsigned NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_OFFEN; 4838 unsigned Count = 0; 4839 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4840 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 4841 4842 switch(Opcode) { 4843 default: 4844 return; 4845 case AMDGPU::S_BUFFER_LOAD_DWORDX8_SGPR: 4846 Count = 2; 4847 break; 4848 case AMDGPU::S_BUFFER_LOAD_DWORDX16_SGPR: 4849 Count = 4; 4850 break; 4851 } 4852 4853 // FIXME: Should also attempt to build VAddr and Offset like the non-split 4854 // case (see call site for this function) 4855 4856 // 
Create a vector of result registers 4857 SmallVector<unsigned, 8> ResultRegs; 4858 for (unsigned i = 0; i < Count; ++i) { 4859 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_128RegClass); 4860 MachineInstr &NewMI = *BuildMI(MBB, MII, DL, get(NewOpcode), ResultReg) 4861 .addReg(Offset.getReg()) // offset 4862 .addReg(Rsrc.getReg()) // rsrc 4863 .addImm(0) // soffset 4864 .addImm(i << 4) // inst_offset 4865 .addImm(Glc.getImm()) // glc 4866 .addImm(0) // slc 4867 .addImm(0) // tfe 4868 .addMemOperand(*Inst.memoperands_begin()); 4869 // Extract the four 32-bit sub-registers from the result to add into the final REG_SEQUENCE 4870 auto &NewDestOp = NewMI.getOperand(0); 4871 for (unsigned j = 0; j < 4; ++j) 4872 ResultRegs.push_back(buildExtractSubReg(MII, MRI, NewDestOp, &AMDGPU::VReg_128RegClass, 4873 RI.getSubRegFromChannel(j), &AMDGPU::VGPR_32RegClass)); 4874 } 4875 // Create a new combined result to replace the original with 4876 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 4877 MachineInstrBuilder CombinedResBuilder = BuildMI(MBB, MII, DL, 4878 get(TargetOpcode::REG_SEQUENCE), FullDestReg); 4879 4880 for (unsigned i = 0; i < Count * 4; ++i) { 4881 CombinedResBuilder 4882 .addReg(ResultRegs[i]) 4883 .addImm(RI.getSubRegFromChannel(i)); 4884 } 4885 4886 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4887 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4888 } 4889 4890 void SIInstrInfo::addUsersToMoveToVALUWorklist( 4891 unsigned DstReg, 4892 MachineRegisterInfo &MRI, 4893 SetVectorType &Worklist) const { 4894 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 4895 E = MRI.use_end(); I != E;) { 4896 MachineInstr &UseMI = *I->getParent(); 4897 if (!canReadVGPR(UseMI, I.getOperandNo())) { 4898 Worklist.insert(&UseMI); 4899 4900 do { 4901 ++I; 4902 } while (I != E && I->getParent() == &UseMI); 4903 } else { 4904 ++I; 4905 } 4906 } 4907 } 4908 4909 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 4910 MachineRegisterInfo &MRI, 4911 MachineInstr &Inst) const { 4912 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4913 MachineBasicBlock *MBB = Inst.getParent(); 4914 MachineOperand &Src0 = Inst.getOperand(1); 4915 MachineOperand &Src1 = Inst.getOperand(2); 4916 const DebugLoc &DL = Inst.getDebugLoc(); 4917 4918 switch (Inst.getOpcode()) { 4919 case AMDGPU::S_PACK_LL_B32_B16: { 4920 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4921 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4922 4923 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 4924 // 0.
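// Expansion: mask src0 down to its low 16 bits, then V_LSHL_OR_B32 computes
// (src1 << 16) | masked_src0 to form the packed result.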
4925 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 4926 .addImm(0xffff); 4927 4928 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 4929 .addReg(ImmReg, RegState::Kill) 4930 .add(Src0); 4931 4932 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 4933 .add(Src1) 4934 .addImm(16) 4935 .addReg(TmpReg, RegState::Kill); 4936 break; 4937 } 4938 case AMDGPU::S_PACK_LH_B32_B16: { 4939 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4940 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 4941 .addImm(0xffff); 4942 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 4943 .addReg(ImmReg, RegState::Kill) 4944 .add(Src0) 4945 .add(Src1); 4946 break; 4947 } 4948 case AMDGPU::S_PACK_HH_B32_B16: { 4949 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4950 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4951 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 4952 .addImm(16) 4953 .add(Src0); 4954 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 4955 .addImm(0xffff0000); 4956 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 4957 .add(Src1) 4958 .addReg(ImmReg, RegState::Kill) 4959 .addReg(TmpReg, RegState::Kill); 4960 break; 4961 } 4962 default: 4963 llvm_unreachable("unhandled s_pack_* instruction"); 4964 } 4965 4966 MachineOperand &Dest = Inst.getOperand(0); 4967 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4968 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4969 } 4970 4971 void SIInstrInfo::addSCCDefUsersToVALUWorklist( 4972 MachineInstr &SCCDefInst, SetVectorType &Worklist) const { 4973 // This assumes that all the users of SCC are in the same block 4974 // as the SCC def. 4975 for (MachineInstr &MI : 4976 make_range(MachineBasicBlock::iterator(SCCDefInst), 4977 SCCDefInst.getParent()->end())) { 4978 // Exit if we find another SCC def. 4979 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 4980 return; 4981 4982 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) 4983 Worklist.insert(&MI); 4984 } 4985 } 4986 4987 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 4988 const MachineInstr &Inst) const { 4989 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 4990 4991 switch (Inst.getOpcode()) { 4992 // For target instructions, getOpRegClass just returns the virtual register 4993 // class associated with the operand, so we need to find an equivalent VGPR 4994 // register class in order to move the instruction to the VALU. 4995 case AMDGPU::COPY: 4996 case AMDGPU::PHI: 4997 case AMDGPU::REG_SEQUENCE: 4998 case AMDGPU::INSERT_SUBREG: 4999 case AMDGPU::WQM: 5000 case AMDGPU::WWM: 5001 if (RI.hasVGPRs(NewDstRC)) 5002 return nullptr; 5003 5004 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 5005 if (!NewDstRC) 5006 return nullptr; 5007 return NewDstRC; 5008 default: 5009 return NewDstRC; 5010 } 5011 } 5012 5013 // Find the one SGPR operand we are allowed to use. 5014 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 5015 int OpIndices[3]) const { 5016 const MCInstrDesc &Desc = MI.getDesc(); 5017 5018 // Find the one SGPR operand we are allowed to use. 5019 // 5020 // First we need to consider the instruction's operand requirements before 5021 // legalizing. Some operands are required to be SGPRs, such as implicit uses 5022 // of VCC, but we are still bound by the constant bus requirement to only use 5023 // one. 
  //
  // If the operand's class is an SGPR, we can never move it.

  unsigned SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    unsigned Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    // Set ATC = 1. GFX9 doesn't have this bit.
    if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (1ULL << 56);

    // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
    // BTW, it disables TC L2 and therefore decreases performance.
    if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size

  // GFX9 doesn't have ELEMENT_SIZE.
  if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
    Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
  }

  // IndexStride = 64.
  Rsrc23 |= UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
}

unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
                                    int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;

  assert(!MI.memoperands_empty() &&
         (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
}

unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
                                        int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
  assert(Addr && Addr->isFI());
  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
}

unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (!MI.mayLoad())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (!MI.mayStore())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }

  return Size;
}

unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
  unsigned DescSize = Desc.getSize();

  // If we have a definitive size, we can use it. Otherwise we need to inspect
  // the operands to know the size.
  if (isFixedSize(MI))
    return DescSize;

  // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
  if (isVALU(MI) || isSALU(MI)) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    if (Src0Idx == -1)
      return DescSize; // No operands.
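
    // If any of src0/src1/src2 encodes a 32-bit literal, the encoded size
    // grows by 4 bytes; the checks below return on the first literal found.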

    if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
      return DescSize + 4;

    int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
    if (Src1Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
      return DescSize + 4;

    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (Src2Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
      return DescSize + 4;

    return DescSize;
  }

  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleSize(MI);
  case TargetOpcode::INLINEASM: {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  default:
    return DescSize;
  }
}

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}

void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
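  //
  // The rewrite below replaces the non-uniform backedge branch with
  // SI_IF_BREAK + SI_LOOP, feeding the accumulated mask through a PHI inserted
  // in the loop header (zero on the entry edges, the SI_IF_BREAK result on the
  // backedge).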
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {

    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}

ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" }
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           unsigned DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC);

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
           .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RCID == AMDGPU::SReg_128RegClassID;
}

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() >= AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA)
    Gen = ST.getGeneration() == AMDGPUSubtarget::GFX9 ? SIEncodingFamily::SDWA9
                                                      : SIEncodingFamily::SDWA;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}