//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandWithOffset(const MachineInstr &LdSt,
                                          const MachineOperand *&BaseOp,
                                          int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      // TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
      // report that here?
      if (!BaseOp)
        return false;

      Offset = OffsetImm->getImm();
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.
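      // A read2 destination register covers both elements, so its size in
      // bits divided by 16 is the byte size of one element (e.g. the 64-bit
      // vdst of ds_read2_b32 gives 4 bytes per element), while a write2's
      // data0 operand is a single element, hence the plain bits / 8 below.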

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = EltSize * Offset0;
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg()) {
      // We can only handle this if it's a stack access, as any other resource
      // would require reporting multiple base registers.
      const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (AddrReg && !AddrReg->isFI())
        return false;

      const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      const SIMachineFunctionInfo *MFI
        = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
      if (RSrc->getReg() != MFI->getScratchRSrcReg())
        return false;

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseOp = SOffset;
      Offset = OffsetImm->getImm();
      return true;
    }

    const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = AddrReg;
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    const MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseOp = SBaseReg;
    Offset = OffsetImm->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isFLAT(LdSt)) {
    const MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (VAddr) {
      // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;

      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }

    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  const MachineOperand &BaseOp1,
                                  const MachineInstr &MI2,
                                  const MachineOperand &BaseOp2) {
  // Support only base operands with base registers.
  // Note: this could be extended to support FI operands.
  if (!BaseOp1.isReg() || !BaseOp2.isReg())
    return false;

  if (BaseOp1.isIdenticalTo(BaseOp2))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  const MachineFunction &MF = *MI1.getParent()->getParent();
  const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
  Base1 = GetUnderlyingObject(Base1, DL);
  Base2 = GetUnderlyingObject(Base2, DL);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(const MachineOperand &BaseOp1,
                                      const MachineOperand &BaseOp2,
                                      unsigned NumLoads) const {
  const MachineInstr &FirstLdSt = *BaseOp1.getParent();
  const MachineInstr &SecondLdSt = *BaseOp2.getParent();

  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
    return false;

  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    const unsigned MaxGlobalLoadCluster = 6;
    if (NumLoads > MaxGlobalLoadCluster)
      return false;

    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    if (!FirstDst)
      FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
    if (!SecondDst)
      SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
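  // For example, with the 16 byte threshold below, up to four loads with
  // dword destinations can be clustered and up to two with 64-bit
  // destinations, while a pair of loads with 128-bit or wider destinations
  // already exceeds the threshold and is never clustered.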
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();

  const Register Reg = FirstDst->getReg();

  const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
                                         ? MRI.getRegClass(Reg)
                                         : RI.getPhysRegClass(Reg);

  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(-1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);
    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    if (!AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      // First try to find defining accvgpr_write to avoid temporary registers.
      for (auto Def = MI, E = MBB.begin(); Def != E; ) {
        --Def;
        if (!Def->definesRegister(SrcReg, &RI))
          continue;
        if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
          break;

        MachineOperand &DefOp = Def->getOperand(1);
        assert(DefOp.isReg() || DefOp.isImm());

        if (DefOp.isReg()) {
          // Check that the register source operand is not clobbered before MI.
          // Immediate operands are always safe to propagate.
          bool SafeToPropagate = true;
          for (auto I = Def; I != MI && SafeToPropagate; ++I)
            if (I->modifiesRegister(DefOp.getReg(), &RI))
              SafeToPropagate = false;

          if (!SafeToPropagate)
            break;

          DefOp.setIsKill(false);
        }

        BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
          .add(DefOp);
        return;
      }

      RegScavenger RS;
      RS.enterBasicBlock(MBB);
      RS.forward(MI);

      // Ideally we want to have three registers for a long reg_sequence copy
      // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
      unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                                 *MBB.getParent());

      // Registers in the sequence are allocated contiguously so we can just
      // use register number to pick one of three round-robin temps.
      unsigned RegNo = DestReg % 3;
      unsigned Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
      if (!Tmp)
        report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
      RS.setRegUsed(Tmp);
      // Only loop through if there are any free registers left, otherwise
      // scavenger may report a fatal error without emergency spill slot
      // or spill with the slot.
      while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
        unsigned Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
        if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
          break;
        Tmp = Tmp2;
        RS.setRegUsed(Tmp);
      }
      copyPhysReg(MBB, MI, DL, Tmp, SrcReg, KillSrc);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(Tmp, RegState::Kill);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32.
    if (!(RI.getRegSizeInBits(*RC) % 64)) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  } else if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32;
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    if (Opcode == TargetOpcode::COPY) {
      copyPhysReg(MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                  RI.getSubReg(SrcReg, SubIdx), KillSrc);
      continue;
    }

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}
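
/// Materialize \p Value into \p DestReg. Wide register classes without a
/// single-instruction mov are split into elements; note that only the lowest
/// element receives \p Value, the remaining elements are written as zero.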
void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction *MF = MBB.getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
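    // There is no direct compare of EXEC against zero, so for EXECNZ/EXECZ
    // S_OR_SAVEEXEC with a zero source is used instead: it leaves EXEC
    // unchanged but sets SCC to 1 if and only if EXEC is non-zero, and the
    // following S_CSELECT materializes the mask for V_CNDMASK. SReg2 only
    // receives the saved copy of EXEC and is otherwise unused.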
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(-1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  auto MIB = BuildMI(MBB, MI, DL, get(Opcode));
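  // An AGPR spill has to bounce through an intermediate VGPR, so the AGPR
  // spill pseudos take an extra VGPR operand, defined here as a temp and
  // consumed when the pseudo is lowered.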
  if (RI.hasAGPRs(RC)) {
    MachineRegisterInfo &MRI = MF->getRegInfo();
    Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    MIB.addReg(Tmp, RegState::Define);
  }
  MIB.addReg(SrcReg, getKillRegState(isKill)) // data
     .addFrameIndex(FrameIndex)               // addr
     .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
     .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
     .addImm(0)                               // offset
     .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (Register::isVirtualRegister(DestReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg);
  if (RI.hasAGPRs(RC)) {
    MachineRegisterInfo &MRI = MF->getRegInfo();
    Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    MIB.addReg(Tmp, RegState::Define);
  }
  MIB.addFrameIndex(FrameIndex)           // vaddr
     .addReg(MFI->getScratchRSrcReg())    // scratch_rsrc
     .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
     .addImm(0)                           // offset
     .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    const DebugLoc &DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      Register TIDIGXReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
      Register TIDIGYReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
      Register TIDIGZReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
      Register InputPtrReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg)
        .addImm(0); // clamp bit
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg)
    .addImm(0); // clamp bit

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
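  // s_nop N waits for N + 1 wait states (see getNumWaitStates), with N at
  // most 7, so the requested count is emitted in chunks of up to 8. For
  // example, Count == 10 becomes s_nop 7 followed by s_nop 1.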
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;

  case AMDGPU::S_OR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::S_ANDN2_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register Dst = MI.getOperand(0).getReg();
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
    expandMovDPP64(MI);
    break;
  }
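  // V_SET_INACTIVE writes the source value into the lanes where EXEC is
  // currently 0 (the inactive lanes): EXEC is inverted, the mov then writes
  // only the previously inactive lanes, and EXEC is inverted back.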
  case AMDGPU::V_SET_INACTIVE_B32: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
                                 MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    expandPostRAPseudo(*Copy);
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    Register VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
        BuildMI(MBB, MI, DL, MovRelDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .add(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
        MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    Register Reg = MI.getOperand(0).getReg();
    Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::ENTER_WWM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // WWM is entered.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                 : AMDGPU::S_OR_SAVEEXEC_B64));
    break;
  }
  case AMDGPU::EXIT_WWM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // WWM is exited.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
    break;
  }
  case TargetOpcode::BUNDLE: {
    if (!MI.mayLoad() || MI.hasUnmodeledSideEffects())
      return false;

    // If it is a load it must be a memory clause
    for (MachineBasicBlock::instr_iterator I = MI.getIterator();
         I->isBundledWithSucc(); ++I) {
      I->unbundleFromSucc();
      for (MachineOperand &MO : I->operands())
        if (MO.isReg())
          MO.setIsInternalRead(false);
    }

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}
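
// A 64-bit DPP mov has no hardware encoding, so V_MOV_B64_DPP_PSEUDO is
// expanded into two 32-bit DPP movs of the sub0/sub1 halves; for a virtual
// destination the halves are then glued back together with a REG_SEQUENCE.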
std::pair<MachineInstr*, MachineInstr*>
SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
  assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Dst = MI.getOperand(0).getReg();
  unsigned Part = 0;
  MachineInstr *Split[2];

  for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
    auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
    if (Dst.isPhysical()) {
      MovDPP.addDef(RI.getSubReg(Dst, Sub));
    } else {
      assert(MRI.isSSA());
      auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      MovDPP.addDef(Tmp);
    }

    for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
      const MachineOperand &SrcOp = MI.getOperand(I);
      assert(!SrcOp.isFPImm());
      if (SrcOp.isImm()) {
        APInt Imm(64, SrcOp.getImm());
        Imm.ashrInPlace(Part * 32);
        MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
      } else {
        assert(SrcOp.isReg());
        Register Src = SrcOp.getReg();
        if (Src.isPhysical())
          MovDPP.addReg(RI.getSubReg(Src, Sub));
        else
          MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
      }
    }

    for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
      MovDPP.addImm(MI.getOperand(I).getImm());

    Split[Part] = MovDPP;
    ++Part;
  }

  if (Dst.isVirtual())
    BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(Split[0]->getOperand(0).getReg())
      .addImm(AMDGPU::sub0)
      .addReg(Split[1]->getOperand(0).getReg())
      .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return std::make_pair(Split[0], Split[1]);
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  Register Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
}

bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())
    return false;

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;
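
  // With the default 16-bit branch immediate this accepts offsets within
  // roughly +/- 2^15 dwords (about +/- 128 KiB) of the following instruction.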
1701 return nullptr; 1702 } 1703 1704 if (CommutedMI) { 1705 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1706 Src1, AMDGPU::OpName::src1_modifiers); 1707 1708 CommutedMI->setDesc(get(CommutedOpcode)); 1709 } 1710 1711 return CommutedMI; 1712 } 1713 1714 // This needs to be implemented because the source modifiers may be inserted 1715 // between the true commutable operands, and the base 1716 // TargetInstrInfo::commuteInstruction uses it. 1717 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1718 unsigned &SrcOpIdx0, 1719 unsigned &SrcOpIdx1) const { 1720 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1721 } 1722 1723 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1724 unsigned &SrcOpIdx1) const { 1725 if (!Desc.isCommutable()) 1726 return false; 1727 1728 unsigned Opc = Desc.getOpcode(); 1729 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1730 if (Src0Idx == -1) 1731 return false; 1732 1733 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1734 if (Src1Idx == -1) 1735 return false; 1736 1737 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1738 } 1739 1740 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1741 int64_t BrOffset) const { 1742 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1743 // block is unanalyzable. 1744 assert(BranchOp != AMDGPU::S_SETPC_B64); 1745 1746 // Convert to dwords. 1747 BrOffset /= 4; 1748 1749 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1750 // from the next instruction. 1751 BrOffset -= 1; 1752 1753 return isIntN(BranchOffsetBits, BrOffset); 1754 } 1755 1756 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1757 const MachineInstr &MI) const { 1758 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1759 // This would be a difficult analysis to perform, but can always be legal so 1760 // there's no need to analyze it. 1761 return nullptr; 1762 } 1763 1764 return MI.getOperand(0).getMBB(); 1765 } 1766 1767 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1768 MachineBasicBlock &DestBB, 1769 const DebugLoc &DL, 1770 int64_t BrOffset, 1771 RegScavenger *RS) const { 1772 assert(RS && "RegScavenger required for long branching"); 1773 assert(MBB.empty() && 1774 "new block should be inserted for expanding unconditional branch"); 1775 assert(MBB.pred_size() == 1); 1776 1777 MachineFunction *MF = MBB.getParent(); 1778 MachineRegisterInfo &MRI = MF->getRegInfo(); 1779 1780 // FIXME: Virtual register workaround for RegScavenger not working with empty 1781 // blocks. 1782 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1783 1784 auto I = MBB.end(); 1785 1786 // We need to compute the offset relative to the instruction immediately after 1787 // s_getpc_b64. Insert pc arithmetic code before last terminator. 1788 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 1789 1790 // TODO: Handle > 32-bit block address. 1791 if (BrOffset >= 0) { 1792 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 1793 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1794 .addReg(PCReg, 0, AMDGPU::sub0) 1795 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 1796 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 1797 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1798 .addReg(PCReg, 0, AMDGPU::sub1) 1799 .addImm(0); 1800 } else { 1801 // Backwards branch. 
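    // The overall expansion then looks like (the SGPR pair is whatever the
    // scavenger picks; s[8:9] below is purely illustrative):
    //
    //   s_getpc_b64 s[8:9]
    //   s_sub_u32   s8, s8, <offset to DestBB>
    //   s_subb_u32  s9, s9, 0
    //   s_setpc_b64 s[8:9]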
1802 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) 1803 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1804 .addReg(PCReg, 0, AMDGPU::sub0) 1805 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); 1806 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) 1807 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1808 .addReg(PCReg, 0, AMDGPU::sub1) 1809 .addImm(0); 1810 } 1811 1812 // Insert the indirect branch after the other terminator. 1813 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 1814 .addReg(PCReg); 1815 1816 // FIXME: If spilling is necessary, this will fail because this scavenger has 1817 // no emergency stack slots. It is non-trivial to spill in this situation, 1818 // because the restore code needs to be specially placed after the 1819 // jump. BranchRelaxation then needs to be made aware of the newly inserted 1820 // block. 1821 // 1822 // If a spill is needed for the pc register pair, we need to insert a spill 1823 // restore block right before the destination block, and insert a short branch 1824 // into the old destination block's fallthrough predecessor. 1825 // e.g.: 1826 // 1827 // s_cbranch_scc0 skip_long_branch: 1828 // 1829 // long_branch_bb: 1830 // spill s[8:9] 1831 // s_getpc_b64 s[8:9] 1832 // s_add_u32 s8, s8, restore_bb 1833 // s_addc_u32 s9, s9, 0 1834 // s_setpc_b64 s[8:9] 1835 // 1836 // skip_long_branch: 1837 // foo; 1838 // 1839 // ..... 1840 // 1841 // dest_bb_fallthrough_predecessor: 1842 // bar; 1843 // s_branch dest_bb 1844 // 1845 // restore_bb: 1846 // restore s[8:9] 1847 // fallthrough dest_bb 1848 /// 1849 // dest_bb: 1850 // buzz; 1851 1852 RS->enterBasicBlockEnd(MBB); 1853 unsigned Scav = RS->scavengeRegisterBackwards( 1854 AMDGPU::SReg_64RegClass, 1855 MachineBasicBlock::iterator(GetPC), false, 0); 1856 MRI.replaceRegWith(PCReg, Scav); 1857 MRI.clearVirtRegs(); 1858 RS->setRegUsed(Scav); 1859 1860 return 4 + 8 + 4 + 4; 1861 } 1862 1863 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 1864 switch (Cond) { 1865 case SIInstrInfo::SCC_TRUE: 1866 return AMDGPU::S_CBRANCH_SCC1; 1867 case SIInstrInfo::SCC_FALSE: 1868 return AMDGPU::S_CBRANCH_SCC0; 1869 case SIInstrInfo::VCCNZ: 1870 return AMDGPU::S_CBRANCH_VCCNZ; 1871 case SIInstrInfo::VCCZ: 1872 return AMDGPU::S_CBRANCH_VCCZ; 1873 case SIInstrInfo::EXECNZ: 1874 return AMDGPU::S_CBRANCH_EXECNZ; 1875 case SIInstrInfo::EXECZ: 1876 return AMDGPU::S_CBRANCH_EXECZ; 1877 default: 1878 llvm_unreachable("invalid branch predicate"); 1879 } 1880 } 1881 1882 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 1883 switch (Opcode) { 1884 case AMDGPU::S_CBRANCH_SCC0: 1885 return SCC_FALSE; 1886 case AMDGPU::S_CBRANCH_SCC1: 1887 return SCC_TRUE; 1888 case AMDGPU::S_CBRANCH_VCCNZ: 1889 return VCCNZ; 1890 case AMDGPU::S_CBRANCH_VCCZ: 1891 return VCCZ; 1892 case AMDGPU::S_CBRANCH_EXECNZ: 1893 return EXECNZ; 1894 case AMDGPU::S_CBRANCH_EXECZ: 1895 return EXECZ; 1896 default: 1897 return INVALID_BR; 1898 } 1899 } 1900 1901 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 1902 MachineBasicBlock::iterator I, 1903 MachineBasicBlock *&TBB, 1904 MachineBasicBlock *&FBB, 1905 SmallVectorImpl<MachineOperand> &Cond, 1906 bool AllowModify) const { 1907 if (I->getOpcode() == AMDGPU::S_BRANCH) { 1908 // Unconditional Branch 1909 TBB = I->getOperand(0).getMBB(); 1910 return false; 1911 } 1912 1913 MachineBasicBlock *CondBB = nullptr; 1914 1915 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 1916 CondBB = I->getOperand(1).getMBB(); 1917 Cond.push_back(I->getOperand(0)); 1918 } else 
{ 1919 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 1920 if (Pred == INVALID_BR) 1921 return true; 1922 1923 CondBB = I->getOperand(0).getMBB(); 1924 Cond.push_back(MachineOperand::CreateImm(Pred)); 1925 Cond.push_back(I->getOperand(1)); // Save the branch register. 1926 } 1927 ++I; 1928 1929 if (I == MBB.end()) { 1930 // Conditional branch followed by fall-through. 1931 TBB = CondBB; 1932 return false; 1933 } 1934 1935 if (I->getOpcode() == AMDGPU::S_BRANCH) { 1936 TBB = CondBB; 1937 FBB = I->getOperand(0).getMBB(); 1938 return false; 1939 } 1940 1941 return true; 1942 } 1943 1944 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 1945 MachineBasicBlock *&FBB, 1946 SmallVectorImpl<MachineOperand> &Cond, 1947 bool AllowModify) const { 1948 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 1949 auto E = MBB.end(); 1950 if (I == E) 1951 return false; 1952 1953 // Skip over the instructions that are artificially terminators for special 1954 // exec management. 1955 while (I != E && !I->isBranch() && !I->isReturn() && 1956 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 1957 switch (I->getOpcode()) { 1958 case AMDGPU::SI_MASK_BRANCH: 1959 case AMDGPU::S_MOV_B64_term: 1960 case AMDGPU::S_XOR_B64_term: 1961 case AMDGPU::S_ANDN2_B64_term: 1962 case AMDGPU::S_MOV_B32_term: 1963 case AMDGPU::S_XOR_B32_term: 1964 case AMDGPU::S_OR_B32_term: 1965 case AMDGPU::S_ANDN2_B32_term: 1966 break; 1967 case AMDGPU::SI_IF: 1968 case AMDGPU::SI_ELSE: 1969 case AMDGPU::SI_KILL_I1_TERMINATOR: 1970 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 1971 // FIXME: It's messy that these need to be considered here at all. 1972 return true; 1973 default: 1974 llvm_unreachable("unexpected non-branch terminator inst"); 1975 } 1976 1977 ++I; 1978 } 1979 1980 if (I == E) 1981 return false; 1982 1983 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 1984 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 1985 1986 ++I; 1987 1988 // TODO: Should be able to treat as fallthrough? 1989 if (I == MBB.end()) 1990 return true; 1991 1992 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 1993 return true; 1994 1995 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 1996 1997 // Specifically handle the case where the conditional branch is to the same 1998 // destination as the mask branch. e.g. 1999 // 2000 // si_mask_branch BB8 2001 // s_cbranch_execz BB8 2002 // s_cbranch BB9 2003 // 2004 // This is required to understand divergent loops which may need the branches 2005 // to be relaxed. 2006 if (TBB != MaskBrDest || Cond.empty()) 2007 return true; 2008 2009 auto Pred = Cond[0].getImm(); 2010 return (Pred != EXECZ && Pred != EXECNZ); 2011 } 2012 2013 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2014 int *BytesRemoved) const { 2015 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2016 2017 unsigned Count = 0; 2018 unsigned RemovedSize = 0; 2019 while (I != MBB.end()) { 2020 MachineBasicBlock::iterator Next = std::next(I); 2021 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2022 I = Next; 2023 continue; 2024 } 2025 2026 RemovedSize += getInstSizeInBytes(*I); 2027 I->eraseFromParent(); 2028 ++Count; 2029 I = Next; 2030 } 2031 2032 if (BytesRemoved) 2033 *BytesRemoved = RemovedSize; 2034 2035 return Count; 2036 } 2037 2038 // Copy the flags onto the implicit condition register operand. 
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
      .add(Cond[0])
      .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);

    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  assert(TBB && FBB);

  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
    .addMBB(TBB);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  // Copy the flags onto the implicit condition register operand.
  preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);

  if (BytesAdded)
    *BytesAdded = 8;

  return 2;
}

bool SIInstrInfo::reverseBranchCondition(
  SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.size() != 2) {
    return true;
  }

  if (Cond[0].isImm()) {
    Cond[0].setImm(-Cond[0].getImm());
    return false;
  }

  return true;
}

bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                  ArrayRef<MachineOperand> Cond,
                                  unsigned TrueReg, unsigned FalseReg,
                                  int &CondCycles,
                                  int &TrueCycles, int &FalseCycles) const {
  switch (Cond[0].getImm()) {
  case VCCNZ:
  case VCCZ: {
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    assert(MRI.getRegClass(FalseReg) == RC);

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???

    // Limit to equal cost for branch vs. N v_cndmask_b32s.
    return RI.hasVGPRs(RC) && NumInsts <= 6;
  }
  case SCC_TRUE:
  case SCC_FALSE: {
    // FIXME: We could insert for VGPRs if we could replace the original compare
    // with a vector one.
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    assert(MRI.getRegClass(FalseReg) == RC);

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;

    // 64-bit multiples (an even NumInsts) can use s_cselect_b64 and halve the
    // instruction count.
    if (NumInsts % 2 == 0)
      NumInsts /= 2;

    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
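    // Worked example: a 128-bit SGPR select has NumInsts = 128 / 32 = 4;
    // since 4 is even this becomes 2 s_cselect_b64s, so CondCycles,
    // TrueCycles and FalseCycles are all estimated as 2.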
2150 return RI.isSGPRClass(RC); 2151 } 2152 default: 2153 return false; 2154 } 2155 } 2156 2157 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2158 MachineBasicBlock::iterator I, const DebugLoc &DL, 2159 unsigned DstReg, ArrayRef<MachineOperand> Cond, 2160 unsigned TrueReg, unsigned FalseReg) const { 2161 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2162 if (Pred == VCCZ || Pred == SCC_FALSE) { 2163 Pred = static_cast<BranchPredicate>(-Pred); 2164 std::swap(TrueReg, FalseReg); 2165 } 2166 2167 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2168 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2169 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2170 2171 if (DstSize == 32) { 2172 unsigned SelOp = Pred == SCC_TRUE ? 2173 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; 2174 2175 // Instruction's operands are backwards from what is expected. 2176 MachineInstr *Select = 2177 BuildMI(MBB, I, DL, get(SelOp), DstReg) 2178 .addReg(FalseReg) 2179 .addReg(TrueReg); 2180 2181 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2182 return; 2183 } 2184 2185 if (DstSize == 64 && Pred == SCC_TRUE) { 2186 MachineInstr *Select = 2187 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2188 .addReg(FalseReg) 2189 .addReg(TrueReg); 2190 2191 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2192 return; 2193 } 2194 2195 static const int16_t Sub0_15[] = { 2196 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2197 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2198 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2199 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2200 }; 2201 2202 static const int16_t Sub0_15_64[] = { 2203 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2204 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2205 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2206 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2207 }; 2208 2209 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2210 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2211 const int16_t *SubIndices = Sub0_15; 2212 int NElts = DstSize / 32; 2213 2214 // 64-bit select is only available for SALU. 2215 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2216 if (Pred == SCC_TRUE) { 2217 if (NElts % 2) { 2218 SelOp = AMDGPU::S_CSELECT_B32; 2219 EltRC = &AMDGPU::SGPR_32RegClass; 2220 } else { 2221 SelOp = AMDGPU::S_CSELECT_B64; 2222 EltRC = &AMDGPU::SGPR_64RegClass; 2223 SubIndices = Sub0_15_64; 2224 NElts /= 2; 2225 } 2226 } 2227 2228 MachineInstrBuilder MIB = BuildMI( 2229 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2230 2231 I = MIB->getIterator(); 2232 2233 SmallVector<unsigned, 8> Regs; 2234 for (int Idx = 0; Idx != NElts; ++Idx) { 2235 Register DstElt = MRI.createVirtualRegister(EltRC); 2236 Regs.push_back(DstElt); 2237 2238 unsigned SubIdx = SubIndices[Idx]; 2239 2240 MachineInstr *Select = 2241 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2242 .addReg(FalseReg, 0, SubIdx) 2243 .addReg(TrueReg, 0, SubIdx); 2244 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2245 fixImplicitOperands(*Select); 2246 2247 MIB.addReg(DstElt) 2248 .addImm(SubIdx); 2249 } 2250 } 2251 2252 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2253 switch (MI.getOpcode()) { 2254 case AMDGPU::V_MOV_B32_e32: 2255 case AMDGPU::V_MOV_B32_e64: 2256 case AMDGPU::V_MOV_B64_PSEUDO: { 2257 // If there are additional implicit register operands, this may be used for 2258 // register indexing so the source register operand isn't simply copied. 
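    // For example, a V_MOV_B32 expanded from an indirect-source sequence
    // carries implicit uses of the full source tuple and of M0, so the value
    // actually read depends on M0 and the move must not be folded as a plain
    // copy.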
    unsigned NumOps = MI.getDesc().getNumOperands() +
      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32:
  case AMDGPU::V_ACCVGPR_READ_B32:
    return true;
  default:
    return false;
  }
}

unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
    unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
  case PseudoSourceValue::TargetCustom:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  // Remove the highest index first so the remaining indices stay valid.
  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}

bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                unsigned Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  switch (DefMI.getOpcode()) {
  default:
    return false;
  case AMDGPU::S_MOV_B64:
    // TODO: We could fold 64-bit immediates, but this gets complicated
    // when there are sub-registers.
    return false;

  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::V_ACCVGPR_WRITE_B32:
    break;
  }

  const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
  assert(ImmOp);
  // FIXME: We could handle FrameIndex values here.
  if (!ImmOp->isImm())
    return false;

  unsigned Opc = UseMI.getOpcode();
  if (Opc == AMDGPU::COPY) {
    bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
    unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
    if (RI.isAGPR(*MRI, UseMI.getOperand(0).getReg())) {
      if (!isInlineConstant(*ImmOp, AMDGPU::OPERAND_REG_INLINE_AC_INT32))
        return false;
      NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32;
    }
    UseMI.setDesc(get(NewOpc));
    UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
    UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
    return true;
  }

  if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 ||
      Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
      Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) {
    // Don't fold if we are using source or output modifiers. The new VOP2
    // instructions don't have them.
    if (hasAnyModifiersSet(UseMI))
      return false;

    // If this is a free constant, there's no reason to do this.
    // TODO: We could fold this here instead of letting SIFoldOperands do it
    // later.
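    // A "free" constant here is an inline immediate, e.g. 1.0 or -4: those
    // already encode without a literal dword, so rewriting to the madak/madmk
    // literal forms would gain nothing.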
    MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);

    // Any src operand can be used for the legality check.
    if (isInlineConstant(UseMI, *Src0, *ImmOp))
      return false;

    bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
                 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64;
    bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
                 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64;
    MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_{f16, f32}.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
        return false;

      if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
        return false;

      unsigned NewOpc =
        IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
              : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
      if (pseudoToMCOpcode(NewOpc) == -1)
        return false;

      // We need to swap operands 0 and 1 since madmk constant is at operand 1.

      const int64_t Imm = ImmOp->getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      Register Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      Src1->ChangeToImmediate(Imm);

      removeModOperands(UseMI);
      UseMI.setDesc(get(NewOpc));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_{f16, f32}.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      bool Src0Inlined = false;
      if (Src0->isReg()) {
        // Try to inline the constant if possible: if the def is a move
        // immediate with a single use, folding it here saves a VGPR.
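        // For example, given "v1 = v_mov_b32 2.0" where v1 is only read by
        // this MAD/FMA, src0 collapses to the inline constant 2.0 and both v1
        // and its def become dead.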
        MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
        if (Def && Def->isMoveImmediate() &&
            isInlineConstant(Def->getOperand(1)) &&
            MRI->hasOneUse(Src0->getReg())) {
          Src0->ChangeToImmediate(Def->getOperand(1).getImm());
          Src0Inlined = true;
        } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
                    (ST.getConstantBusLimit(Opc) <= 1 &&
                     RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
                   (Register::isVirtualRegister(Src0->getReg()) &&
                    (ST.getConstantBusLimit(Opc) <= 1 &&
                     RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
          return false;
        // VGPR is okay as Src0 - fallthrough
      }

      if (Src1->isReg() && !Src0Inlined) {
        // We have one slot for an inlinable constant so far - try to fill it.
        MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
        if (Def && Def->isMoveImmediate() &&
            isInlineConstant(Def->getOperand(1)) &&
            MRI->hasOneUse(Src1->getReg()) &&
            commuteInstruction(UseMI)) {
          Src0->ChangeToImmediate(Def->getOperand(1).getImm());
        } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
                    RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
                   (Register::isVirtualRegister(Src1->getReg()) &&
                    RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
          return false;
        // VGPR is okay as Src1 - fallthrough
      }

      unsigned NewOpc =
        IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
              : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
      if (pseudoToMCOpcode(NewOpc) == -1)
        return false;

      const int64_t Imm = ImmOp->getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      // ChangeToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(UseMI);
      UseMI.setDesc(get(NewOpc));
      // UseMI may have been commuted above, leaving an SGPR in src1. An
      // inline constant plus an SGPR would violate the constant bus
      // restriction, so legalize the operands if needed.
      legalizeOperands(UseMI);

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
                                               const MachineInstr &MIb) const {
  const MachineOperand *BaseOp0, *BaseOp1;
  int64_t Offset0, Offset1;

  if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) &&
      getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) {
    if (!BaseOp0->isIdenticalTo(*BaseOp1))
      return false;

    if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
      // FIXME: Handle ds_read2 / ds_write2.
      return false;
    }
    unsigned Width0 = (*MIa.memoperands_begin())->getSize();
    unsigned Width1 = (*MIb.memoperands_begin())->getSize();
    if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) {
      return true;
    }
  }

  return false;
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                                  const MachineInstr &MIb) const {
  assert((MIa.mayLoad() || MIa.mayStore()) &&
         "MIa must load from or modify a memory location");
  assert((MIb.mayLoad() || MIb.mayStore()) &&
         "MIb must load from or modify a memory location");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
  if (isDS(MIa)) {
    if (isDS(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
  }

  if (isMUBUF(MIa) || isMTBUF(MIa)) {
    if (isMUBUF(MIb) || isMTBUF(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isSMRD(MIb);
  }

  if (isSMRD(MIa)) {
    if (isSMRD(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    // MIa is known to be SMRD here, so the buffer checks must apply to MIb.
    return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
  }

  if (isFLAT(MIa)) {
    if (isFLAT(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

static int64_t getFoldableImm(const MachineOperand* MO) {
  // Returns the foldable immediate, or 0 if the operand is not a register
  // defined by a V_MOV_B32 of an immediate. Note that 0 doubles as the
  // "no immediate" sentinel for the callers.
  if (!MO->isReg())
    return 0;
  const MachineFunction *MF = MO->getParent()->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  auto Def = MRI.getUniqueVRegDef(MO->getReg());
  if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
      Def->getOperand(1).isImm())
    return Def->getOperand(1).getImm();
  return 0;
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineInstr &MI,
                                                 LiveVariables *LV) const {
  unsigned Opc = MI.getOpcode();
  bool IsF16 = false;
  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
               Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;

  switch (Opc) {
  default:
    return nullptr;
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F16_e64:
    IsF16 = true;
    LLVM_FALLTHROUGH;
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_FMAC_F32_e64:
    break;
  case
AMDGPU::V_MAC_F16_e32: 2624 case AMDGPU::V_FMAC_F16_e32: 2625 IsF16 = true; 2626 LLVM_FALLTHROUGH; 2627 case AMDGPU::V_MAC_F32_e32: 2628 case AMDGPU::V_FMAC_F32_e32: { 2629 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2630 AMDGPU::OpName::src0); 2631 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2632 if (!Src0->isReg() && !Src0->isImm()) 2633 return nullptr; 2634 2635 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2636 return nullptr; 2637 2638 break; 2639 } 2640 } 2641 2642 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2643 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2644 const MachineOperand *Src0Mods = 2645 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2646 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2647 const MachineOperand *Src1Mods = 2648 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2649 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2650 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2651 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2652 2653 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && 2654 // If we have an SGPR input, we will violate the constant bus restriction. 2655 (ST.getConstantBusLimit(Opc) > 1 || 2656 !Src0->isReg() || 2657 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2658 if (auto Imm = getFoldableImm(Src2)) { 2659 unsigned NewOpc = 2660 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 2661 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2662 if (pseudoToMCOpcode(NewOpc) != -1) 2663 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2664 .add(*Dst) 2665 .add(*Src0) 2666 .add(*Src1) 2667 .addImm(Imm); 2668 } 2669 unsigned NewOpc = 2670 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2671 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2672 if (auto Imm = getFoldableImm(Src1)) { 2673 if (pseudoToMCOpcode(NewOpc) != -1) 2674 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2675 .add(*Dst) 2676 .add(*Src0) 2677 .addImm(Imm) 2678 .add(*Src2); 2679 } 2680 if (auto Imm = getFoldableImm(Src0)) { 2681 if (pseudoToMCOpcode(NewOpc) != -1 && 2682 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, 2683 AMDGPU::OpName::src0), Src1)) 2684 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2685 .add(*Dst) 2686 .add(*Src1) 2687 .addImm(Imm) 2688 .add(*Src2); 2689 } 2690 } 2691 2692 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2693 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2694 if (pseudoToMCOpcode(NewOpc) == -1) 2695 return nullptr; 2696 2697 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2698 .add(*Dst) 2699 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2700 .add(*Src0) 2701 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2702 .add(*Src1) 2703 .addImm(0) // Src mods 2704 .add(*Src2) 2705 .addImm(Clamp ? Clamp->getImm() : 0) 2706 .addImm(Omod ? Omod->getImm() : 0); 2707 } 2708 2709 // It's not generally safe to move VALU instructions across these since it will 2710 // start using the register as a base index rather than directly. 2711 // XXX - Why isn't hasSideEffects sufficient for these? 
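// For example, sinking a VALU instruction past s_set_gpr_idx_on would
// silently change which register it reads:
//
//   v_mov_b32        v0, v1            ; reads v1
//   s_set_gpr_idx_on s0, gpr_idx(SRC0) ; after this, the same move would
//                                      ; read v[1 + M0] instead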
2712 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2713 switch (MI.getOpcode()) { 2714 case AMDGPU::S_SET_GPR_IDX_ON: 2715 case AMDGPU::S_SET_GPR_IDX_MODE: 2716 case AMDGPU::S_SET_GPR_IDX_OFF: 2717 return true; 2718 default: 2719 return false; 2720 } 2721 } 2722 2723 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2724 const MachineBasicBlock *MBB, 2725 const MachineFunction &MF) const { 2726 // XXX - Do we want the SP check in the base implementation? 2727 2728 // Target-independent instructions do not have an implicit-use of EXEC, even 2729 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2730 // boundaries prevents incorrect movements of such instructions. 2731 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2732 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2733 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2734 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2735 MI.getOpcode() == AMDGPU::S_DENORM_MODE || 2736 changesVGPRIndexingMode(MI); 2737 } 2738 2739 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 2740 return Opcode == AMDGPU::DS_ORDERED_COUNT || 2741 Opcode == AMDGPU::DS_GWS_INIT || 2742 Opcode == AMDGPU::DS_GWS_SEMA_V || 2743 Opcode == AMDGPU::DS_GWS_SEMA_BR || 2744 Opcode == AMDGPU::DS_GWS_SEMA_P || 2745 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 2746 Opcode == AMDGPU::DS_GWS_BARRIER; 2747 } 2748 2749 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 2750 unsigned Opcode = MI.getOpcode(); 2751 2752 if (MI.mayStore() && isSMRD(MI)) 2753 return true; // scalar store or atomic 2754 2755 // This will terminate the function when other lanes may need to continue. 2756 if (MI.isReturn()) 2757 return true; 2758 2759 // These instructions cause shader I/O that may cause hardware lockups 2760 // when executed with an empty EXEC mask. 2761 // 2762 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 2763 // EXEC = 0, but checking for that case here seems not worth it 2764 // given the typical code patterns. 2765 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 2766 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 2767 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 2768 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 2769 return true; 2770 2771 if (MI.isCall() || MI.isInlineAsm()) 2772 return true; // conservative assumption 2773 2774 // These are like SALU instructions in terms of effects, so it's questionable 2775 // whether we should return true for those. 2776 // 2777 // However, executing them with EXEC = 0 causes them to operate on undefined 2778 // data, which we avoid by returning true here. 2779 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 2780 return true; 2781 2782 return false; 2783 } 2784 2785 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 2786 const MachineInstr &MI) const { 2787 if (MI.isMetaInstruction()) 2788 return false; 2789 2790 // This won't read exec if this is an SGPR->SGPR copy. 2791 if (MI.isCopyLike()) { 2792 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 2793 return true; 2794 2795 // Make sure this isn't copying exec as a normal operand 2796 return MI.readsRegister(AMDGPU::EXEC, &RI); 2797 } 2798 2799 // Make a conservative assumption about the callee. 2800 if (MI.isCall()) 2801 return true; 2802 2803 // Be conservative with any unhandled generic opcodes. 
2804 if (!isTargetSpecificOpcode(MI.getOpcode())) 2805 return true; 2806 2807 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 2808 } 2809 2810 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2811 switch (Imm.getBitWidth()) { 2812 case 1: // This likely will be a condition code mask. 2813 return true; 2814 2815 case 32: 2816 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2817 ST.hasInv2PiInlineImm()); 2818 case 64: 2819 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2820 ST.hasInv2PiInlineImm()); 2821 case 16: 2822 return ST.has16BitInsts() && 2823 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 2824 ST.hasInv2PiInlineImm()); 2825 default: 2826 llvm_unreachable("invalid bitwidth"); 2827 } 2828 } 2829 2830 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 2831 uint8_t OperandType) const { 2832 if (!MO.isImm() || 2833 OperandType < AMDGPU::OPERAND_SRC_FIRST || 2834 OperandType > AMDGPU::OPERAND_SRC_LAST) 2835 return false; 2836 2837 // MachineOperand provides no way to tell the true operand size, since it only 2838 // records a 64-bit value. We need to know the size to determine if a 32-bit 2839 // floating point immediate bit pattern is legal for an integer immediate. It 2840 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 2841 2842 int64_t Imm = MO.getImm(); 2843 switch (OperandType) { 2844 case AMDGPU::OPERAND_REG_IMM_INT32: 2845 case AMDGPU::OPERAND_REG_IMM_FP32: 2846 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2847 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 2848 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 2849 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 2850 int32_t Trunc = static_cast<int32_t>(Imm); 2851 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 2852 } 2853 case AMDGPU::OPERAND_REG_IMM_INT64: 2854 case AMDGPU::OPERAND_REG_IMM_FP64: 2855 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2856 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2857 return AMDGPU::isInlinableLiteral64(MO.getImm(), 2858 ST.hasInv2PiInlineImm()); 2859 case AMDGPU::OPERAND_REG_IMM_INT16: 2860 case AMDGPU::OPERAND_REG_IMM_FP16: 2861 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2862 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 2863 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 2864 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 2865 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 2866 // A few special case instructions have 16-bit operands on subtargets 2867 // where 16-bit instructions are not legal. 2868 // TODO: Do the 32-bit immediates work? 
We shouldn't really need to handle 2869 // constants in these cases 2870 int16_t Trunc = static_cast<int16_t>(Imm); 2871 return ST.has16BitInsts() && 2872 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2873 } 2874 2875 return false; 2876 } 2877 case AMDGPU::OPERAND_REG_IMM_V2INT16: 2878 case AMDGPU::OPERAND_REG_IMM_V2FP16: 2879 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 2880 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 2881 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 2882 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 2883 uint32_t Trunc = static_cast<uint32_t>(Imm); 2884 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 2885 } 2886 default: 2887 llvm_unreachable("invalid bitwidth"); 2888 } 2889 } 2890 2891 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 2892 const MCOperandInfo &OpInfo) const { 2893 switch (MO.getType()) { 2894 case MachineOperand::MO_Register: 2895 return false; 2896 case MachineOperand::MO_Immediate: 2897 return !isInlineConstant(MO, OpInfo); 2898 case MachineOperand::MO_FrameIndex: 2899 case MachineOperand::MO_MachineBasicBlock: 2900 case MachineOperand::MO_ExternalSymbol: 2901 case MachineOperand::MO_GlobalAddress: 2902 case MachineOperand::MO_MCSymbol: 2903 return true; 2904 default: 2905 llvm_unreachable("unexpected operand type"); 2906 } 2907 } 2908 2909 static bool compareMachineOp(const MachineOperand &Op0, 2910 const MachineOperand &Op1) { 2911 if (Op0.getType() != Op1.getType()) 2912 return false; 2913 2914 switch (Op0.getType()) { 2915 case MachineOperand::MO_Register: 2916 return Op0.getReg() == Op1.getReg(); 2917 case MachineOperand::MO_Immediate: 2918 return Op0.getImm() == Op1.getImm(); 2919 default: 2920 llvm_unreachable("Didn't expect to be comparing these operand types"); 2921 } 2922 } 2923 2924 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 2925 const MachineOperand &MO) const { 2926 const MCInstrDesc &InstDesc = MI.getDesc(); 2927 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 2928 2929 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 2930 2931 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 2932 return true; 2933 2934 if (OpInfo.RegClass < 0) 2935 return false; 2936 2937 const MachineFunction *MF = MI.getParent()->getParent(); 2938 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 2939 2940 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 2941 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 2942 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2943 AMDGPU::OpName::src2)) 2944 return false; 2945 return RI.opCanUseInlineConstant(OpInfo.OperandType); 2946 } 2947 2948 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 2949 return false; 2950 2951 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 2952 return true; 2953 2954 return ST.hasVOP3Literal(); 2955 } 2956 2957 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 2958 int Op32 = AMDGPU::getVOPe32(Opcode); 2959 if (Op32 == -1) 2960 return false; 2961 2962 return pseudoToMCOpcode(Op32) != -1; 2963 } 2964 2965 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 2966 // The src0_modifier operand is present on all instructions 2967 // that have modifiers. 
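  // e.g. the VOP3 form V_ADD_F32_e64 has a src0_modifiers operand, so it
  // reports true here, while the VOP2 form V_ADD_F32_e32 has none.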
  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
  return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         hasModifiersSet(MI, AMDGPU::OpName::omod);
}

bool SIInstrInfo::canShrink(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) const {
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc, and src0_modifiers and src1_modifiers are not set.
  // We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
    default: return false;

    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64: {
      const MachineOperand *Src1
        = getNamedOperand(MI, AMDGPU::OpName::src1);
      if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
        return false;
      // Additional verification is needed for sdst/src2.
      return true;
    }
    case AMDGPU::V_MAC_F32_e64:
    case AMDGPU::V_MAC_F16_e64:
    case AMDGPU::V_FMAC_F32_e64:
    case AMDGPU::V_FMAC_F16_e64:
      if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
        return false;
      break;

    case AMDGPU::V_CNDMASK_B32_e64:
      break;
    }
  }

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
               hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Can it be shrunk to a valid 32 bit opcode?
  if (!hasVALU32BitEncoding(MI.getOpcode()))
    return false;

  // Check output modifiers
  return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

// Set VCC operand with all flags from \p Orig, except for setting it as
// implicit.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {
  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
                                           unsigned Op32) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineInstrBuilder Inst32 =
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32));

  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3065 // For VOPC instructions, this is replaced by an implicit def of vcc. 3066 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3067 if (Op32DstIdx != -1) { 3068 // dst 3069 Inst32.add(MI.getOperand(0)); 3070 } else { 3071 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3072 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3073 "Unexpected case"); 3074 } 3075 3076 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3077 3078 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3079 if (Src1) 3080 Inst32.add(*Src1); 3081 3082 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3083 3084 if (Src2) { 3085 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3086 if (Op32Src2Idx != -1) { 3087 Inst32.add(*Src2); 3088 } else { 3089 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3090 // replaced with an implicit read of vcc. This was already added 3091 // during the initial BuildMI, so find it to preserve the flags. 3092 copyFlagsToImplicitVCC(*Inst32, *Src2); 3093 } 3094 } 3095 3096 return Inst32; 3097 } 3098 3099 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3100 const MachineOperand &MO, 3101 const MCOperandInfo &OpInfo) const { 3102 // Literal constants use the constant bus. 3103 //if (isLiteralConstantLike(MO, OpInfo)) 3104 // return true; 3105 if (MO.isImm()) 3106 return !isInlineConstant(MO, OpInfo); 3107 3108 if (!MO.isReg()) 3109 return true; // Misc other operands like FrameIndex 3110 3111 if (!MO.isUse()) 3112 return false; 3113 3114 if (Register::isVirtualRegister(MO.getReg())) 3115 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3116 3117 // Null is free 3118 if (MO.getReg() == AMDGPU::SGPR_NULL) 3119 return false; 3120 3121 // SGPRs use the constant bus 3122 if (MO.isImplicit()) { 3123 return MO.getReg() == AMDGPU::M0 || 3124 MO.getReg() == AMDGPU::VCC || 3125 MO.getReg() == AMDGPU::VCC_LO; 3126 } else { 3127 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3128 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3129 } 3130 } 3131 3132 static unsigned findImplicitSGPRRead(const MachineInstr &MI) { 3133 for (const MachineOperand &MO : MI.implicit_operands()) { 3134 // We only care about reads. 
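    // (An implicit def, e.g. vcc written by a carry-out add, does not occupy
    // the constant bus; an implicit use such as the vcc read of
    // v_cndmask_b32_e32 does.)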
3135 if (MO.isDef()) 3136 continue; 3137 3138 switch (MO.getReg()) { 3139 case AMDGPU::VCC: 3140 case AMDGPU::VCC_LO: 3141 case AMDGPU::VCC_HI: 3142 case AMDGPU::M0: 3143 case AMDGPU::FLAT_SCR: 3144 return MO.getReg(); 3145 3146 default: 3147 break; 3148 } 3149 } 3150 3151 return AMDGPU::NoRegister; 3152 } 3153 3154 static bool shouldReadExec(const MachineInstr &MI) { 3155 if (SIInstrInfo::isVALU(MI)) { 3156 switch (MI.getOpcode()) { 3157 case AMDGPU::V_READLANE_B32: 3158 case AMDGPU::V_READLANE_B32_gfx6_gfx7: 3159 case AMDGPU::V_READLANE_B32_gfx10: 3160 case AMDGPU::V_READLANE_B32_vi: 3161 case AMDGPU::V_WRITELANE_B32: 3162 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: 3163 case AMDGPU::V_WRITELANE_B32_gfx10: 3164 case AMDGPU::V_WRITELANE_B32_vi: 3165 return false; 3166 } 3167 3168 return true; 3169 } 3170 3171 if (MI.isPreISelOpcode() || 3172 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3173 SIInstrInfo::isSALU(MI) || 3174 SIInstrInfo::isSMRD(MI)) 3175 return false; 3176 3177 return true; 3178 } 3179 3180 static bool isSubRegOf(const SIRegisterInfo &TRI, 3181 const MachineOperand &SuperVec, 3182 const MachineOperand &SubReg) { 3183 if (Register::isPhysicalRegister(SubReg.getReg())) 3184 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3185 3186 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3187 SubReg.getReg() == SuperVec.getReg(); 3188 } 3189 3190 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3191 StringRef &ErrInfo) const { 3192 uint16_t Opcode = MI.getOpcode(); 3193 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3194 return true; 3195 3196 const MachineFunction *MF = MI.getParent()->getParent(); 3197 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3198 3199 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3200 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3201 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3202 3203 // Make sure the number of operands is correct. 3204 const MCInstrDesc &Desc = get(Opcode); 3205 if (!Desc.isVariadic() && 3206 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3207 ErrInfo = "Instruction has wrong number of operands."; 3208 return false; 3209 } 3210 3211 if (MI.isInlineAsm()) { 3212 // Verify register classes for inlineasm constraints. 3213 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3214 I != E; ++I) { 3215 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3216 if (!RC) 3217 continue; 3218 3219 const MachineOperand &Op = MI.getOperand(I); 3220 if (!Op.isReg()) 3221 continue; 3222 3223 Register Reg = Op.getReg(); 3224 if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) { 3225 ErrInfo = "inlineasm operand has incorrect register class."; 3226 return false; 3227 } 3228 } 3229 3230 return true; 3231 } 3232 3233 // Make sure the register classes are correct. 3234 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3235 if (MI.getOperand(i).isFPImm()) { 3236 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    }
    case MCOI::OPERAND_IMMEDIATE:
    case AMDGPU::OPERAND_KIMM32:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      LLVM_FALLTHROUGH;
    default:
      continue;
    }

    if (!MI.getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      Register Reg = MI.getOperand(i).getReg();
      if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify SDWA
  if (isSDWA(MI)) {
    if (!ST.hasSDWA()) {
      ErrInfo = "SDWA is not supported on this target";
      return false;
    }

    int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);

    const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        continue;
      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (!ST.hasSDWAScalar()) {
        // Only VGPRs on VI
        if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
          ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
          return false;
        }
      } else {
        // No immediates on GFX9
        if (!MO.isReg()) {
          ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9";
          return false;
        }
      }
    }

    if (!ST.hasSDWAOmod()) {
      // No omod allowed on VI
      const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
      if (OMod != nullptr &&
          (!OMod->isImm() || OMod->getImm() != 0)) {
        ErrInfo = "OMod not allowed in SDWA instructions on VI";
        return false;
      }
    }

    uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
    if (isVOPC(BasicOpcode)) {
      if (!ST.hasSDWASdst() && DstIdx != -1) {
        // Only vcc allowed as dst on VI for VOPC
        const MachineOperand &Dst = MI.getOperand(DstIdx);
        if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
          ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
          return false;
        }
      } else if (!ST.hasSDWAOutModsVOPC()) {
        // No clamp allowed on GFX9 for VOPC
        const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
        if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
          ErrInfo = "Clamp not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }

        // No omod allowed on GFX9 for VOPC
        const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
        if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
          ErrInfo = "OMod not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }
      }
    }

    const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused && DstUnused->isImm() &&
        DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (!Dst.isReg() || !Dst.isTied()) {
        ErrInfo = "Dst register should have tied register";
        return false;
      }

      const MachineOperand &TiedMO =
          MI.getOperand(MI.findTiedOperandIdx(DstIdx));
      if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
        ErrInfo =
            "Dst register should be tied to implicit use of preserved register";
        return false;
      } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
                 Dst.getReg() != TiedMO.getReg()) {
        ErrInfo = "Dst register should use same physical register as preserved";
        return false;
      }
    }
  }

  // Verify MIMG
  if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
    // Ensure that the return type used is large enough for all the options
    // being used. TFE/LWE require an extra result register.
    const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
    if (DMask) {
      uint64_t DMaskImm = DMask->getImm();
      uint32_t RegCount =
          isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
      const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
      const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
      const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);

      // Adjust for packed 16 bit values
      if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
        RegCount >>= 1;

      // Adjust if using LWE or TFE
      if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
        RegCount += 1;

      const uint32_t DstIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (Dst.isReg()) {
        const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
        uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
        if (RegCount > DstSize) {
          ErrInfo = "MIMG instruction returns too many registers for dst "
                    "register class";
          return false;
        }
      }
    }
  }

  // Verify VOP*. Ignore multiple sgpr operands on writelane.
  if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
      && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
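    // For instance, "v_add_f32_e64 v0, s0, s1" needs two constant bus reads;
    // that is rejected where getConstantBusLimit() is 1 (targets before
    // GFX10) and accepted on GFX10, where the limit is 2.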
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned LiteralCount = 0;

    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
      ++ConstantBusCount;

    SmallVector<unsigned, 2> SGPRsUsed;
    unsigned SGPRUsed = findImplicitSGPRRead(MI);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusCount;
      SGPRsUsed.push_back(SGPRUsed);
    }

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI.getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg()) {
          SGPRUsed = MO.getReg();
          if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
                return !RI.regsOverlap(SGPRUsed, SGPR);
              })) {
            ++ConstantBusCount;
            SGPRsUsed.push_back(SGPRUsed);
          }
        } else {
          ++ConstantBusCount;
          ++LiteralCount;
        }
      }
    }
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    // v_writelane_b32 is an exception from constant bus restriction:
    // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const
    if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
        Opcode != AMDGPU::V_WRITELANE_B32) {
      ErrInfo = "VOP* instruction violates constant bus restriction";
      return false;
    }

    if (isVOP3(MI) && LiteralCount) {
      if (!ST.hasVOP3Literal()) {
        ErrInfo = "VOP3 instruction uses literal";
        return false;
      }
      if (LiteralCount > 1) {
        ErrInfo = "VOP3 instruction uses more than one literal";
        return false;
      }
    }
  }

  // Special case for writelane - this can break the multiple constant bus
  // rule, but still can't use more than one SGPR register
  if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
    unsigned SGPRCount = 0;
    Register SGPRUsed = AMDGPU::NoRegister;

    for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
      if (OpIdx == -1)
        break;

      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
          if (MO.getReg() != SGPRUsed)
            ++SGPRCount;
          SGPRUsed = MO.getReg();
        }
      }
      if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
        ErrInfo = "WRITELANE instruction violates constant bus restriction";
        return false;
      }
    }
  }

  // Verify misc. restrictions on specific instructions.
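  //
  // For example, v_div_scale requires one source to repeat the numerator or
  // denominator, i.e. (schematically) "v_div_scale_f32 v0, vcc, v1, v1, v2"
  // is valid, while three distinct register sources are not.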
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) {
    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &Src1 = MI.getOperand(Src1Idx);
    const MachineOperand &Src2 = MI.getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
  }

  if (isSOP2(MI) || isSOPC(MI)) {
    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &Src1 = MI.getOperand(Src1Idx);
    unsigned Immediates = 0;

    if (!Src0.isReg() &&
        !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType))
      Immediates++;
    if (!Src1.isReg() &&
        !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType))
      Immediates++;

    if (Immediates > 1) {
      ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
      return false;
    }
  }

  if (isSOPK(MI)) {
    auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
    if (Desc.isBranch()) {
      if (!Op->isMBB()) {
        ErrInfo = "invalid branch target for SOPK instruction";
        return false;
      }
    } else {
      uint64_t Imm = Op->getImm();
      if (sopkIsZext(MI)) {
        if (!isUInt<16>(Imm)) {
          ErrInfo = "invalid immediate for SOPK instruction";
          return false;
        }
      } else {
        if (!isInt<16>(Imm)) {
          ErrInfo = "invalid immediate for SOPK instruction";
          return false;
        }
      }
    }
  }

  if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
    const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
                       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;

    const unsigned StaticNumOps = Desc.getNumOperands() +
                                  Desc.getNumImplicitUses();
    const unsigned NumImplicitOps = IsDst ? 2 : 1;

    // Allow additional implicit operands. This allows a fixup done by the post
    // RA scheduler where the main implicit operand is killed and implicit-defs
    // are added for sub-registers that remain live after this instruction.
    if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
      ErrInfo = "missing implicit register operands";
      return false;
    }

    const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (IsDst) {
      if (!Dst->isUse()) {
        ErrInfo = "v_movreld_b32 vdst should be a use operand";
        return false;
      }

      unsigned UseOpIdx;
      if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
          UseOpIdx != StaticNumOps + 1) {
        ErrInfo = "movrel implicit operands should be tied";
        return false;
      }
    }

    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &ImpUse
      = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
    if (!ImpUse.isReg() || !ImpUse.isUse() ||
        !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
      ErrInfo = "src0 should be subreg of implicit vector use";
      return false;
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
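  // (If the implicit EXEC read were dropped, passes that track register
  // dependencies would be free to reorder the instruction across EXEC
  // updates, which could miscompile.)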
  if (shouldReadExec(MI)) {
    if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  if (isSMRD(MI)) {
    if (MI.mayStore()) {
      // The register offset form of scalar stores may only use m0 as the
      // soffset register.
      const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
      if (Soff && Soff->getReg() != AMDGPU::M0) {
        ErrInfo = "scalar stores must use m0 as offset register";
        return false;
      }
    }
  }

  if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) {
    const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
    if (Offset->getImm() != 0) {
      ErrInfo = "subtarget does not support offsets in flat instructions";
      return false;
    }
  }

  if (isMIMG(MI)) {
    const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
    if (DimOp) {
      int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
                                                 AMDGPU::OpName::vaddr0);
      int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
      const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
      const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
          AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
      const AMDGPU::MIMGDimInfo *Dim =
          AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());

      if (!Dim) {
        ErrInfo = "dim is out of range";
        return false;
      }

      bool IsNSA = SRsrcIdx - VAddr0Idx > 1;
      unsigned AddrWords = BaseOpcode->NumExtraArgs +
                           (BaseOpcode->Gradients ? Dim->NumGradients : 0) +
                           (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                           (BaseOpcode->LodOrClampOrMip ? 1 : 0);

      unsigned VAddrWords;
      if (IsNSA) {
        VAddrWords = SRsrcIdx - VAddr0Idx;
      } else {
        const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
        VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
        if (AddrWords > 8)
          AddrWords = 16;
        else if (AddrWords > 4)
          AddrWords = 8;
        else if (AddrWords == 3 && VAddrWords == 4) {
          // CodeGen uses the V4 variant of instructions for three addresses,
          // because the selection DAG does not support non-power-of-two types.
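          // For example, a sample with three address words still carries its
          // address in a VReg_128 vaddr, so VAddrWords == 4 is accepted here.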
          AddrWords = 4;
        }
      }

      if (VAddrWords != AddrWords) {
        ErrInfo = "bad vaddr size";
        return false;
      }
    }
  }

  const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
  if (DppCt) {
    using namespace AMDGPU::DPP;

    unsigned DC = DppCt->getImm();
    if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
        DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
        (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
      ErrInfo = "Invalid dpp_ctrl value";
      return false;
    }
    if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
        ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "wavefront shifts are not supported on GFX10+";
      return false;
    }
    if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
        ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "broadcasts are not supported on GFX10+";
      return false;
    }
    if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
        ST.getGeneration() < AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "row_share and row_xmask are not supported before GFX10";
      return false;
    }
  }

  return true;
}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::WQM: return AMDGPU::WQM;
  case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
  case AMDGPU::WWM: return AMDGPU::WWM;
  case AMDGPU::S_MOV_B32: {
    const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
    return MI.getOperand(1).isReg() ||
           RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  }
  case AMDGPU::S_ADD_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_ADDC_U32:
    return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
  // FIXME: These are not consistently handled, and selected when the carry is
  // used.
  case AMDGPU::S_ADD_U32:
    return AMDGPU::V_ADD_I32_e32;
  case AMDGPU::S_SUB_U32:
    return AMDGPU::V_SUB_I32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32;
  case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32;
  case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
  case AMDGPU::S_XNOR_B32:
    return ST.hasDLInsts() ?
           AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
  case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
  }
  llvm_unreachable(
      "Unexpected scalar opcode without corresponding vector one!");
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    Register Reg = MI.getOperand(OpNo).getReg();

    if (Register::isVirtualRegister(Reg))
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI.getParent();
  MachineOperand &MO = MI.getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo());
  unsigned RCID =
      get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Size = TRI->getRegSizeInBits(*RC);
  unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
                                 : AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  Register Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  Register SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
        .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
  Register NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
      .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
    MachineBasicBlock::iterator MII,
    MachineRegisterInfo &MRI,
    MachineOperand &Op,
    const TargetRegisterClass *SuperRC,
    unsigned SubIdx,
    const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1)
void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
  assert(Inst.getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst.getOperand(1);
  Inst.RemoveOperand(1);
  Inst.addOperand(Op1);
}

bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
                                    const MCOperandInfo &OpInfo,
                                    const MachineOperand &MO) const {
  if (!MO.isReg())
    return false;

  Register Reg = MO.getReg();
  const TargetRegisterClass *RC = Register::isVirtualRegister(Reg)
      ?
        MRI.getRegClass(Reg)
      : RI.getPhysRegClass(Reg);

  const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
  if (MO.getSubReg()) {
    const MachineFunction *MF = MO.getParent()->getParent()->getParent();
    const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
    if (!SuperRC)
      return false;

    DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
    if (!DRC)
      return false;
  }
  return RC->hasSuperClassEq(DRC);
}

bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                                     const MCOperandInfo &OpInfo,
                                     const MachineOperand &MO) const {
  if (MO.isReg())
    return isLegalRegOperand(MRI, OpInfo, MO);

  // Handle non-register types that are treated like immediates.
  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
  return true;
}

bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MCInstrDesc &InstDesc = MI.getDesc();
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const TargetRegisterClass *DefinedRC =
      OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI.getOperand(OpIdx);

  int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
  int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
  if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
    if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--)
      return false;

    SmallDenseSet<RegSubRegPair> SGPRsUsed;
    if (MO->isReg())
      SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI.getOperand(i);
      if (Op.isReg()) {
        RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
        if (!SGPRsUsed.count(SGPR) &&
            usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
          if (--ConstantBusLimit <= 0)
            return false;
          SGPRsUsed.insert(SGPR);
        }
      } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
        if (--ConstantBusLimit <= 0)
          return false;
      } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) &&
                 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) {
        if (!VOP3LiteralLimit--)
          return false;
        if (--ConstantBusLimit <= 0)
          return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    return isLegalRegOperand(MRI, OpInfo, *MO);
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());

  if (!DefinedRC) {
    // This operand expects an immediate.
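    // (There is no register class to validate against in this case, so any
    // immediate-like operand is accepted as-is.)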
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}

void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &InstrDesc = get(Opc);

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32
  // we need to only have one constant bus use before GFX10.
  bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
  if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
      Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
       isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
    legalizeOpWithMove(MI, Src0Idx);

  // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
  // both the value to write (src0) and lane select (src1). Fix up non-SGPR
  // src0/src1 with V_READFIRSTLANE.
  if (Opc == AMDGPU::V_WRITELANE_B32) {
    const DebugLoc &DL = MI.getDebugLoc();
    if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
          .add(Src0);
      Src0.ChangeToRegister(Reg, false);
    }
    if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
          .add(Src1);
      Src1.ChangeToRegister(Reg, false);
    }
    return;
  }

  // No VOP2 instructions support AGPRs.
  if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
    legalizeOpWithMove(MI, Src0Idx);

  if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
    legalizeOpWithMove(MI, Src1Idx);

  // VOP2 src0 instructions support all operand types, so we don't need to check
  // their legality. If src1 is already legal, we don't need to do anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
  // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
  // select is uniform.
  if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
      RI.isVGPR(MRI, Src1.getReg())) {
    Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
    const DebugLoc &DL = MI.getDebugLoc();
    BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src1);
    Src1.ChangeToRegister(Reg, false);
    return;
  }

  // We do not use commuteInstruction here because it is too aggressive and will
  // commute if it is possible. We only want to commute here if it improves
  // legality. This can be called a fairly large number of times so don't waste
  // compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI.isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
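  //
  // For example (schematically), "v_add_f32 v0, v1, s2" has an SGPR in src1,
  // where only VGPRs are accepted; commuting it to "v_add_f32 v0, s2, v1"
  // puts the SGPR in src0, which is legal, and avoids any extra instruction.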
4088 // 4089 // TODO: Other immediate-like operand kinds could be commuted if there was a 4090 // MachineOperand::ChangeTo* for them. 4091 if ((!Src1.isImm() && !Src1.isReg()) || 4092 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4093 legalizeOpWithMove(MI, Src1Idx); 4094 return; 4095 } 4096 4097 int CommutedOpc = commuteOpcode(MI); 4098 if (CommutedOpc == -1) { 4099 legalizeOpWithMove(MI, Src1Idx); 4100 return; 4101 } 4102 4103 MI.setDesc(get(CommutedOpc)); 4104 4105 Register Src0Reg = Src0.getReg(); 4106 unsigned Src0SubReg = Src0.getSubReg(); 4107 bool Src0Kill = Src0.isKill(); 4108 4109 if (Src1.isImm()) 4110 Src0.ChangeToImmediate(Src1.getImm()); 4111 else if (Src1.isReg()) { 4112 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4113 Src0.setSubReg(Src1.getSubReg()); 4114 } else 4115 llvm_unreachable("Should only have register or immediate operands"); 4116 4117 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4118 Src1.setSubReg(Src0SubReg); 4119 fixImplicitOperands(MI); 4120 } 4121 4122 // Legalize VOP3 operands. All operand types are supported for any operand 4123 // but only one literal constant and only starting from GFX10. 4124 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4125 MachineInstr &MI) const { 4126 unsigned Opc = MI.getOpcode(); 4127 4128 int VOP3Idx[3] = { 4129 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4130 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4131 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4132 }; 4133 4134 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4135 Opc == AMDGPU::V_PERMLANEX16_B32) { 4136 // src1 and src2 must be scalar 4137 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4138 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4139 const DebugLoc &DL = MI.getDebugLoc(); 4140 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4141 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4142 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4143 .add(Src1); 4144 Src1.ChangeToRegister(Reg, false); 4145 } 4146 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4147 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4148 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4149 .add(Src2); 4150 Src2.ChangeToRegister(Reg, false); 4151 } 4152 } 4153 4154 // Find the one SGPR operand we are allowed to use. 4155 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4156 int LiteralLimit = ST.hasVOP3Literal() ? 
                         1 : 0;
  SmallDenseSet<unsigned> SGPRsUsed;
  unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx);
  if (SGPRReg != AMDGPU::NoRegister) {
    SGPRsUsed.insert(SGPRReg);
    --ConstantBusLimit;
  }

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (Idx == -1)
      break;
    MachineOperand &MO = MI.getOperand(Idx);

    if (!MO.isReg()) {
      if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
        continue;

      if (LiteralLimit > 0 && ConstantBusLimit > 0) {
        --LiteralLimit;
        --ConstantBusLimit;
        continue;
      }

      --LiteralLimit;
      --ConstantBusLimit;
      legalizeOpWithMove(MI, Idx);
      continue;
    }

    if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
        !isOperandLegal(MI, Idx, &MO)) {
      legalizeOpWithMove(MI, Idx);
      continue;
    }

    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
      continue; // VGPRs are legal

    // We can use one SGPR in each VOP3 instruction prior to GFX10
    // and two starting from GFX10.
    if (SGPRsUsed.count(MO.getReg()))
      continue;
    if (ConstantBusLimit > 0) {
      SGPRsUsed.insert(MO.getReg());
      --ConstantBusLimit;
      continue;
    }

    // If we make it this far, then the operand is not legal and we must
    // legalize it.
    legalizeOpWithMove(MI, Idx);
  }
}

unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
                                         MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
  const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
  Register DstReg = MRI.createVirtualRegister(SRC);
  unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;

  if (RI.hasAGPRs(VRC)) {
    VRC = RI.getEquivalentVGPRClass(VRC);
    Register NewSrcReg = MRI.createVirtualRegister(VRC);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(TargetOpcode::COPY), NewSrcReg)
        .addReg(SrcReg);
    SrcReg = NewSrcReg;
  }

  if (SubRegs == 1) {
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
        .addReg(SrcReg);
    return DstReg;
  }

  SmallVector<unsigned, 8> SRegs;
  for (unsigned i = 0; i < SubRegs; ++i) {
    Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
        .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
    SRegs.push_back(SGPR);
  }

  MachineInstrBuilder MIB =
      BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
              get(AMDGPU::REG_SEQUENCE), DstReg);
  for (unsigned i = 0; i < SubRegs; ++i) {
    MIB.addReg(SRegs[i]);
    MIB.addImm(RI.getSubRegFromChannel(i));
  }
  return DstReg;
}

void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {

  // If the pointer is stored in VGPRs, then we need to move it to
  // SGPRs using v_readfirstlane. This is safe because we only select
  // loads with uniform pointers to SMRD instructions so we know the
  // pointer value is uniform.
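  //
  // Schematically, a 64-bit base held in v[2:3] becomes:
  //   v_readfirstlane_b32 s4, v2
  //   v_readfirstlane_b32 s5, v3
  // with s[4:5] then used as the sbase operand of the scalar load.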
  MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
  if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
    unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
    SBase->setReg(SGPR);
  }
  MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
  if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
    unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
    SOff->setReg(SGPR);
  }
}

void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
                                         MachineBasicBlock::iterator I,
                                         const TargetRegisterClass *DstRC,
                                         MachineOperand &Op,
                                         MachineRegisterInfo &MRI,
                                         const DebugLoc &DL) const {
  Register OpReg = Op.getReg();
  unsigned OpSubReg = Op.getSubReg();

  const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
      RI.getRegClassForReg(MRI, OpReg), OpSubReg);

  // Check if operand is already the correct register class.
  if (DstRC == OpRC)
    return;

  Register DstReg = MRI.createVirtualRegister(DstRC);
  MachineInstr *Copy =
      BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);

  Op.setReg(DstReg);
  Op.setSubReg(0);

  MachineInstr *Def = MRI.getVRegDef(OpReg);
  if (!Def)
    return;

  // Try to eliminate the copy if it is copying an immediate value.
  if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
    FoldImmediate(*Copy, *Def, OpReg, &MRI);

  bool ImpDef = Def->isImplicitDef();
  while (!ImpDef && Def && Def->isCopy()) {
    if (Def->getOperand(1).getReg().isPhysical())
      break;
    Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
    ImpDef = Def && Def->isImplicitDef();
  }
  if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
      !ImpDef)
    Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
}

// Emit the actual waterfall loop, executing the wrapped instruction for each
// unique value of \p Rsrc across all lanes. In the best case we execute 1
// iteration, in the worst case we execute 64 (once per lane).
static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
                          MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
                          const DebugLoc &DL, MachineOperand &Rsrc) {
  MachineFunction &MF = *OrigBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned SaveExecOpc =
      ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
  unsigned XorTermOpc =
      ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
  unsigned AndOpc =
      ST.isWave32() ?
          AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  MachineBasicBlock::iterator I = LoopBB.begin();

  Register VRsrc = Rsrc.getReg();
  unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
  Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
  Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
  Register AndCond = MRI.createVirtualRegister(BoolXExecRC);
  Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  // Beginning of the loop, read the next Rsrc variant.
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);

  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
      .addReg(SRsrcSub0)
      .addImm(AMDGPU::sub0)
      .addReg(SRsrcSub1)
      .addImm(AMDGPU::sub1)
      .addReg(SRsrcSub2)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcSub3)
      .addImm(AMDGPU::sub3);

  // Update Rsrc operand to use the SGPR Rsrc.
  Rsrc.setReg(SRsrc);
  Rsrc.setIsKill(true);

  // Identify all lanes with identical Rsrc operands in their VGPRs.
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0)
      .addReg(SRsrc, 0, AMDGPU::sub0_sub1)
      .addReg(VRsrc, 0, AMDGPU::sub0_sub1);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1)
      .addReg(SRsrc, 0, AMDGPU::sub2_sub3)
      .addReg(VRsrc, 0, AMDGPU::sub2_sub3);
  BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond)
      .addReg(CondReg0)
      .addReg(CondReg1);

  MRI.setSimpleHint(SaveExec, AndCond);

  // Update EXEC to matching lanes, saving original to SaveExec.
  BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
      .addReg(AndCond, RegState::Kill);

  // The original instruction is here; we insert the terminators after it.
  I = LoopBB.end();

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
      .addReg(Exec)
      .addReg(SaveExec);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
}

// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
// with SGPRs by iterating over all unique values across all lanes.
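//
// Roughly, for wave64 the structure built here together with
// emitLoadSRsrcFromVGPRLoop is:
//
//   ; original block:
//     s_mov_b64 s[save], exec
//   loop:
//     v_readfirstlane_b32 ...            ; pick one lane's rsrc (x4 dwords)
//     v_cmp_eq_u64 / s_and_b64 ...       ; find all lanes with that rsrc
//     s_and_saveexec_b64 ...             ; restrict exec to those lanes
//     <MI>                               ; the waterfalled instruction
//     s_xor_b64 exec, exec, ...          ; retire the handled lanes
//     s_cbranch_execnz loop
//   remainder:
//     s_mov_b64 exec, s[save]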
static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
                              MachineOperand &Rsrc, MachineDominatorTree *MDT) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock::iterator I(&MI);
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);

  // Save the EXEC mask
  BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);

  // Killed uses in the instruction we are waterfalling around will be
  // incorrect due to the added control-flow.
  for (auto &MO : MI.uses()) {
    if (MO.isReg() && MO.isUse()) {
      MRI.clearKillFlags(MO.getReg());
    }
  }

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move MI to the LoopBB, and the remainder of the block to RemainderBB.
  MachineBasicBlock::iterator J = I++;
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end());
  LoopBB->splice(LoopBB->begin(), &MBB, J);

  MBB.addSuccessor(LoopBB);

  // Update dominators. We know that MBB immediately dominates LoopBB, that
  // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
  // dominates all of the successors transferred to it from MBB that MBB used
  // to properly dominate.
  if (MDT) {
    MDT->addNewBlock(LoopBB, &MBB);
    MDT->addNewBlock(RemainderBB, LoopBB);
    for (auto &Succ : RemainderBB->successors()) {
      if (MDT->properlyDominates(&MBB, Succ)) {
        MDT->changeImmediateDominator(Succ, RemainderBB);
      }
    }
  }

  emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);

  // Restore the EXEC mask
  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
}

// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
static std::tuple<unsigned, unsigned>
extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the ptr from the resource descriptor.
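  // (The base pointer lives in the low two dwords of a buffer resource
  // descriptor, which is why sub0_sub1 is extracted here; the replacement
  // descriptor built below carries a zero base and the default data format.)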
  unsigned RsrcPtr =
      TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
                             AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

  // Create an empty resource descriptor
  Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
  uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();

  // Zero64 = 0
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
      .addImm(0);

  // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);

  // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
      .addImm(RsrcDataFormat >> 32);

  // NewSRsrc = {Zero64, SRsrcFormat}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
      .addReg(Zero64)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(SRsrcFormatLo)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcFormatHi)
      .addImm(AMDGPU::sub3);

  return std::make_tuple(RsrcPtr, NewSRsrc);
}

void SIInstrInfo::legalizeOperands(MachineInstr &MI,
                                   MachineDominatorTree *MDT) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Legalize VOP2
  if (isVOP2(MI) || isVOPC(MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return;
  }

  // Legalize VOP3
  if (isVOP3(MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Legalize SMRD
  if (isSMRD(MI)) {
    legalizeOperandsSMRD(MRI, MI);
    return;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI.getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
      if (!MI.getOperand(i).isReg() ||
          !Register::isVirtualRegister(MI.getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI.getOperand(i).getReg());
      if (RI.hasVectorRegisters(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
      if (!VRC) {
        assert(SRC);
        if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
          VRC = &AMDGPU::VReg_1RegClass;
        } else
          VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                    ? RI.getEquivalentAGPRClass(SRC)
                    : RI.getEquivalentVGPRClass(SRC);
      } else {
        VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                  ? RI.getEquivalentAGPRClass(VRC)
                  : RI.getEquivalentVGPRClass(VRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
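    // For example, a PHI merging an SGPR input from one predecessor with a
    // VGPR input from another gets a VGPR result class, and the SGPR input
    // is copied to a VGPR at the end of its predecessor block.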
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
      MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
        continue;

      // MI is a PHI instruction.
      MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
      MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();

      // Avoid creating no-op copies with the same src and dst reg class. These
      // confuse some of the machine passes.
      legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
    }
  }

  // REG_SEQUENCE doesn't really require operand legalization, but if one has a
  // VGPR dest type and SGPR sources, insert copies so all operands are
  // VGPRs. This seems to help operand folding / the register coalescer.
  if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
    MachineBasicBlock *MBB = MI.getParent();
    const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
    if (RI.hasVGPRs(DstRC)) {
      // Update all the operands so they are VGPR register classes. These may
      // not be the same register class because REG_SEQUENCE supports mixing
      // subregister index types e.g. sub0_sub1 + sub2 + sub3
      for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
          continue;

        const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
        const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
        if (VRC == OpRC)
          continue;

        legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
        Op.setIsKill();
      }
    }

    return;
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
    Register Dst = MI.getOperand(0).getReg();
    Register Src0 = MI.getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock *MBB = MI.getParent();
      MachineOperand &Op = MI.getOperand(1);
      legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
    }
    return;
  }

  // Legalize SI_INIT_M0
  if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
    MachineOperand &Src = MI.getOperand(0);
    if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
      Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
    return;
  }

  // Legalize MIMG and MUBUF/MTBUF for shaders.
  //
  // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
  // scratch memory access. In both cases, the legalization never involves
  // conversion to the addr64 form.
  if (isMIMG(MI) ||
      (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
       (isMUBUF(MI) || isMTBUF(MI)))) {
    MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
      unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
      SRsrc->setReg(SGPR);
    }

    MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
      unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
      SSamp->setReg(SGPR);
    }
    return;
  }

  // Legalize MUBUF* instructions.
  int RsrcIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
  if (RsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
                             RI.getRegClass(RsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    // Legalize a VGPR Rsrc.
    //
    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
    // a zero-value SRsrc.
    //
    // If the instruction is _OFFSET (both idxen and offen disabled), and we
    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
    // above.
    //
    // Otherwise we are on non-ADDR64 hardware, and/or we have
    // idxen/offen/bothen and we fall back to a waterfall loop.

    MachineBasicBlock &MBB = *MI.getParent();

    MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
      Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
      Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
      Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e64), NewVAddrLo)
          .addDef(CondReg0)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
          .addImm(0);

      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
          .addDef(CondReg1, RegState::Dead)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
          .addReg(CondReg0, RegState::Kill)
          .addImm(0);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);

      VAddr->setReg(NewVAddr);
      Rsrc->setReg(NewSRsrc);
    } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
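      //
      // Roughly, a load whose rsrc ended up in VGPRs, say v[4:7]:
      //   buffer_load_dword v0, v[4:7], 0 offset:16   ; not encodable
      // becomes
      //   v[2:3]  = v[4:5]                            ; pointer from the rsrc
      //   s[8:11] = { 0, 0, RSRC_DATA_FORMAT }        ; zero-based rsrc
      //   buffer_load_dword v0, v[2:3], s[8:11], 0 addr64 offset:16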
      assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
                 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                .add(*VData)
                .addReg(NewVAddr)
                .addReg(NewSRsrc)
                .add(*SOffset)
                .add(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC =
                getNamedOperand(MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }
        if (const MachineOperand *DLC =
                getNamedOperand(MI, AMDGPU::OpName::dlc)) {
          MIB.addImm(DLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE =
                getNamedOperand(MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));

        MIB.cloneMemRefs(MI);
        Addr64 = MIB;
      } else {
        // Atomics with return.
        Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                     .add(*VData)
                     .add(*VDataIn)
                     .addReg(NewVAddr)
                     .addReg(NewSRsrc)
                     .add(*SOffset)
                     .add(*Offset)
                     .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
                     .cloneMemRefs(MI);
      }

      MI.removeFromParent();

      // NewVaddr = {RsrcPtr:sub0, RsrcPtr:sub1}
      BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addImm(AMDGPU::sub0)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addImm(AMDGPU::sub1);
    } else {
      // This is another variant; legalize Rsrc with waterfall loop from VGPRs
      // to SGPRs.
      loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
    }
  }
}

void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
                             MachineDominatorTree *MDT) const {
  SetVectorType Worklist;
  Worklist.insert(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr &Inst = *Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst.getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst.getOpcode();
    unsigned NewOpcode = getVALUOp(Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      break;
    case AMDGPU::S_ADD_U64_PSEUDO:
    case AMDGPU::S_SUB_U64_PSEUDO:
      splitScalar64BitAddSub(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;
    case AMDGPU::S_ADD_I32:
    case AMDGPU::S_SUB_I32:
      // FIXME: The u32 versions currently selected use the carry.
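      // (On targets with add-no-carry, moveScalarAddSub below rewrites these
      // to v_add_u32_e64 / v_sub_u32_e64 and drops the dead SCC def.)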
      if (moveScalarAddSub(Worklist, Inst, MDT))
        continue;

      // Default handling
      break;
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NAND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XNOR_B64:
      if (ST.hasDLInsts())
        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
      else
        splitScalar64BitXnor(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ANDN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ORN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BFE_I64:
      splitScalar64BitBFE(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_LSHL_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHL_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B64;
        swapOperands(Inst);
      }
      break;

    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_CBRANCH_SCC0:
    case AMDGPU::S_CBRANCH_SCC1:
      // Clear unused bits of vcc
      if (ST.isWave32())
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
                AMDGPU::VCC_LO)
            .addReg(AMDGPU::EXEC_LO)
            .addReg(AMDGPU::VCC_LO);
      else
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
                AMDGPU::VCC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);
      break;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");

    case AMDGPU::S_PACK_LL_B32_B16:
    case AMDGPU::S_PACK_LH_B32_B16:
    case AMDGPU::S_PACK_HH_B32_B16:
      movePackToVALU(Worklist, MRI, Inst);
Inst.eraseFromParent();
4970 continue;
4971
4972 case AMDGPU::S_XNOR_B32:
4973 lowerScalarXnor(Worklist, Inst);
4974 Inst.eraseFromParent();
4975 continue;
4976
4977 case AMDGPU::S_NAND_B32:
4978 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
4979 Inst.eraseFromParent();
4980 continue;
4981
4982 case AMDGPU::S_NOR_B32:
4983 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
4984 Inst.eraseFromParent();
4985 continue;
4986
4987 case AMDGPU::S_ANDN2_B32:
4988 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
4989 Inst.eraseFromParent();
4990 continue;
4991
4992 case AMDGPU::S_ORN2_B32:
4993 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
4994 Inst.eraseFromParent();
4995 continue;
4996 }
4997
4998 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
4999 // We cannot move this instruction to the VALU, so we should try to
5000 // legalize its operands instead.
5001 legalizeOperands(Inst, MDT);
5002 continue;
5003 }
5004
5005 // Use the new VALU Opcode.
5006 const MCInstrDesc &NewDesc = get(NewOpcode);
5007 Inst.setDesc(NewDesc);
5008
5009 // Remove any references to SCC. Vector instructions can't read from it, and
5010 // we're just about to add the implicit use / defs of VCC; we don't want
5011 // both.
5012 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
5013 MachineOperand &Op = Inst.getOperand(i);
5014 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
5015 // Only propagate through live-def of SCC.
5016 if (Op.isDef() && !Op.isDead())
5017 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
5018 Inst.RemoveOperand(i);
5019 }
5020 }
5021
5022 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
5023 // We are converting these to a BFE, so we need to add the missing
5024 // operands for the size and offset.
5025 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
5026 Inst.addOperand(MachineOperand::CreateImm(0));
5027 Inst.addOperand(MachineOperand::CreateImm(Size));
5028
5029 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
5030 // The VALU version adds the second operand to the result, so insert an
5031 // extra 0 operand.
5032 Inst.addOperand(MachineOperand::CreateImm(0));
5033 }
5034
5035 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
5036 fixImplicitOperands(Inst);
5037
5038 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
5039 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
5040 // If we need to move this to VGPRs, we need to unpack the second operand
5041 // back into the 2 separate ones for bit offset and width.
5042 assert(OffsetWidthOp.isImm() &&
5043 "Scalar BFE is only implemented for constant width and offset");
5044 uint32_t Imm = OffsetWidthOp.getImm();
5045
5046 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5047 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5048 Inst.RemoveOperand(2); // Remove old immediate.
5049 Inst.addOperand(MachineOperand::CreateImm(Offset));
5050 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
5051 }
5052
5053 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
5054 unsigned NewDstReg = AMDGPU::NoRegister;
5055 if (HasDst) {
5056 Register DstReg = Inst.getOperand(0).getReg();
5057 if (Register::isPhysicalRegister(DstReg))
5058 continue;
5059
5060 // Update the destination register class.
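// getDestEquivalentVGPRClass maps the current (scalar) destination class
// to its vector equivalent; a null result means there is nothing to
// rewrite for this destination.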
5061 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
5062 if (!NewDstRC)
5063 continue;
5064
5065 if (Inst.isCopy() &&
5066 Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
5067 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
5068 // Instead of creating a copy where src and dst are the same register
5069 // class, we just replace all uses of dst with src. These kinds of
5070 // copies interfere with the heuristics MachineSink uses to decide
5071 // whether or not to split a critical edge, since that pass assumes
5072 // copies will end up as machine instructions and not be
5073 // eliminated.
5074 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
5075 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
5076 MRI.clearKillFlags(Inst.getOperand(1).getReg());
5077 Inst.getOperand(0).setReg(DstReg);
5078
5079 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
5080 // these are deleted later, but at -O0 it would leave a
5081 // suspicious-looking illegal copy of an undef register.
5082 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
5083 Inst.RemoveOperand(I);
5084 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
5085 continue;
5086 }
5087
5088 NewDstReg = MRI.createVirtualRegister(NewDstRC);
5089 MRI.replaceRegWith(DstReg, NewDstReg);
5090 }
5091
5092 // Legalize the operands
5093 legalizeOperands(Inst, MDT);
5094
5095 if (HasDst)
5096 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
5097 }
5098 }
5099
5100 // Add/sub require special handling to deal with carry outs.
5101 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
5102 MachineDominatorTree *MDT) const {
5103 if (ST.hasAddNoCarry()) {
5104 // Assume there is no user of scc since we don't select this in that case.
5105 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
5106 // is used.
5107
5108 MachineBasicBlock &MBB = *Inst.getParent();
5109 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5110
5111 Register OldDstReg = Inst.getOperand(0).getReg();
5112 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5113
5114 unsigned Opc = Inst.getOpcode();
5115 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
5116
5117 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
5118 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
5119
5120 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
5121 Inst.RemoveOperand(3);
5122
5123 Inst.setDesc(get(NewOpc));
5124 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
5125 Inst.addImplicitDefUseOperands(*MBB.getParent());
5126 MRI.replaceRegWith(OldDstReg, ResultReg);
5127 legalizeOperands(Inst, MDT);
5128
5129 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5130 return true;
5131 }
5132
5133 return false;
5134 }
5135
5136 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
5137 MachineInstr &Inst) const {
5138 MachineBasicBlock &MBB = *Inst.getParent();
5139 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5140 MachineBasicBlock::iterator MII = Inst;
5141 DebugLoc DL = Inst.getDebugLoc();
5142
5143 MachineOperand &Dest = Inst.getOperand(0);
5144 MachineOperand &Src = Inst.getOperand(1);
5145 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5146 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5147
5148 unsigned SubOp = ST.hasAddNoCarry() ?
5149 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
5150
5151 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
5152 .addImm(0)
5153 .addReg(Src.getReg());
5154
5155 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
5156 .addReg(Src.getReg())
5157 .addReg(TmpReg);
5158
5159 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5160 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5161 }
5162
5163 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
5164 MachineInstr &Inst) const {
5165 MachineBasicBlock &MBB = *Inst.getParent();
5166 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5167 MachineBasicBlock::iterator MII = Inst;
5168 const DebugLoc &DL = Inst.getDebugLoc();
5169
5170 MachineOperand &Dest = Inst.getOperand(0);
5171 MachineOperand &Src0 = Inst.getOperand(1);
5172 MachineOperand &Src1 = Inst.getOperand(2);
5173
5174 if (ST.hasDLInsts()) {
5175 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5176 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
5177 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
5178
5179 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
5180 .add(Src0)
5181 .add(Src1);
5182
5183 MRI.replaceRegWith(Dest.getReg(), NewDest);
5184 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5185 } else {
5186 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
5187 // invert either source and then perform the XOR. If either source is a
5188 // scalar register, then we can leave the inversion on the scalar unit to
5189 // achieve a better distribution of scalar and vector instructions.
5190 bool Src0IsSGPR = Src0.isReg() &&
5191 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
5192 bool Src1IsSGPR = Src1.isReg() &&
5193 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
5194 MachineInstr *Xor;
5195 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5196 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5197
5198 // Build a pair of scalar instructions and add them to the work list.
5199 // The next iteration over the work list will lower these to the vector
5200 // unit as necessary.
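// Prefer to invert whichever source is already an SGPR so the S_NOT can
// stay on the SALU; if neither source is scalar, XOR first and invert the
// result instead.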
5201 if (Src0IsSGPR) { 5202 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5203 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5204 .addReg(Temp) 5205 .add(Src1); 5206 } else if (Src1IsSGPR) { 5207 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5208 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5209 .add(Src0) 5210 .addReg(Temp); 5211 } else { 5212 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5213 .add(Src0) 5214 .add(Src1); 5215 MachineInstr *Not = 5216 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5217 Worklist.insert(Not); 5218 } 5219 5220 MRI.replaceRegWith(Dest.getReg(), NewDest); 5221 5222 Worklist.insert(Xor); 5223 5224 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5225 } 5226 } 5227 5228 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5229 MachineInstr &Inst, 5230 unsigned Opcode) const { 5231 MachineBasicBlock &MBB = *Inst.getParent(); 5232 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5233 MachineBasicBlock::iterator MII = Inst; 5234 const DebugLoc &DL = Inst.getDebugLoc(); 5235 5236 MachineOperand &Dest = Inst.getOperand(0); 5237 MachineOperand &Src0 = Inst.getOperand(1); 5238 MachineOperand &Src1 = Inst.getOperand(2); 5239 5240 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5241 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5242 5243 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5244 .add(Src0) 5245 .add(Src1); 5246 5247 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5248 .addReg(Interm); 5249 5250 Worklist.insert(&Op); 5251 Worklist.insert(&Not); 5252 5253 MRI.replaceRegWith(Dest.getReg(), NewDest); 5254 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5255 } 5256 5257 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5258 MachineInstr &Inst, 5259 unsigned Opcode) const { 5260 MachineBasicBlock &MBB = *Inst.getParent(); 5261 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5262 MachineBasicBlock::iterator MII = Inst; 5263 const DebugLoc &DL = Inst.getDebugLoc(); 5264 5265 MachineOperand &Dest = Inst.getOperand(0); 5266 MachineOperand &Src0 = Inst.getOperand(1); 5267 MachineOperand &Src1 = Inst.getOperand(2); 5268 5269 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5270 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5271 5272 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5273 .add(Src1); 5274 5275 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5276 .add(Src0) 5277 .addReg(Interm); 5278 5279 Worklist.insert(&Not); 5280 Worklist.insert(&Op); 5281 5282 MRI.replaceRegWith(Dest.getReg(), NewDest); 5283 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5284 } 5285 5286 void SIInstrInfo::splitScalar64BitUnaryOp( 5287 SetVectorType &Worklist, MachineInstr &Inst, 5288 unsigned Opcode) const { 5289 MachineBasicBlock &MBB = *Inst.getParent(); 5290 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5291 5292 MachineOperand &Dest = Inst.getOperand(0); 5293 MachineOperand &Src0 = Inst.getOperand(1); 5294 DebugLoc DL = Inst.getDebugLoc(); 5295 5296 MachineBasicBlock::iterator MII = Inst; 5297 5298 const MCInstrDesc &InstDesc = get(Opcode); 5299 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
5300 MRI.getRegClass(Src0.getReg()) : 5301 &AMDGPU::SGPR_32RegClass; 5302 5303 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5304 5305 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5306 AMDGPU::sub0, Src0SubRC); 5307 5308 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5309 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5310 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5311 5312 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5313 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5314 5315 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5316 AMDGPU::sub1, Src0SubRC); 5317 5318 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5319 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5320 5321 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5322 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5323 .addReg(DestSub0) 5324 .addImm(AMDGPU::sub0) 5325 .addReg(DestSub1) 5326 .addImm(AMDGPU::sub1); 5327 5328 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5329 5330 Worklist.insert(&LoHalf); 5331 Worklist.insert(&HiHalf); 5332 5333 // We don't need to legalizeOperands here because for a single operand, src0 5334 // will support any kind of input. 5335 5336 // Move all users of this moved value. 5337 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5338 } 5339 5340 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 5341 MachineInstr &Inst, 5342 MachineDominatorTree *MDT) const { 5343 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 5344 5345 MachineBasicBlock &MBB = *Inst.getParent(); 5346 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5347 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5348 5349 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5350 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5351 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5352 5353 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5354 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 5355 5356 MachineOperand &Dest = Inst.getOperand(0); 5357 MachineOperand &Src0 = Inst.getOperand(1); 5358 MachineOperand &Src1 = Inst.getOperand(2); 5359 const DebugLoc &DL = Inst.getDebugLoc(); 5360 MachineBasicBlock::iterator MII = Inst; 5361 5362 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 5363 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 5364 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5365 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5366 5367 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5368 AMDGPU::sub0, Src0SubRC); 5369 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5370 AMDGPU::sub0, Src1SubRC); 5371 5372 5373 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5374 AMDGPU::sub1, Src0SubRC); 5375 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5376 AMDGPU::sub1, Src1SubRC); 5377 5378 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
5379 MachineInstr *LoHalf =
5380 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
5381 .addReg(CarryReg, RegState::Define)
5382 .add(SrcReg0Sub0)
5383 .add(SrcReg1Sub0)
5384 .addImm(0); // clamp bit
5385
5386 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
5387 MachineInstr *HiHalf =
5388 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
5389 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
5390 .add(SrcReg0Sub1)
5391 .add(SrcReg1Sub1)
5392 .addReg(CarryReg, RegState::Kill)
5393 .addImm(0); // clamp bit
5394
5395 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5396 .addReg(DestSub0)
5397 .addImm(AMDGPU::sub0)
5398 .addReg(DestSub1)
5399 .addImm(AMDGPU::sub1);
5400
5401 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5402
5403 // Try to legalize the operands in case we need to swap the order to keep it
5404 // valid.
5405 legalizeOperands(*LoHalf, MDT);
5406 legalizeOperands(*HiHalf, MDT);
5407
5408 // Move all users of this moved value.
5409 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5410 }
5411
5412 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
5413 MachineInstr &Inst, unsigned Opcode,
5414 MachineDominatorTree *MDT) const {
5415 MachineBasicBlock &MBB = *Inst.getParent();
5416 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5417
5418 MachineOperand &Dest = Inst.getOperand(0);
5419 MachineOperand &Src0 = Inst.getOperand(1);
5420 MachineOperand &Src1 = Inst.getOperand(2);
5421 DebugLoc DL = Inst.getDebugLoc();
5422
5423 MachineBasicBlock::iterator MII = Inst;
5424
5425 const MCInstrDesc &InstDesc = get(Opcode);
5426 const TargetRegisterClass *Src0RC = Src0.isReg() ?
5427 MRI.getRegClass(Src0.getReg()) :
5428 &AMDGPU::SGPR_32RegClass;
5429
5430 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5431 const TargetRegisterClass *Src1RC = Src1.isReg() ?
5432 MRI.getRegClass(Src1.getReg()) :
5433 &AMDGPU::SGPR_32RegClass;
5434
5435 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5436
5437 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5438 AMDGPU::sub0, Src0SubRC);
5439 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5440 AMDGPU::sub0, Src1SubRC);
5441 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5442 AMDGPU::sub1, Src0SubRC);
5443 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5444 AMDGPU::sub1, Src1SubRC);
5445
5446 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5447 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5448 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5449
5450 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
5451 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
5452 .add(SrcReg0Sub0)
5453 .add(SrcReg1Sub0);
5454
5455 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
5456 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
5457 .add(SrcReg0Sub1)
5458 .add(SrcReg1Sub1);
5459
5460 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
5461 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5462 .addReg(DestSub0)
5463 .addImm(AMDGPU::sub0)
5464 .addReg(DestSub1)
5465 .addImm(AMDGPU::sub1);
5466
5467 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5468
5469 Worklist.insert(&LoHalf);
5470 Worklist.insert(&HiHalf);
5471
5472 // Move all users of this moved value.
5473 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5474 }
5475
5476 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
5477 MachineInstr &Inst,
5478 MachineDominatorTree *MDT) const {
5479 MachineBasicBlock &MBB = *Inst.getParent();
5480 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5481
5482 MachineOperand &Dest = Inst.getOperand(0);
5483 MachineOperand &Src0 = Inst.getOperand(1);
5484 MachineOperand &Src1 = Inst.getOperand(2);
5485 const DebugLoc &DL = Inst.getDebugLoc();
5486
5487 MachineBasicBlock::iterator MII = Inst;
5488
5489 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5490
5491 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5492
5493 MachineOperand* Op0;
5494 MachineOperand* Op1;
5495
5496 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
5497 Op0 = &Src0;
5498 Op1 = &Src1;
5499 } else {
5500 Op0 = &Src1;
5501 Op1 = &Src0;
5502 }
5503
5504 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
5505 .add(*Op0);
5506
5507 Register NewDest = MRI.createVirtualRegister(DestRC);
5508
5509 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
5510 .addReg(Interm)
5511 .add(*Op1);
5512
5513 MRI.replaceRegWith(Dest.getReg(), NewDest);
5514
5515 Worklist.insert(&Xor);
5516 }
5517
5518 void SIInstrInfo::splitScalar64BitBCNT(
5519 SetVectorType &Worklist, MachineInstr &Inst) const {
5520 MachineBasicBlock &MBB = *Inst.getParent();
5521 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5522
5523 MachineBasicBlock::iterator MII = Inst;
5524 const DebugLoc &DL = Inst.getDebugLoc();
5525
5526 MachineOperand &Dest = Inst.getOperand(0);
5527 MachineOperand &Src = Inst.getOperand(1);
5528
5529 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
5530 const TargetRegisterClass *SrcRC = Src.isReg() ?
5531 MRI.getRegClass(Src.getReg()) :
5532 &AMDGPU::SGPR_32RegClass;
5533
5534 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5535 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5536
5537 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
5538
5539 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
5540 AMDGPU::sub0, SrcSubRC);
5541 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
5542 AMDGPU::sub1, SrcSubRC);
5543
5544 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
5545
5546 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
5547
5548 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5549
5550 // We don't need to legalize operands here. src0 for either instruction can be
5551 // an SGPR, and the second input is unused or determined here.
5552 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5553 }
5554
5555 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
5556 MachineInstr &Inst) const {
5557 MachineBasicBlock &MBB = *Inst.getParent();
5558 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5559 MachineBasicBlock::iterator MII = Inst;
5560 const DebugLoc &DL = Inst.getDebugLoc();
5561
5562 MachineOperand &Dest = Inst.getOperand(0);
5563 uint32_t Imm = Inst.getOperand(2).getImm();
5564 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5565 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5566
5567 (void) Offset;
5568
5569 // Only sext_inreg cases handled.
5570 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
5571 Offset == 0 && "Not implemented");
5572
5573 if (BitWidth < 32) {
5574 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5575 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5576 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5577
5578 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
5579 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
5580 .addImm(0)
5581 .addImm(BitWidth);
5582
5583 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
5584 .addImm(31)
5585 .addReg(MidRegLo);
5586
5587 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
5588 .addReg(MidRegLo)
5589 .addImm(AMDGPU::sub0)
5590 .addReg(MidRegHi)
5591 .addImm(AMDGPU::sub1);
5592
5593 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5594 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5595 return;
5596 }
5597
5598 MachineOperand &Src = Inst.getOperand(1);
5599 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5600 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5601
5602 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
5603 .addImm(31)
5604 .addReg(Src.getReg(), 0, AMDGPU::sub0);
5605
5606 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
5607 .addReg(Src.getReg(), 0, AMDGPU::sub0)
5608 .addImm(AMDGPU::sub0)
5609 .addReg(TmpReg)
5610 .addImm(AMDGPU::sub1);
5611
5612 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5613 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5614 }
5615
5616 void SIInstrInfo::addUsersToMoveToVALUWorklist(
5617 unsigned DstReg,
5618 MachineRegisterInfo &MRI,
5619 SetVectorType &Worklist) const {
5620 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
5621 E = MRI.use_end(); I != E;) {
5622 MachineInstr &UseMI = *I->getParent();
5623
5624 unsigned OpNo = 0;
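// For the generic pseudos handled in the switch below, the register class
// of operand 0 (the def) decides whether the user must move to the VALU;
// for all other instructions, look at the actual use operand.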
5625 5626 switch (UseMI.getOpcode()) { 5627 case AMDGPU::COPY: 5628 case AMDGPU::WQM: 5629 case AMDGPU::SOFT_WQM: 5630 case AMDGPU::WWM: 5631 case AMDGPU::REG_SEQUENCE: 5632 case AMDGPU::PHI: 5633 case AMDGPU::INSERT_SUBREG: 5634 break; 5635 default: 5636 OpNo = I.getOperandNo(); 5637 break; 5638 } 5639 5640 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 5641 Worklist.insert(&UseMI); 5642 5643 do { 5644 ++I; 5645 } while (I != E && I->getParent() == &UseMI); 5646 } else { 5647 ++I; 5648 } 5649 } 5650 } 5651 5652 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 5653 MachineRegisterInfo &MRI, 5654 MachineInstr &Inst) const { 5655 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5656 MachineBasicBlock *MBB = Inst.getParent(); 5657 MachineOperand &Src0 = Inst.getOperand(1); 5658 MachineOperand &Src1 = Inst.getOperand(2); 5659 const DebugLoc &DL = Inst.getDebugLoc(); 5660 5661 switch (Inst.getOpcode()) { 5662 case AMDGPU::S_PACK_LL_B32_B16: { 5663 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5664 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5665 5666 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 5667 // 0. 5668 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5669 .addImm(0xffff); 5670 5671 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 5672 .addReg(ImmReg, RegState::Kill) 5673 .add(Src0); 5674 5675 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 5676 .add(Src1) 5677 .addImm(16) 5678 .addReg(TmpReg, RegState::Kill); 5679 break; 5680 } 5681 case AMDGPU::S_PACK_LH_B32_B16: { 5682 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5683 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5684 .addImm(0xffff); 5685 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 5686 .addReg(ImmReg, RegState::Kill) 5687 .add(Src0) 5688 .add(Src1); 5689 break; 5690 } 5691 case AMDGPU::S_PACK_HH_B32_B16: { 5692 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5693 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5694 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 5695 .addImm(16) 5696 .add(Src0); 5697 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5698 .addImm(0xffff0000); 5699 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 5700 .add(Src1) 5701 .addReg(ImmReg, RegState::Kill) 5702 .addReg(TmpReg, RegState::Kill); 5703 break; 5704 } 5705 default: 5706 llvm_unreachable("unhandled s_pack_* instruction"); 5707 } 5708 5709 MachineOperand &Dest = Inst.getOperand(0); 5710 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5711 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5712 } 5713 5714 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 5715 MachineInstr &SCCDefInst, 5716 SetVectorType &Worklist) const { 5717 // Ensure that def inst defines SCC, which is still live. 5718 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 5719 !Op.isDead() && Op.getParent() == &SCCDefInst); 5720 // This assumes that all the users of SCC are in the same block 5721 // as the SCC def. 5722 for (MachineInstr &MI : // Skip the def inst itself. 5723 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 5724 SCCDefInst.getParent()->end())) { 5725 // Check if SCC is used first. 5726 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) 5727 Worklist.insert(&MI); 5728 // Exit if we find another SCC def. 
5729 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
5730 return;
5731 }
5732 }
5733
5734 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
5735 const MachineInstr &Inst) const {
5736 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
5737
5738 switch (Inst.getOpcode()) {
5739 // For target instructions, getOpRegClass just returns the virtual register
5740 // class associated with the operand, so we need to find an equivalent VGPR
5741 // register class in order to move the instruction to the VALU.
5742 case AMDGPU::COPY:
5743 case AMDGPU::PHI:
5744 case AMDGPU::REG_SEQUENCE:
5745 case AMDGPU::INSERT_SUBREG:
5746 case AMDGPU::WQM:
5747 case AMDGPU::SOFT_WQM:
5748 case AMDGPU::WWM: {
5749 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
5750 if (RI.hasAGPRs(SrcRC)) {
5751 if (RI.hasAGPRs(NewDstRC))
5752 return nullptr;
5753
5754 switch (Inst.getOpcode()) {
5755 case AMDGPU::PHI:
5756 case AMDGPU::REG_SEQUENCE:
5757 case AMDGPU::INSERT_SUBREG:
5758 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
5759 break;
5760 default:
5761 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5762 }
5763
5764 if (!NewDstRC)
5765 return nullptr;
5766 } else {
5767 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
5768 return nullptr;
5769
5770 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5771 if (!NewDstRC)
5772 return nullptr;
5773 }
5774
5775 return NewDstRC;
5776 }
5777 default:
5778 return NewDstRC;
5779 }
5780 }
5781
5782 // Find the one SGPR operand we are allowed to use.
5783 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
5784 int OpIndices[3]) const {
5785 const MCInstrDesc &Desc = MI.getDesc();
5786
5787 // Find the one SGPR operand we are allowed to use.
5788 //
5789 // First we need to consider the instruction's operand requirements before
5790 // legalizing. Some operands are required to be SGPRs, such as implicit uses
5791 // of VCC, but we are still bound by the constant bus requirement to only use
5792 // one.
5793 //
5794 // If the operand's class is an SGPR, we can never move it.
5795
5796 unsigned SGPRReg = findImplicitSGPRRead(MI);
5797 if (SGPRReg != AMDGPU::NoRegister)
5798 return SGPRReg;
5799
5800 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
5801 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
5802
5803 for (unsigned i = 0; i < 3; ++i) {
5804 int Idx = OpIndices[i];
5805 if (Idx == -1)
5806 break;
5807
5808 const MachineOperand &MO = MI.getOperand(Idx);
5809 if (!MO.isReg())
5810 continue;
5811
5812 // Is this operand statically required to be an SGPR based on the operand
5813 // constraints?
5814 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
5815 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
5816 if (IsRequiredSGPR)
5817 return MO.getReg();
5818
5819 // If this could be a VGPR or an SGPR, check the dynamic register class.
5820 Register Reg = MO.getReg();
5821 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
5822 if (RI.isSGPRClass(RegRC))
5823 UsedSGPRs[i] = Reg;
5824 }
5825
5826 // We don't have a required SGPR operand, so we have a bit more freedom in
5827 // selecting operands to move.
5828
5829 // Try to select the most used SGPR. If an SGPR is equal to one of the
5830 // others, we choose that.
5831 //
5832 // e.g.
5833 // V_FMA_F32 v0, s0, s0, s0 -> No moves
5834 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
5835
5836 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
5837 // prefer those.
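// Picking an SGPR that appears in more than one operand is free: reading
// the same SGPR repeatedly still only counts as one constant bus use.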
5838
5839 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
5840 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
5841 SGPRReg = UsedSGPRs[0];
5842 }
5843
5844 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
5845 if (UsedSGPRs[1] == UsedSGPRs[2])
5846 SGPRReg = UsedSGPRs[1];
5847 }
5848
5849 return SGPRReg;
5850 }
5851
5852 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
5853 unsigned OperandName) const {
5854 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
5855 if (Idx == -1)
5856 return nullptr;
5857
5858 return &MI.getOperand(Idx);
5859 }
5860
5861 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
5862 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
5863 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT
5864 (1ULL << 56) | // RESOURCE_LEVEL = 1
5865 (3ULL << 60); // OOB_SELECT = 3
5866 }
5867
5868 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
5869 if (ST.isAmdHsaOS()) {
5870 // Set ATC = 1. GFX9 doesn't have this bit.
5871 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5872 RsrcDataFormat |= (1ULL << 56);
5873
5874 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
5875 // Note that it disables TC L2 and therefore decreases performance.
5876 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
5877 RsrcDataFormat |= (2ULL << 59);
5878 }
5879
5880 return RsrcDataFormat;
5881 }
5882
5883 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
5884 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
5885 AMDGPU::RSRC_TID_ENABLE |
5886 0xffffffff; // Size
5887
5888 // GFX9 doesn't have ELEMENT_SIZE.
5889 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
5890 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
5891 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
5892 }
5893
5894 // IndexStride = 64 for wave64, 32 for wave32 (encoded as 3 and 2).
5895 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
5896 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
5897
5898 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
5899 // Clear them unless we want a huge stride.
5900 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5901 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
5902 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
5903
5904 return Rsrc23;
5905 }
5906
5907 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
5908 unsigned Opc = MI.getOpcode();
5909
5910 return isSMRD(Opc);
5911 }
5912
5913 bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
5914 unsigned Opc = MI.getOpcode();
5915
5916 return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
5917 }
5918
5919 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
5920 int &FrameIndex) const {
5921 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5922 if (!Addr || !Addr->isFI())
5923 return AMDGPU::NoRegister;
5924
5925 assert(!MI.memoperands_empty() &&
5926 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
5927
5928 FrameIndex = Addr->getIndex();
5929 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
5930 }
5931
5932 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
5933 int &FrameIndex) const {
5934 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
5935 assert(Addr && Addr->isFI());
5936 FrameIndex = Addr->getIndex();
5937 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
5938 }
5939
5940 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
5941 int &FrameIndex) const {
5942 if (!MI.mayLoad())
5943 return AMDGPU::NoRegister;
5944
5945 if (isMUBUF(MI) || isVGPRSpill(MI))
5946 return isStackAccess(MI, FrameIndex);
5947
5948 if (isSGPRSpill(MI))
5949 return isSGPRStackAccess(MI, FrameIndex);
5950
5951 return AMDGPU::NoRegister;
5952 }
5953
5954 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
5955 int &FrameIndex) const {
5956 if (!MI.mayStore())
5957 return AMDGPU::NoRegister;
5958
5959 if (isMUBUF(MI) || isVGPRSpill(MI))
5960 return isStackAccess(MI, FrameIndex);
5961
5962 if (isSGPRSpill(MI))
5963 return isSGPRStackAccess(MI, FrameIndex);
5964
5965 return AMDGPU::NoRegister;
5966 }
5967
5968 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
5969 unsigned Size = 0;
5970 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
5971 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
5972 while (++I != E && I->isInsideBundle()) {
5973 assert(!I->isBundle() && "No nested bundle!");
5974 Size += getInstSizeInBytes(*I);
5975 }
5976
5977 return Size;
5978 }
5979
5980 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
5981 unsigned Opc = MI.getOpcode();
5982 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
5983 unsigned DescSize = Desc.getSize();
5984
5985 // If we have a definitive size, we can use it. Otherwise we need to inspect
5986 // the operands to know the size.
5987 if (isFixedSize(MI))
5988 return DescSize;
5989
5990 // 4-byte instructions may have a 32-bit literal encoded after them. Check
5991 // operands that could ever be literals.
5992 if (isVALU(MI) || isSALU(MI)) {
5993 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
5994 if (Src0Idx == -1)
5995 return DescSize; // No operands.
5996
5997 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
5998 return isVOP3(MI) ?
12 : (DescSize + 4); 5999 6000 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 6001 if (Src1Idx == -1) 6002 return DescSize; 6003 6004 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) 6005 return isVOP3(MI) ? 12 : (DescSize + 4); 6006 6007 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); 6008 if (Src2Idx == -1) 6009 return DescSize; 6010 6011 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) 6012 return isVOP3(MI) ? 12 : (DescSize + 4); 6013 6014 return DescSize; 6015 } 6016 6017 // Check whether we have extra NSA words. 6018 if (isMIMG(MI)) { 6019 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 6020 if (VAddr0Idx < 0) 6021 return 8; 6022 6023 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 6024 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 6025 } 6026 6027 switch (Opc) { 6028 case TargetOpcode::IMPLICIT_DEF: 6029 case TargetOpcode::KILL: 6030 case TargetOpcode::DBG_VALUE: 6031 case TargetOpcode::EH_LABEL: 6032 return 0; 6033 case TargetOpcode::BUNDLE: 6034 return getInstBundleSize(MI); 6035 case TargetOpcode::INLINEASM: 6036 case TargetOpcode::INLINEASM_BR: { 6037 const MachineFunction *MF = MI.getParent()->getParent(); 6038 const char *AsmStr = MI.getOperand(0).getSymbolName(); 6039 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), 6040 &MF->getSubtarget()); 6041 } 6042 default: 6043 return DescSize; 6044 } 6045 } 6046 6047 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 6048 if (!isFLAT(MI)) 6049 return false; 6050 6051 if (MI.memoperands_empty()) 6052 return true; 6053 6054 for (const MachineMemOperand *MMO : MI.memoperands()) { 6055 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 6056 return true; 6057 } 6058 return false; 6059 } 6060 6061 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 6062 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 6063 } 6064 6065 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 6066 MachineBasicBlock *IfEnd) const { 6067 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 6068 assert(TI != IfEntry->end()); 6069 6070 MachineInstr *Branch = &(*TI); 6071 MachineFunction *MF = IfEntry->getParent(); 6072 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 6073 6074 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6075 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6076 MachineInstr *SIIF = 6077 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 6078 .add(Branch->getOperand(0)) 6079 .add(Branch->getOperand(1)); 6080 MachineInstr *SIEND = 6081 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 6082 .addReg(DstReg); 6083 6084 IfEntry->erase(TI); 6085 IfEntry->insert(IfEntry->end(), SIIF); 6086 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 6087 } 6088 } 6089 6090 void SIInstrInfo::convertNonUniformLoopRegion( 6091 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 6092 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 6093 // We expect 2 terminators, one conditional and one unconditional. 
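// getFirstTerminator returns the conditional branch; that is the one
// rewritten below into SI_IF_BREAK + SI_LOOP, while the trailing
// unconditional branch is left in place.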
6094 assert(TI != LoopEnd->end()); 6095 6096 MachineInstr *Branch = &(*TI); 6097 MachineFunction *MF = LoopEnd->getParent(); 6098 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 6099 6100 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6101 6102 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6103 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 6104 MachineInstrBuilder HeaderPHIBuilder = 6105 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 6106 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 6107 E = LoopEntry->pred_end(); 6108 PI != E; ++PI) { 6109 if (*PI == LoopEnd) { 6110 HeaderPHIBuilder.addReg(BackEdgeReg); 6111 } else { 6112 MachineBasicBlock *PMBB = *PI; 6113 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 6114 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 6115 ZeroReg, 0); 6116 HeaderPHIBuilder.addReg(ZeroReg); 6117 } 6118 HeaderPHIBuilder.addMBB(*PI); 6119 } 6120 MachineInstr *HeaderPhi = HeaderPHIBuilder; 6121 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 6122 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 6123 .addReg(DstReg) 6124 .add(Branch->getOperand(0)); 6125 MachineInstr *SILOOP = 6126 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 6127 .addReg(BackEdgeReg) 6128 .addMBB(LoopEntry); 6129 6130 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 6131 LoopEnd->erase(TI); 6132 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 6133 LoopEnd->insert(LoopEnd->end(), SILOOP); 6134 } 6135 } 6136 6137 ArrayRef<std::pair<int, const char *>> 6138 SIInstrInfo::getSerializableTargetIndices() const { 6139 static const std::pair<int, const char *> TargetIndices[] = { 6140 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 6141 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 6142 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 6143 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 6144 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 6145 return makeArrayRef(TargetIndices); 6146 } 6147 6148 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 6149 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 6150 ScheduleHazardRecognizer * 6151 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 6152 const ScheduleDAG *DAG) const { 6153 return new GCNHazardRecognizer(DAG->MF); 6154 } 6155 6156 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 6157 /// pass. 
6158 ScheduleHazardRecognizer * 6159 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 6160 return new GCNHazardRecognizer(MF); 6161 } 6162 6163 std::pair<unsigned, unsigned> 6164 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 6165 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 6166 } 6167 6168 ArrayRef<std::pair<unsigned, const char *>> 6169 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 6170 static const std::pair<unsigned, const char *> TargetFlags[] = { 6171 { MO_GOTPCREL, "amdgpu-gotprel" }, 6172 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 6173 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 6174 { MO_REL32_LO, "amdgpu-rel32-lo" }, 6175 { MO_REL32_HI, "amdgpu-rel32-hi" }, 6176 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 6177 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 6178 }; 6179 6180 return makeArrayRef(TargetFlags); 6181 } 6182 6183 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 6184 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 6185 MI.modifiesRegister(AMDGPU::EXEC, &RI); 6186 } 6187 6188 MachineInstrBuilder 6189 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6190 MachineBasicBlock::iterator I, 6191 const DebugLoc &DL, 6192 unsigned DestReg) const { 6193 if (ST.hasAddNoCarry()) 6194 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 6195 6196 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6197 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 6198 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 6199 6200 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6201 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6202 } 6203 6204 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6205 MachineBasicBlock::iterator I, 6206 const DebugLoc &DL, 6207 Register DestReg, 6208 RegScavenger &RS) const { 6209 if (ST.hasAddNoCarry()) 6210 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 6211 6212 Register UnusedCarry = RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 6213 // TODO: Users need to deal with this. 
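// Scavenging without spilling may fail, in which case an empty builder is
// returned and the caller must handle the absence of an add.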
6214 if (!UnusedCarry.isValid()) 6215 return MachineInstrBuilder(); 6216 6217 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6218 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6219 } 6220 6221 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 6222 switch (Opcode) { 6223 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 6224 case AMDGPU::SI_KILL_I1_TERMINATOR: 6225 return true; 6226 default: 6227 return false; 6228 } 6229 } 6230 6231 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 6232 switch (Opcode) { 6233 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 6234 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 6235 case AMDGPU::SI_KILL_I1_PSEUDO: 6236 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 6237 default: 6238 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 6239 } 6240 } 6241 6242 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 6243 MachineBasicBlock *MBB = MI.getParent(); 6244 MachineFunction *MF = MBB->getParent(); 6245 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 6246 6247 if (!ST.isWave32()) 6248 return; 6249 6250 for (auto &Op : MI.implicit_operands()) { 6251 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 6252 Op.setReg(AMDGPU::VCC_LO); 6253 } 6254 } 6255 6256 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 6257 if (!isSMRD(MI)) 6258 return false; 6259 6260 // Check that it is using a buffer resource. 6261 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 6262 if (Idx == -1) // e.g. s_memtime 6263 return false; 6264 6265 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 6266 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 6267 } 6268 6269 unsigned SIInstrInfo::getNumFlatOffsetBits(unsigned AddrSpace, 6270 bool Signed) const { 6271 if (!ST.hasFlatInstOffsets()) 6272 return 0; 6273 6274 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS) 6275 return 0; 6276 6277 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) 6278 return Signed ? 12 : 11; 6279 6280 return Signed ? 13 : 12; 6281 } 6282 6283 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 6284 bool Signed) const { 6285 // TODO: Should 0 be special cased? 
6286 if (!ST.hasFlatInstOffsets())
6287 return false;
6288
6289 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
6290 return false;
6291
6292 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
6293 return (Signed && isInt<12>(Offset)) ||
6294 (!Signed && isUInt<11>(Offset));
6295 }
6296
6297 return (Signed && isInt<13>(Offset)) ||
6298 (!Signed && isUInt<12>(Offset));
6299 }
6300
6301
6302 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
6303 enum SIEncodingFamily {
6304 SI = 0,
6305 VI = 1,
6306 SDWA = 2,
6307 SDWA9 = 3,
6308 GFX80 = 4,
6309 GFX9 = 5,
6310 GFX10 = 6,
6311 SDWA10 = 7
6312 };
6313
6314 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
6315 switch (ST.getGeneration()) {
6316 default:
6317 break;
6318 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
6319 case AMDGPUSubtarget::SEA_ISLANDS:
6320 return SIEncodingFamily::SI;
6321 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
6322 case AMDGPUSubtarget::GFX9:
6323 return SIEncodingFamily::VI;
6324 case AMDGPUSubtarget::GFX10:
6325 return SIEncodingFamily::GFX10;
6326 }
6327 llvm_unreachable("Unknown subtarget generation!");
6328 }
6329
6330 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
6331 SIEncodingFamily Gen = subtargetEncodingFamily(ST);
6332
6333 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
6334 ST.getGeneration() == AMDGPUSubtarget::GFX9)
6335 Gen = SIEncodingFamily::GFX9;
6336
6337 // Adjust the encoding family to GFX80 for D16 buffer instructions when the
6338 // subtarget has the UnpackedD16VMem feature.
6339 // TODO: remove this when we discard GFX80 encoding.
6340 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
6341 Gen = SIEncodingFamily::GFX80;
6342
6343 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
6344 switch (ST.getGeneration()) {
6345 default:
6346 Gen = SIEncodingFamily::SDWA;
6347 break;
6348 case AMDGPUSubtarget::GFX9:
6349 Gen = SIEncodingFamily::SDWA9;
6350 break;
6351 case AMDGPUSubtarget::GFX10:
6352 Gen = SIEncodingFamily::SDWA10;
6353 break;
6354 }
6355 }
6356
6357 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
6358
6359 // -1 means that Opcode is already a native instruction.
6360 if (MCOp == -1)
6361 return Opcode;
6362
6363 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
6364 // no encoding in the given subtarget generation.
6365 if (MCOp == (uint16_t)-1)
6366 return -1;
6367
6368 return MCOp;
6369 }
6370
6371 static
6372 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
6373 assert(RegOpnd.isReg());
6374 return RegOpnd.isUndef() ?
TargetInstrInfo::RegSubRegPair() :
6375 getRegSubRegPair(RegOpnd);
6376 }
6377
6378 TargetInstrInfo::RegSubRegPair
6379 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
6380 assert(MI.isRegSequence());
6381 for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
6382 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
6383 auto &RegOp = MI.getOperand(1 + 2 * I);
6384 return getRegOrUndef(RegOp);
6385 }
6386 return TargetInstrInfo::RegSubRegPair();
6387 }
6388
6389 // Try to find the definition of reg:subreg in subreg-manipulation pseudos.
6390 // Following a subreg of reg:subreg isn't supported.
6391 static bool followSubRegDef(MachineInstr &MI,
6392 TargetInstrInfo::RegSubRegPair &RSR) {
6393 if (!RSR.SubReg)
6394 return false;
6395 switch (MI.getOpcode()) {
6396 default: break;
6397 case AMDGPU::REG_SEQUENCE:
6398 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
6399 return true;
6400 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
6401 case AMDGPU::INSERT_SUBREG:
6402 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
6403 // inserted the subreg we're looking for
6404 RSR = getRegOrUndef(MI.getOperand(2));
6405 else { // the subreg in the rest of the reg
6406 auto R1 = getRegOrUndef(MI.getOperand(1));
6407 if (R1.SubReg) // subreg of subreg isn't supported
6408 return false;
6409 RSR.Reg = R1.Reg;
6410 }
6411 return true;
6412 }
6413 return false;
6414 }
6415
6416 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
6417 MachineRegisterInfo &MRI) {
6418 assert(MRI.isSSA());
6419 if (!Register::isVirtualRegister(P.Reg))
6420 return nullptr;
6421
6422 auto RSR = P;
6423 auto *DefInst = MRI.getVRegDef(RSR.Reg);
6424 while (auto *MI = DefInst) {
6425 DefInst = nullptr;
6426 switch (MI->getOpcode()) {
6427 case AMDGPU::COPY:
6428 case AMDGPU::V_MOV_B32_e32: {
6429 auto &Op1 = MI->getOperand(1);
6430 if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
6431 if (Op1.isUndef())
6432 return nullptr;
6433 RSR = getRegSubRegPair(Op1);
6434 DefInst = MRI.getVRegDef(RSR.Reg);
6435 }
6436 break;
6437 }
6438 default:
6439 if (followSubRegDef(*MI, RSR)) {
6440 if (!RSR.Reg)
6441 return nullptr;
6442 DefInst = MRI.getVRegDef(RSR.Reg);
6443 }
6444 }
6445 if (!DefInst)
6446 return MI;
6447 }
6448 return nullptr;
6449 }
6450
6451 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
6452 Register VReg,
6453 const MachineInstr &DefMI,
6454 const MachineInstr &UseMI) {
6455 assert(MRI.isSSA() && "Must be run on SSA");
6456
6457 auto *TRI = MRI.getTargetRegisterInfo();
6458 auto *DefBB = DefMI.getParent();
6459
6460 // Don't bother searching between blocks, although it is possible this block
6461 // doesn't modify exec.
6462 if (UseMI.getParent() != DefBB)
6463 return true;
6464
6465 const int MaxInstScan = 20;
6466 int NumInst = 0;
6467
6468 // Stop scan at the use.
6469 auto E = UseMI.getIterator(); 6470 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 6471 if (I->isDebugInstr()) 6472 continue; 6473 6474 if (++NumInst > MaxInstScan) 6475 return true; 6476 6477 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 6478 return true; 6479 } 6480 6481 return false; 6482 } 6483 6484 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 6485 Register VReg, 6486 const MachineInstr &DefMI) { 6487 assert(MRI.isSSA() && "Must be run on SSA"); 6488 6489 auto *TRI = MRI.getTargetRegisterInfo(); 6490 auto *DefBB = DefMI.getParent(); 6491 6492 const int MaxUseInstScan = 10; 6493 int NumUseInst = 0; 6494 6495 for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) { 6496 // Don't bother searching between blocks, although it is possible this block 6497 // doesn't modify exec. 6498 if (UseInst.getParent() != DefBB) 6499 return true; 6500 6501 if (++NumUseInst > MaxUseInstScan) 6502 return true; 6503 } 6504 6505 const int MaxInstScan = 20; 6506 int NumInst = 0; 6507 6508 // Stop scan when we have seen all the uses. 6509 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 6510 if (I->isDebugInstr()) 6511 continue; 6512 6513 if (++NumInst > MaxInstScan) 6514 return true; 6515 6516 if (I->readsRegister(VReg)) 6517 if (--NumUseInst == 0) 6518 return false; 6519 6520 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 6521 return true; 6522 } 6523 } 6524 6525 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 6526 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 6527 const DebugLoc &DL, Register Src, Register Dst) const { 6528 auto Cur = MBB.begin(); 6529 if (Cur != MBB.end()) 6530 do { 6531 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 6532 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 6533 ++Cur; 6534 } while (Cur != MBB.end() && Cur != LastPHIIt); 6535 6536 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 6537 Dst); 6538 } 6539 6540 MachineInstr *SIInstrInfo::createPHISourceCopy( 6541 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 6542 const DebugLoc &DL, Register Src, Register SrcSubReg, Register Dst) const { 6543 if (InsPt != MBB.end() && 6544 (InsPt->getOpcode() == AMDGPU::SI_IF || 6545 InsPt->getOpcode() == AMDGPU::SI_ELSE || 6546 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 6547 InsPt->definesRegister(Src)) { 6548 InsPt++; 6549 return BuildMI(MBB, InsPt, InsPt->getDebugLoc(), 6550 get(ST.isWave32() ? AMDGPU::S_MOV_B32_term 6551 : AMDGPU::S_MOV_B64_term), 6552 Dst) 6553 .addReg(Src, 0, SrcSubReg) 6554 .addReg(AMDGPU::EXEC, RegState::Implicit); 6555 } 6556 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 6557 Dst); 6558 } 6559 6560 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 6561