//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {

class AAResults;

namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
} // namespace AMDGPU
} // namespace llvm

// Must be at least 4 to be able to branch over the minimum unconditional
// branch code. This is only for making it possible to write reasonably small
// tests for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
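  // For illustration (hypothetical operand layout, not a real opcode): if a
  // MachineInstr's operand list is (vdst, vaddr, offset), then
  // getNamedOperandIdx(..., offset) == 2, while the corresponding
  // MachineSDNode operand list (vaddr, offset) holds offset at index 1.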
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AAResults *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this exec check.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads.
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
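    // Because the two formats place their operands at different positions,
    // the checks below compare operands by name rather than by fixed index.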
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      Width = getOpSize(LdSt, DataOpIdx);
    } else {
      // The 2 offset instructions use offset0 and offset1 instead. We can treat
      // these as a load with a single offset if the 2 offsets are consecutive.
      // We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm();
      unsigned Offset1 = Offset1Op->getImm();
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.
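      // For illustration (derived from the code below): a DS_READ2_B32 with
      // offset0 = 2 and offset1 = 3 has EltSize = 4 (the 64-bit destination
      // covers two 32-bit elements), so the pair is reported as a single
      // 8-byte access at byte offset 8.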

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = getOpSize(LdSt, DataOpIdx);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width += getOpSize(LdSt, DataOpIdx);
      } else {
        Width = getOpSize(LdSt, DataOpIdx);
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg()) {
      // We can only handle this if it's a stack access, as any other resource
      // would require reporting multiple base registers.
      const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (AddrReg && !AddrReg->isFI())
        return false;

      const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      const SIMachineFunctionInfo *MFI
        = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
      if (RSrc->getReg() != MFI->getScratchRSrcReg())
        return false;

      const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseOps.push_back(RSrc);
      BaseOps.push_back(SOffset);
      Offset = OffsetImm->getImm();
    } else {
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
        return false;
      BaseOps.push_back(BaseOp);

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (BaseOp)
        BaseOps.push_back(BaseOp);

      const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
      Offset = OffsetImm->getImm();
      if (SOffset) // soffset can be an inline immediate.
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isMIMG(LdSt)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both or none.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // In order to avoid register pressure, on average the number of DWORDs
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance-related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering too many
  // sub-word loads and also avoids clustering wide loads. Below is a brief
  // summary of how the heuristic behaves for various `LoadSize`:
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
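  // For illustration, a worked instance of the formula below (not additional
  // policy): four 4-byte loads give LoadSize = 4 and
  // NumDWORDs = ((4 + 3) / 4) * 4 = 4 <= 8, so they are clustered; three
  // 16-byte loads give LoadSize = 16 and NumDWORDs = 4 * 3 = 12 > 8, so
  // clustering is rejected.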
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to directly copy, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find a defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
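      // For illustration (hypothetical registers): if the def is
      // v_accvgpr_write a0, v1 and some instruction between the def and MI
      // overwrites v1, forwarding v1 here would read the wrong value, so the
      // scan below gives up on propagation in that case.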
      bool SafeToPropagate = true;
      for (auto I = Def; I != MI && SafeToPropagate; ++I)
        if (I->modifiesRegister(DefOp.getReg(), &RI))
          SafeToPropagate = false;

      if (!SafeToPropagate)
        break;

      DefOp.setIsKill(false);
    }

    MachineInstrBuilder Builder =
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .add(DefOp);
    if (ImpDefSuperReg)
      Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,
                     getKillRegState(KillSrc) | RegState::Implicit);
    }

    return;
  }

  RS.enterBasicBlock(MBB);
  RS.forward(MI);

  // Ideally we want to have three registers for a long reg_sequence copy
  // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
  unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                             *MBB.getParent());

  // Registers in the sequence are allocated contiguously so we can just
  // use register number to pick one of three round-robin temps.
  unsigned RegNo = DestReg % 3;
  Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
  if (!Tmp)
    report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  RS.setRegUsed(Tmp);
  // Only loop through if there are any free registers left; otherwise the
  // scavenger may report a fatal error (if there is no emergency spill slot)
  // or emit a spill (if there is one).
  while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
    Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
    if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
      break;
    Tmp = Tmp2;
    RS.setRegUsed(Tmp);
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
    .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
      .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           const TargetRegisterClass *RC, bool Forward) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();
  ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
  MachineBasicBlock::iterator I = MI;
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register Reg = RI.getSubReg(DestReg, SubIdx);
    unsigned Opcode = AMDGPU::S_MOV_B32;

    // Is SGPR aligned? If so try to combine with next.
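    // For illustration (derived from the alignment check below): copying
    // s[4:7] emits two S_MOV_B64s (s[4:5] and s[6:7]), while a copy whose
    // sub-registers land on odd SGPRs falls back to one S_MOV_B32 per
    // 32-bit piece.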
    Register Src = RI.getSubReg(SrcReg, SubIdx);
    bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy.
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
               .addReg(RI.getSubReg(SrcReg, SubIdx))
               .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16-bit and 32-bit
  // registers until all patterns are fixed.
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert an empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
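        // A VReg_1 value keeps a 0/1 per lane in a VGPR; comparing it against
        // 0 with V_CMP_NE_U32 rebuilds the wave-wide lane mask directly into
        // VCC_LO.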
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
            .addImm(0)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      // This copy can only be produced by patterns
      // with explicit SCC, which are known to be enabled
      // only for subtargets with S_CMP_LG_U64 present.
      assert(ST.hasScalarCompareEq64());
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    } else {
      assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    }

    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block on
    // every AGPR spill.
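    // For illustration (derived from indirectCopyToAGPR above): when no
    // reusable accvgpr_write def is found, an SGPR-to-AGPR copy such as
    // a0 <- s0 is emitted as a pair through a scavenged temporary, e.g.
    //   v_mov_b32 v_tmp, s0
    //   v_accvgpr_write_b32 a0, v_tmp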
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
                 .addImm(0) // src0_modifiers
                 .addReg(NewSrcReg)
                 .addImm(0) // clamp
                 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
                 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
    expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
    return;
  }

  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (destination is an AGPR), we need a scavenger.
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan
  // the whole block for every handled copy.
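  // For illustration (derived from the loop below): copying v[1:2] <- v[0:1]
  // overlaps, so Forward is false and the sub-registers are copied high to
  // low (v2 <- v1, then v1 <- v0) so that no source component is clobbered
  // before it is read; the overlap also suppresses the final kill of SrcReg.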
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS.reset(new RegScavenger());

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, 4);

  // If there is an overlap, we can't kill the super-register on the last
  // instruction, since it will also kill the components made live by this def.
  const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                         RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
                         ImpDefSuper, ImpUseSuper);
    } else {
      MachineInstrBuilder Builder =
        BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
          .addReg(RI.getSubReg(SrcReg, SubIdx));
      if (Idx == 0)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
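    // Only the first (lowest) sub-register is given Value; all remaining
    // sub-registers are written as zero.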
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

const MCInstrDesc &
SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
                                     bool IsIndirectSrc) const {
  if (IsIndirectSrc) {
    if (VecSize <= 32) // 4 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
    if (VecSize <= 64) // 8 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
    if (VecSize <= 96) // 12 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
    if (VecSize <= 128) // 16 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
    if (VecSize <= 160) // 20 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
    if (VecSize <= 256) // 32 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
    if (VecSize <= 512) // 64 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
    if (VecSize <= 1024) // 128 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);

    llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos");
  }

  if (VecSize <= 32) // 4 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
  if (VecSize <= 64) // 8 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
  if (VecSize <= 96) // 12 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
  if (VecSize <= 128) // 16 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
  if (VecSize <= 160) // 20 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
  if (VecSize <= 256) // 32 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
  if (VecSize <= 512) // 64 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
  if (VecSize <= 1024) // 128 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);

  llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos");
}

static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &
SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
                                             bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_A96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_A160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_A192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_A256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_A96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_A160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_A192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_A256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
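    // (As context, an assumption about code outside this file: when
    // RI.spillSGPRToVGPR() holds, these restore pseudos are later lowered by
    // SIRegisterInfo to v_readlane_b32 from the spill VGPR rather than real
    // memory loads.)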
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)           // vaddr
    .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
    .addImm(0)                           // offset
    .addMemOperand(MMO);
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertNoops(MBB, MI, 1);
}

void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              unsigned Quantity) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  // Each S_NOP immediate N yields N + 1 wait states (see getNumWaitStates),
  // so emit the requested quantity in chunks of at most 8.
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
  }
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;
  case AMDGPU::S_OR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;
  case AMDGPU::S_OR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B64));
    break;
  case AMDGPU::S_OR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::S_ANDN2_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register Dst = MI.getOperand(0).getReg();
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
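  // Illustrative expansion: given a 64-bit immediate of 1.0 (double),
  //   V_MOV_B64_PSEUDO v[0:1], 0x3ff0000000000000
  // becomes two 32-bit moves, each implicitly defining the full pair:
  //   v_mov_b32_e32 v0, 0x00000000
  //   v_mov_b32_e32 v1, 0x3ff00000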
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
    expandMovDPP64(MI);
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
    FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec);
    FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten
    MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
                                 MI.getOperand(0).getReg())
                             .add(MI.getOperand(2));
    expandPostRAPseudo(*Copy);
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8:
  case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: {
    const TargetRegisterClass *EltRC = getOpRegClass(MI, 2);

    unsigned Opc;
    if (RI.hasVGPRs(EltRC)) {
      Opc = AMDGPU::V_MOVRELD_B32_e32;
    } else {
      Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64
                                              : AMDGPU::S_MOVRELD_B32;
    }

    const MCInstrDesc &OpDesc = get(Opc);
    Register VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstrBuilder MIB =
      BuildMI(MBB, MI, DL, OpDesc)
        .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
        .add(MI.getOperand(2))
        .addReg(VecReg, RegState::ImplicitDefine)
        .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
      OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MIB->tieOperands(ImpDefIdx, ImpUseIdx);
    MI.eraseFromParent();
    break;
  }
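  // Sketch of the movrel form (assuming M0 has already been loaded with the
  // dynamic index by the indirect-addressing lowering): a V4 write through
  // subreg sub0 becomes something like
  //   v_movreld_b32 v0, v4   ; writes v[0 + M0]; v[0:3] implicitly def'd/used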
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());
    Register VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    Register Idx = MI.getOperand(3).getReg();
    unsigned SubReg = MI.getOperand(4).getImm();

    MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
                              .addReg(Idx)
                              .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect);
    MachineInstrBuilder MIB =
        BuildMI(MBB, MI, DL, OpDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .add(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MIB->tieOperands(ImpDefIdx, ImpUseIdx);

    MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));

    finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));

    MI.eraseFromParent();
    break;
  }
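  // The finalized bundle looks roughly like (registers for illustration only):
  //   s_set_gpr_idx_on s0, gpr_idx(DST)
  //   v_mov_b32 v0, v8       ; dst register number is offset by the index mode
  //   s_set_gpr_idx_off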
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16:
  case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: {
    assert(ST.useVGPRIndexMode());
    Register Dst = MI.getOperand(0).getReg();
    Register VecReg = MI.getOperand(1).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    Register Idx = MI.getOperand(2).getReg();
    unsigned SubReg = MI.getOperand(3).getImm();

    MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON))
                              .addReg(Idx)
                              .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
    SetOn->getOperand(3).setIsUndef();

    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32))
        .addDef(Dst)
        .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
        .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0))
        .addReg(AMDGPU::M0, RegState::Implicit);

    MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));

    finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    Register Reg = MI.getOperand(0).getReg();
    Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
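  // The bundle materializes a PC-relative address; with the usual relocation
  // operands (syntax shown for illustration only) it would print as:
  //   s_getpc_b64 s[4:5]
  //   s_add_u32  s4, s4, sym@rel32@lo
  //   s_addc_u32 s5, s5, sym@rel32@hi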
  case AMDGPU::ENTER_WWM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // WWM is entered.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                 : AMDGPU::S_OR_SAVEEXEC_B64));
    break;
  }
  case AMDGPU::EXIT_WWM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // WWM is exited.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
    break;
  }
  }
  return true;
}

std::pair<MachineInstr*, MachineInstr*>
SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
  assert(MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);

  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register Dst = MI.getOperand(0).getReg();
  unsigned Part = 0;
  MachineInstr *Split[2];

  for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
    auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
    if (Dst.isPhysical()) {
      MovDPP.addDef(RI.getSubReg(Dst, Sub));
    } else {
      assert(MRI.isSSA());
      auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      MovDPP.addDef(Tmp);
    }

    for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
      const MachineOperand &SrcOp = MI.getOperand(I);
      assert(!SrcOp.isFPImm());
      if (SrcOp.isImm()) {
        APInt Imm(64, SrcOp.getImm());
        Imm.ashrInPlace(Part * 32);
        MovDPP.addImm(Imm.getLoBits(32).getZExtValue());
      } else {
        assert(SrcOp.isReg());
        Register Src = SrcOp.getReg();
        if (Src.isPhysical())
          MovDPP.addReg(RI.getSubReg(Src, Sub));
        else
          MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub);
      }
    }

    for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I)
      MovDPP.addImm(MI.getOperand(I).getImm());

    Split[Part] = MovDPP;
    ++Part;
  }

  if (Dst.isVirtual())
    BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst)
      .addReg(Split[0]->getOperand(0).getReg())
      .addImm(AMDGPU::sub0)
      .addReg(Split[1]->getOperand(0).getReg())
      .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return std::make_pair(Split[0], Split[1]);
}
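
// For example, a 64-bit DPP move of %1 into a virtual %0 is split into two
// v_mov_b32_dpp instructions operating on %1.sub0 and %1.sub1 (with matching
// old/src sub-operands and the same DPP control operands), followed by
//   %0 = REG_SEQUENCE %lo, sub0, %hi, sub1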

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  Register Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else if (NonRegOp.isGlobal()) {
    RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(),
                     NonRegOp.getTargetFlags());
  } else
    return nullptr;

  // Make sure we don't reinterpret a subreg index in the target flags.
  RegOp.setTargetFlags(NonRegOp.getTargetFlags());

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
           static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
           static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non-register operands to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                        unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
}

bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())
    return false;

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}
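
// Worked example with the default 16-bit branch immediate: encodable dword
// offsets are [-0x8000, 0x7fff] relative to the next instruction, so a
// forward byte offset of 0x20000 checks as 0x20000/4 - 1 = 0x7fff (just in
// range), while 0x20004 is out of range and requires relaxation.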

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal so
    // there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately after
  // s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  } else {
    // Backwards branch.
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub0)
      .addReg(PCReg, 0, AMDGPU::sub0)
      .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
      .addReg(PCReg, RegState::Define, AMDGPU::sub1)
      .addReg(PCReg, 0, AMDGPU::sub1)
      .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
    .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short branch
  // into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  Register Scav = RS->scavengeRegisterBackwards(
      AMDGPU::SReg_64RegClass,
      MachineBasicBlock::iterator(GetPC), false, 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);
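
  // The returned size appears to account for: s_getpc_b64 (4 bytes), the
  // s_add_u32/s_sub_u32 with a 32-bit literal (8 bytes), the
  // s_addc_u32/s_subb_u32 (4 bytes), and s_setpc_b64 (4 bytes).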
  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  MachineBasicBlock *CondBB = nullptr;

  if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    CondBB = I->getOperand(1).getMBB();
    Cond.push_back(I->getOperand(0));
  } else {
    BranchPredicate Pred = getBranchPredicate(I->getOpcode());
    if (Pred == INVALID_BR)
      return true;

    CondBB = I->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(Pred));
    Cond.push_back(I->getOperand(1)); // Save the branch register.
  }
  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}
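
// For example, a block ending in
//   s_cbranch_vccz %bb.1
//   s_branch %bb.2
// yields TBB = %bb.1, FBB = %bb.2, Cond = { VCCZ, $vcc }; a lone conditional
// branch leaves FBB null to indicate fall-through.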

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  auto E = MBB.end();
  if (I == E)
    return false;

  // Skip over the instructions that are artificially terminators for special
  // exec management.
  while (I != E && !I->isBranch() && !I->isReturn() &&
         I->getOpcode() != AMDGPU::SI_MASK_BRANCH) {
    switch (I->getOpcode()) {
    case AMDGPU::SI_MASK_BRANCH:
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_OR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
    case AMDGPU::S_MOV_B32_term:
    case AMDGPU::S_XOR_B32_term:
    case AMDGPU::S_OR_B32_term:
    case AMDGPU::S_ANDN2_B32_term:
      break;
    case AMDGPU::SI_IF:
    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      // FIXME: It's messy that these need to be considered here at all.
      return true;
    default:
      llvm_unreachable("unexpected non-branch terminator inst");
    }

    ++I;
  }

  if (I == E)
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

// Copy the flags onto the implicit condition register operand.
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
      .add(Cond[0])
      .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
    fixImplicitOperands(*CondBr);

    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  assert(TBB && FBB);

  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
    .addMBB(TBB);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  MachineOperand &CondReg = CondBr->getOperand(1);
  CondReg.setIsUndef(Cond[1].isUndef());
  CondReg.setIsKill(Cond[1].isKill());

  if (BytesAdded)
    *BytesAdded = ST.hasOffset3fBug() ? 16 : 8;

  return 2;
}

bool SIInstrInfo::reverseBranchCondition(
  SmallVectorImpl<MachineOperand> &Cond) const {
  if (Cond.size() != 2) {
    return true;
  }

  if (Cond[0].isImm()) {
    Cond[0].setImm(-Cond[0].getImm());
    return false;
  }

  return true;
}
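
// Negating the immediate assumes BranchPredicate encodes inverse predicates
// as +/- pairs (e.g. SCC_TRUE/SCC_FALSE), so reversing { SCC_TRUE, $scc }
// yields { SCC_FALSE, $scc }.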

bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                  ArrayRef<MachineOperand> Cond,
                                  Register DstReg, Register TrueReg,
                                  Register FalseReg, int &CondCycles,
                                  int &TrueCycles, int &FalseCycles) const {
  switch (Cond[0].getImm()) {
  case VCCNZ:
  case VCCZ: {
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    if (MRI.getRegClass(FalseReg) != RC)
      return false;

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???

    // Limit to equal cost for branch vs. N v_cndmask_b32s.
    return RI.hasVGPRs(RC) && NumInsts <= 6;
  }
  case SCC_TRUE:
  case SCC_FALSE: {
    // FIXME: We could insert for VGPRs if we could replace the original compare
    // with a vector one.
    const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
    if (MRI.getRegClass(FalseReg) != RC)
      return false;

    int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;

    // Sizes that are a multiple of 8 bytes (i.e. an even number of 32-bit
    // halves) can use s_cselect_b64, halving the instruction count.
    if (NumInsts % 2 == 0)
      NumInsts /= 2;

    CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
    return RI.isSGPRClass(RC);
  }
  default:
    return false;
  }
}
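
// Cost examples: a 128-bit VGPR select is 4 v_cndmask_b32 instructions
// (allowed, since 4 <= 6); a 256-bit SGPR select on SCC is 8 / 2 = 4
// s_cselect_b64 instructions.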

void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, const DebugLoc &DL,
                               Register DstReg, ArrayRef<MachineOperand> Cond,
                               Register TrueReg, Register FalseReg) const {
  BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
  if (Pred == VCCZ || Pred == SCC_FALSE) {
    Pred = static_cast<BranchPredicate>(-Pred);
    std::swap(TrueReg, FalseReg);
  }

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
  unsigned DstSize = RI.getRegSizeInBits(*DstRC);

  if (DstSize == 32) {
    MachineInstr *Select;
    if (Pred == SCC_TRUE) {
      Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
        .addReg(TrueReg)
        .addReg(FalseReg);
    } else {
      // Instruction's operands are backwards from what is expected.
      Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg);
    }

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  if (DstSize == 64 && Pred == SCC_TRUE) {
    MachineInstr *Select =
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
        .addReg(TrueReg)
        .addReg(FalseReg);

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    return;
  }

  static const int16_t Sub0_15[] = {
    AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
    AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
    AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
    AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
  };

  static const int16_t Sub0_15_64[] = {
    AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
    AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
    AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
    AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
  };

  unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
  const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
  const int16_t *SubIndices = Sub0_15;
  int NElts = DstSize / 32;

  // 64-bit select is only available for SALU.
  // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
  if (Pred == SCC_TRUE) {
    if (NElts % 2) {
      SelOp = AMDGPU::S_CSELECT_B32;
      EltRC = &AMDGPU::SGPR_32RegClass;
    } else {
      SelOp = AMDGPU::S_CSELECT_B64;
      EltRC = &AMDGPU::SGPR_64RegClass;
      SubIndices = Sub0_15_64;
      NElts /= 2;
    }
  }

  MachineInstrBuilder MIB = BuildMI(
    MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);

  I = MIB->getIterator();

  SmallVector<Register, 8> Regs;
  for (int Idx = 0; Idx != NElts; ++Idx) {
    Register DstElt = MRI.createVirtualRegister(EltRC);
    Regs.push_back(DstElt);

    unsigned SubIdx = SubIndices[Idx];

    MachineInstr *Select;
    if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
      Select =
        BuildMI(MBB, I, DL, get(SelOp), DstElt)
          .addReg(FalseReg, 0, SubIdx)
          .addReg(TrueReg, 0, SubIdx);
    } else {
      Select =
        BuildMI(MBB, I, DL, get(SelOp), DstElt)
          .addReg(TrueReg, 0, SubIdx)
          .addReg(FalseReg, 0, SubIdx);
    }

    preserveCondRegFlags(Select->getOperand(3), Cond[1]);
    fixImplicitOperands(*Select);

    MIB.addReg(DstElt)
       .addImm(SubIdx);
  }
}

bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO: {
    // If there are additional implicit register operands, this may be used for
    // register indexing so the source register operand isn't simply copied.
    unsigned NumOps = MI.getDesc().getNumOperands() +
      MI.getDesc().getNumImplicitUses();

    return MI.getNumOperands() == NumOps;
  }
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
  case AMDGPU::V_ACCVGPR_READ_B32_e64:
    return true;
  default:
    return false;
  }
}

unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
    unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
  case PseudoSourceValue::TargetCustom:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }
  return AMDGPUAS::FLAT_ADDRESS;
}

static void removeModOperands(MachineInstr &MI) {
  unsigned Opc = MI.getOpcode();
  int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src0_modifiers);
  int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src1_modifiers);
  int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
                                              AMDGPU::OpName::src2_modifiers);

  MI.RemoveOperand(Src2ModIdx);
  MI.RemoveOperand(Src1ModIdx);
  MI.RemoveOperand(Src0ModIdx);
}
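
// Note that the modifier operands are removed from the highest index down;
// removing src0_modifiers first would shift the src1/src2 modifier operands
// and invalidate the indices computed above.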

bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                Register Reg, MachineRegisterInfo *MRI) const {
  if (!MRI->hasOneNonDBGUse(Reg))
    return false;

  switch (DefMI.getOpcode()) {
  default:
    return false;
  case AMDGPU::S_MOV_B64:
    // TODO: We could fold 64-bit immediates, but this gets complicated
    // when there are sub-registers.
    return false;

  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
    break;
  }

  const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
  assert(ImmOp);
  // FIXME: We could handle FrameIndex values here.
  if (!ImmOp->isImm())
    return false;

  unsigned Opc = UseMI.getOpcode();
  if (Opc == AMDGPU::COPY) {
    Register DstReg = UseMI.getOperand(0).getReg();
    bool Is16Bit = getOpSize(UseMI, 0) == 2;
    bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
    unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
    APInt Imm(32, ImmOp->getImm());

    if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16)
      Imm = Imm.ashr(16);

    if (RI.isAGPR(*MRI, DstReg)) {
      if (!isInlineConstant(Imm))
        return false;
      NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64;
    }

    if (Is16Bit) {
      if (isVGPRCopy)
        return false; // Do not clobber vgpr_hi16

      if (DstReg.isVirtual() &&
          UseMI.getOperand(0).getSubReg() != AMDGPU::lo16)
        return false;

      UseMI.getOperand(0).setSubReg(0);
      if (DstReg.isPhysical()) {
        DstReg = RI.get32BitRegister(DstReg);
        UseMI.getOperand(0).setReg(DstReg);
      }
      assert(UseMI.getOperand(1).getReg().isVirtual());
    }

    UseMI.setDesc(get(NewOpc));
    UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue());
    UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
    return true;
  }
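  // For example, given a single non-debug use,
  //   %0:sgpr_32 = S_MOV_B32 42
  //   %1:vgpr_32 = COPY %0
  // the copy is rewritten in place to
  //   %1:vgpr_32 = V_MOV_B32_e32 42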

  if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
      Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 ||
      Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
      Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) {
    // Don't fold if we are using source or output modifiers. The new VOP2
    // instructions don't have them.
    if (hasAnyModifiersSet(UseMI))
      return false;

    // If this is a free constant, there's no reason to do this.
    // TODO: We could fold this here instead of letting SIFoldOperands do it
    // later.
    MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);

    // Any src operand can be used for the legality check.
    if (isInlineConstant(UseMI, *Src0, *ImmOp))
      return false;

    bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 ||
                 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64;
    bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 ||
                 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64;
    MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
    MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);

    // Multiplied part is the constant: Use v_madmk_{f16, f32}.
    // We should only expect these to be on src0 due to canonicalizations.
    if (Src0->isReg() && Src0->getReg() == Reg) {
      if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
        return false;

      if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
        return false;

      unsigned NewOpc =
        IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
              : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
      if (pseudoToMCOpcode(NewOpc) == -1)
        return false;

      // We need to swap operands 0 and 1 since madmk constant is at operand 1.

      const int64_t Imm = ImmOp->getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      Register Src1Reg = Src1->getReg();
      unsigned Src1SubReg = Src1->getSubReg();
      Src0->setReg(Src1Reg);
      Src0->setSubReg(Src1SubReg);
      Src0->setIsKill(Src1->isKill());

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      Src1->ChangeToImmediate(Imm);

      removeModOperands(UseMI);
      UseMI.setDesc(get(NewOpc));

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }

    // Added part is the constant: Use v_madak_{f16, f32}.
    if (Src2->isReg() && Src2->getReg() == Reg) {
      // Not allowed to use constant bus for another operand.
      // We can however allow an inline immediate as src0.
      bool Src0Inlined = false;
      if (Src0->isReg()) {
        // Try to inline the constant if possible: if the def is a move
        // immediate and this is its only use, folding it saves a VGPR.
        MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
        if (Def && Def->isMoveImmediate() &&
            isInlineConstant(Def->getOperand(1)) &&
            MRI->hasOneUse(Src0->getReg())) {
          Src0->ChangeToImmediate(Def->getOperand(1).getImm());
          Src0Inlined = true;
        } else if ((Src0->getReg().isPhysical() &&
                    (ST.getConstantBusLimit(Opc) <= 1 &&
                     RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
                   (Src0->getReg().isVirtual() &&
                    (ST.getConstantBusLimit(Opc) <= 1 &&
                     RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
          return false;
        // VGPR is okay as Src0 - fallthrough
      }

      if (Src1->isReg() && !Src0Inlined) {
        // We have one slot for an inlinable constant so far - try to fill it.
        MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
        if (Def && Def->isMoveImmediate() &&
            isInlineConstant(Def->getOperand(1)) &&
            MRI->hasOneUse(Src1->getReg()) &&
            commuteInstruction(UseMI)) {
          Src0->ChangeToImmediate(Def->getOperand(1).getImm());
        } else if ((Src1->getReg().isPhysical() &&
                    RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
                   (Src1->getReg().isVirtual() &&
                    RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
          return false;
        // VGPR is okay as Src1 - fallthrough
      }

      unsigned NewOpc =
        IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
              : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
      if (pseudoToMCOpcode(NewOpc) == -1)
        return false;

      const int64_t Imm = ImmOp->getImm();

      // FIXME: This would be a lot easier if we could return a new instruction
      // instead of having to modify in place.

      // Remove these first since they are at the end.
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
      UseMI.RemoveOperand(
          AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));

      if (Opc == AMDGPU::V_MAC_F32_e64 ||
          Opc == AMDGPU::V_MAC_F16_e64 ||
          Opc == AMDGPU::V_FMAC_F32_e64 ||
          Opc == AMDGPU::V_FMAC_F16_e64)
        UseMI.untieRegOperand(
            AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));

      // ChangingToImmediate adds Src2 back to the instruction.
      Src2->ChangeToImmediate(Imm);

      // These come before src2.
      removeModOperands(UseMI);
      UseMI.setDesc(get(NewOpc));
      // It might happen that UseMI was commuted and we now have an SGPR as
      // SRC1. If so, an inlined constant, the new literal, and an SGPR would
      // be illegal together, so relegalize the operands.
      legalizeOperands(UseMI);

      bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
      if (DeleteDef)
        DefMI.eraseFromParent();

      return true;
    }
  }

  return false;
}

static bool
memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
                           ArrayRef<const MachineOperand *> BaseOps2) {
  if (BaseOps1.size() != BaseOps2.size())
    return false;
  for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
    if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
      return false;
  }
  return true;
}

static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
                                int WidthB, int OffsetB) {
  int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
  int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset;
}
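
// For example, a 4-byte access at offset 0 and a 4-byte access at offset 4 do
// not overlap (0 + 4 <= 4); an 8-byte access at offset 0 and a 4-byte access
// at offset 4 do (0 + 8 > 4).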

bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
                                               const MachineInstr &MIb) const {
  SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
  int64_t Offset0, Offset1;
  unsigned Dummy0, Dummy1;
  bool Offset0IsScalable, Offset1IsScalable;
  if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
                                     Dummy0, &RI) ||
      !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
                                     Dummy1, &RI))
    return false;

  if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
    return false;

  if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
    // FIXME: Handle ds_read2 / ds_write2.
    return false;
  }
  unsigned Width0 = MIa.memoperands().front()->getSize();
  unsigned Width1 = MIb.memoperands().front()->getSize();
  return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
}

bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                                  const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() &&
         "MIa must load from or modify a memory location");
  assert(MIb.mayLoadOrStore() &&
         "MIb must load from or modify a memory location");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
    return false;

  // XXX - Can we relax this between address spaces?
  if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // TODO: Should we check the address space from the MachineMemOperand? That
  // would allow us to distinguish objects we know don't alias based on the
  // underlying address space, even if it was lowered to a different one,
  // e.g. private accesses lowered to use MUBUF instructions on a scratch
  // buffer.
  if (isDS(MIa)) {
    if (isDS(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
  }

  if (isMUBUF(MIa) || isMTBUF(MIa)) {
    if (isMUBUF(MIb) || isMTBUF(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isSMRD(MIb);
  }

  if (isSMRD(MIa)) {
    if (isSMRD(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
  }

  if (isFLAT(MIa)) {
    if (isFLAT(MIb))
      return checkInstOffsetsDoNotOverlap(MIa, MIb);

    return false;
  }

  return false;
}

static int64_t getFoldableImm(const MachineOperand *MO) {
  if (!MO->isReg())
    return 0;
  const MachineFunction *MF = MO->getParent()->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();
  auto Def = MRI.getUniqueVRegDef(MO->getReg());
  if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
      Def->getOperand(1).isImm())
    return Def->getOperand(1).getImm();
  // 0 doubles as the "no foldable immediate" result; callers test the return
  // value for truth, so a literal 0 immediate is conservatively not folded.
  return 0;
}
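
// Usage sketch: when src2 of a v_mac is defined by
//   %2:vgpr_32 = V_MOV_B32_e32 0x41200000
// getFoldableImm returns 0x41200000, which lets convertToThreeAddress below
// form V_MADAK_F32 instead of a plain V_MAD_F32.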

static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI,
                                MachineInstr &NewMI) {
  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, NewMI);
    }
  }
}

MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
                                                 MachineInstr &MI,
                                                 LiveVariables *LV) const {
  unsigned Opc = MI.getOpcode();
  bool IsF16 = false;
  bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
               Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;

  switch (Opc) {
  default:
    return nullptr;
  case AMDGPU::V_MAC_F16_e64:
  case AMDGPU::V_FMAC_F16_e64:
    IsF16 = true;
    LLVM_FALLTHROUGH;
  case AMDGPU::V_MAC_F32_e64:
  case AMDGPU::V_FMAC_F32_e64:
    break;
  case AMDGPU::V_MAC_F16_e32:
  case AMDGPU::V_FMAC_F16_e32:
    IsF16 = true;
    LLVM_FALLTHROUGH;
  case AMDGPU::V_MAC_F32_e32:
  case AMDGPU::V_FMAC_F32_e32: {
    int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                             AMDGPU::OpName::src0);
    const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
    if (!Src0->isReg() && !Src0->isImm())
      return nullptr;

    if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
      return nullptr;

    break;
  }
  }

  const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
  const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
  const MachineOperand *Src0Mods =
    getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  const MachineOperand *Src1Mods =
    getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
  const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
  MachineInstrBuilder MIB;

  if (!Src0Mods && !Src1Mods && !Clamp && !Omod &&
      // If we have an SGPR input, we will violate the constant bus restriction.
      (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() ||
       !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
    if (auto Imm = getFoldableImm(Src2)) {
      unsigned NewOpc =
        IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
              : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32);
      if (pseudoToMCOpcode(NewOpc) != -1) {
        MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
                  .add(*Dst)
                  .add(*Src0)
                  .add(*Src1)
                  .addImm(Imm);
        updateLiveVariables(LV, MI, *MIB);
        return MIB;
      }
    }
    unsigned NewOpc = IsFMA
        ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32)
        : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32);
    if (auto Imm = getFoldableImm(Src1)) {
      if (pseudoToMCOpcode(NewOpc) != -1) {
        MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
                  .add(*Dst)
                  .add(*Src0)
                  .addImm(Imm)
                  .add(*Src2);
        updateLiveVariables(LV, MI, *MIB);
        return MIB;
      }
    }
    if (auto Imm = getFoldableImm(Src0)) {
      if (pseudoToMCOpcode(NewOpc) != -1 &&
          isOperandLegal(
              MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0),
              Src1)) {
        MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
                  .add(*Dst)
                  .add(*Src1)
                  .addImm(Imm)
                  .add(*Src2);
        updateLiveVariables(LV, MI, *MIB);
        return MIB;
      }
    }
  }

  unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16_e64 : AMDGPU::V_FMA_F32_e64)
                          : (IsF16 ? AMDGPU::V_MAD_F16_e64 : AMDGPU::V_MAD_F32_e64);
  if (pseudoToMCOpcode(NewOpc) == -1)
    return nullptr;

  MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc))
            .add(*Dst)
            .addImm(Src0Mods ? Src0Mods->getImm() : 0)
            .add(*Src0)
            .addImm(Src1Mods ? Src1Mods->getImm() : 0)
            .add(*Src1)
            .addImm(0) // src2 modifiers
            .add(*Src2)
            .addImm(Clamp ? Clamp->getImm() : 0)
            .addImm(Omod ? Omod->getImm() : 0);
  updateLiveVariables(LV, MI, *MIB);
  return MIB;
}
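
// For example, with no modifiers set, the tied two-address form
//   $vgpr0 = V_MAC_F32_e32 $vgpr1, $vgpr2, $vgpr0(tied)
// converts to the untied three-address VOP3 form
//   $vgpr0 = V_MAD_F32_e64 0, $vgpr1, 0, $vgpr2, 0, $vgpr0, 0, 0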

// It's not generally safe to move VALU instructions across these since it will
// start using the register as a base index rather than directly.
// XXX - Why isn't hasSideEffects sufficient for these?
static bool changesVGPRIndexingMode(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case AMDGPU::S_SET_GPR_IDX_ON:
  case AMDGPU::S_SET_GPR_IDX_MODE:
  case AMDGPU::S_SET_GPR_IDX_OFF:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                       const MachineBasicBlock *MBB,
                                       const MachineFunction &MF) const {
  // Skipping the check for SP writes in the base implementation. The reason it
  // was added was apparently due to compile time concerns.
  //
  // TODO: Do we really want this barrier? It triggers unnecessary hazard nops
  // but is probably avoidable.

  // Copied from base implementation.
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Target-independent instructions do not have an implicit-use of EXEC, even
  // when they operate on VGPRs. Treating EXEC modifications as scheduling
  // boundaries prevents incorrect movements of such instructions.
  return MI.modifiesRegister(AMDGPU::EXEC, &RI) ||
         MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 ||
         MI.getOpcode() == AMDGPU::S_SETREG_B32 ||
         changesVGPRIndexingMode(MI);
}

bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const {
  return Opcode == AMDGPU::DS_ORDERED_COUNT ||
         Opcode == AMDGPU::DS_GWS_INIT ||
         Opcode == AMDGPU::DS_GWS_SEMA_V ||
         Opcode == AMDGPU::DS_GWS_SEMA_BR ||
         Opcode == AMDGPU::DS_GWS_SEMA_P ||
         Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL ||
         Opcode == AMDGPU::DS_GWS_BARRIER;
}

bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) {
  // Skip the full operand and register alias search modifiesRegister
  // does. There's only a handful of instructions that touch this, it's only an
  // implicit def, and doesn't alias any other registers.
  if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) {
    for (; ImpDef && *ImpDef; ++ImpDef) {
      if (*ImpDef == AMDGPU::MODE)
        return true;
    }
  }

  return false;
}

bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();

  if (MI.mayStore() && isSMRD(MI))
    return true; // scalar store or atomic

  // This will terminate the function when other lanes may need to continue.
  if (MI.isReturn())
    return true;

  // These instructions cause shader I/O that may cause hardware lockups
  // when executed with an empty EXEC mask.
  //
  // Note: exp with VM = DONE = 0 is automatically skipped by hardware when
  // EXEC = 0, but checking for that case here seems not worth it
  // given the typical code patterns.
  if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT ||
      isEXP(Opcode) ||
      Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP ||
      Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER)
    return true;

  if (MI.isCall() || MI.isInlineAsm())
    return true; // conservative assumption

  // A mode change is a scalar operation that influences vector instructions.
  if (modifiesModeRegister(MI))
    return true;

  // These are like SALU instructions in terms of effects, so it's questionable
  // whether we should return true for those.
  //
  // However, executing them with EXEC = 0 causes them to operate on undefined
  // data, which we avoid by returning true here.
  if (Opcode == AMDGPU::V_READFIRSTLANE_B32 ||
      Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32)
    return true;

  return false;
}

bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI,
                              const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return false;

  // This won't read exec if this is an SGPR->SGPR copy.
  if (MI.isCopyLike()) {
    if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg()))
      return true;

    // Make sure this isn't copying exec as a normal operand
    return MI.readsRegister(AMDGPU::EXEC, &RI);
  }

  // Make a conservative assumption about the callee.
  if (MI.isCall())
    return true;

  // Be conservative with any unhandled generic opcodes.
  if (!isTargetSpecificOpcode(MI.getOpcode()))
    return true;

  return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
}

bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
  switch (Imm.getBitWidth()) {
  case 1: // This likely will be a condition code mask.
    return true;

  case 32:
    return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  case 64:
    return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  case 16:
    return ST.has16BitInsts() &&
           AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
                                        ST.hasInv2PiInlineImm());
  default:
    llvm_unreachable("invalid bitwidth");
  }
}
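
// Examples for the 32-bit case: 1.0f (0x3f800000), -4.0f, and small integers
// in [-16, 64] are inline constants, while an arbitrary literal such as
// 0x40490fdb (pi) is not; the 1/(2*pi) bit pattern is inlinable only when
// ST.hasInv2PiInlineImm() holds.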
      // We shouldn't really need to handle constants in these cases.
      int16_t Trunc = static_cast<int16_t>(Imm);
      return ST.has16BitInsts() &&
             AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm());
    }

    return false;
  }
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: {
    uint32_t Trunc = static_cast<uint32_t>(Imm);
    return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm());
  }
  default:
    llvm_unreachable("invalid bitwidth");
  }
}

bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO,
                                        const MCOperandInfo &OpInfo) const {
  switch (MO.getType()) {
  case MachineOperand::MO_Register:
    return false;
  case MachineOperand::MO_Immediate:
    return !isInlineConstant(MO, OpInfo);
  case MachineOperand::MO_FrameIndex:
  case MachineOperand::MO_MachineBasicBlock:
  case MachineOperand::MO_ExternalSymbol:
  case MachineOperand::MO_GlobalAddress:
  case MachineOperand::MO_MCSymbol:
    return true;
  default:
    llvm_unreachable("unexpected operand type");
  }
}

static bool compareMachineOp(const MachineOperand &Op0,
                             const MachineOperand &Op1) {
  if (Op0.getType() != Op1.getType())
    return false;

  switch (Op0.getType()) {
  case MachineOperand::MO_Register:
    return Op0.getReg() == Op1.getReg();
  case MachineOperand::MO_Immediate:
    return Op0.getImm() == Op1.getImm();
  default:
    llvm_unreachable("Didn't expect to be comparing these operand types");
  }
}

bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo,
                                    const MachineOperand &MO) const {
  const MCInstrDesc &InstDesc = MI.getDesc();
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo];

  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());

  if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE)
    return true;

  if (OpInfo.RegClass < 0)
    return false;

  if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
    if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
        OpNo == (unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                     AMDGPU::OpName::src2))
      return false;
    return RI.opCanUseInlineConstant(OpInfo.OperandType);
  }

  if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
    return false;

  if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
    return true;

  return ST.hasVOP3Literal();
}

bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
  int Op32 = AMDGPU::getVOPe32(Opcode);
  if (Op32 == -1)
    return false;

  return pseudoToMCOpcode(Op32) != -1;
}

bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
  // The src0_modifiers operand is present on all instructions
  // that have modifiers.
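  // (For example, V_ADD_F32_e64 can encode a negated or absolute-value source
  // via SISrcMods bits in src0_modifiers, while the VOP2 form V_ADD_F32_e32
  // has no modifier operands at all.)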

  return AMDGPU::getNamedOperandIdx(Opcode,
                                    AMDGPU::OpName::src0_modifiers) != -1;
}

bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
                                  unsigned OpName) const {
  const MachineOperand *Mods = getNamedOperand(MI, OpName);
  return Mods && Mods->getImm();
}

bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
  return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
         hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
         hasModifiersSet(MI, AMDGPU::OpName::omod);
}

bool SIInstrInfo::canShrink(const MachineInstr &MI,
                            const MachineRegisterInfo &MRI) const {
  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
  // Can't shrink instruction with three operands.
  // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
  // a special case for it. It can only be shrunk if the third operand
  // is vcc, and src0_modifiers and src1_modifiers are not set.
  // We should handle this the same way we handle vopc, by adding
  // a register allocation hint pre-regalloc and then do the shrinking
  // post-regalloc.
  if (Src2) {
    switch (MI.getOpcode()) {
    default: return false;

    case AMDGPU::V_ADDC_U32_e64:
    case AMDGPU::V_SUBB_U32_e64:
    case AMDGPU::V_SUBBREV_U32_e64: {
      const MachineOperand *Src1
        = getNamedOperand(MI, AMDGPU::OpName::src1);
      if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
        return false;
      // Additional verification is needed for sdst/src2.
      return true;
    }
    case AMDGPU::V_MAC_F32_e64:
    case AMDGPU::V_MAC_F16_e64:
    case AMDGPU::V_FMAC_F32_e64:
    case AMDGPU::V_FMAC_F16_e64:
      if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
        return false;
      break;

    case AMDGPU::V_CNDMASK_B32_e64:
      break;
    }
  }

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
               hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
    return false;

  // We don't need to check src0, all input types are legal, so just make sure
  // src0 isn't using any modifiers.
  if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
    return false;

  // Can it be shrunk to a valid 32 bit opcode?
  if (!hasVALU32BitEncoding(MI.getOpcode()))
    return false;

  // Check output modifiers
  return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
         !hasModifiersSet(MI, AMDGPU::OpName::clamp);
}

// Set VCC operand with all flags from \p Orig, except for setting it as
// implicit.
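// For example (an illustrative shrink): turning
//   %vgpr0 = V_CNDMASK_B32_e64 0, %vgpr1, 0, %vgpr2, killed %vcc
// into V_CNDMASK_B32_e32 replaces the explicit, killed %vcc use with an
// implicit one, and the kill flag has to be transferred onto the implicit
// operand; that is what this helper does.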
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {

  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() &&
        (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
                                           unsigned Op32) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineInstrBuilder Inst32 =
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
    .setMIFlags(MI.getFlags());

  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
  // For VOPC instructions, this is replaced by an implicit def of vcc.
  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
  if (Op32DstIdx != -1) {
    // dst
    Inst32.add(MI.getOperand(0));
  } else {
    assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
            (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
           "Unexpected case");
  }

  Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1)
    Inst32.add(*Src1);

  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);

  if (Src2) {
    int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
    if (Op32Src2Idx != -1) {
      Inst32.add(*Src2);
    } else {
      // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
      // replaced with an implicit read of vcc or vcc_lo. The implicit read
      // of vcc was already added during the initial BuildMI, but we
      // 1) may need to change vcc to vcc_lo to preserve the original register
      // 2) have to preserve the original flags.
      fixImplicitOperands(*Inst32);
      copyFlagsToImplicitVCC(*Inst32, *Src2);
    }
  }

  return Inst32;
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  const MCOperandInfo &OpInfo) const {
  // Literal constants use the constant bus.
  //if (isLiteralConstantLike(MO, OpInfo))
  //  return true;
  if (MO.isImm())
    return !isInlineConstant(MO, OpInfo);

  if (!MO.isReg())
    return true; // Misc other operands like FrameIndex

  if (!MO.isUse())
    return false;

  if (MO.getReg().isVirtual())
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // Null is free
  if (MO.getReg() == AMDGPU::SGPR_NULL)
    return false;

  // SGPRs use the constant bus
  if (MO.isImplicit()) {
    return MO.getReg() == AMDGPU::M0 ||
           MO.getReg() == AMDGPU::VCC ||
           MO.getReg() == AMDGPU::VCC_LO;
  } else {
    return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
           AMDGPU::SReg_64RegClass.contains(MO.getReg());
  }
}

static Register findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
    if (MO.isDef())
      continue;

    switch (MO.getReg()) {
    case AMDGPU::VCC:
    case AMDGPU::VCC_LO:
    case AMDGPU::VCC_HI:
    case AMDGPU::M0:
    case AMDGPU::FLAT_SCR:
      return MO.getReg();

    default:
      break;
    }
  }

  return AMDGPU::NoRegister;
}

static bool shouldReadExec(const MachineInstr &MI) {
  if (SIInstrInfo::isVALU(MI)) {
    switch (MI.getOpcode()) {
    case AMDGPU::V_READLANE_B32:
    case AMDGPU::V_WRITELANE_B32:
      return false;
    }

    return true;
  }

  if (MI.isPreISelOpcode() ||
      SIInstrInfo::isGenericOpcode(MI.getOpcode()) ||
      SIInstrInfo::isSALU(MI) ||
      SIInstrInfo::isSMRD(MI))
    return false;

  return true;
}

static bool isSubRegOf(const SIRegisterInfo &TRI,
                       const MachineOperand &SuperVec,
                       const MachineOperand &SubReg) {
  if (SubReg.getReg().isPhysical())
    return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg());

  return SubReg.getSubReg() != AMDGPU::NoSubRegister &&
         SubReg.getReg() == SuperVec.getReg();
}

bool SIInstrInfo::verifyInstruction(const MachineInstr &MI,
                                    StringRef &ErrInfo) const {
  uint16_t Opcode = MI.getOpcode();
  if (SIInstrInfo::isGenericOpcode(MI.getOpcode()))
    return true;

  const MachineFunction *MF = MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF->getRegInfo();

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
  int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
  int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

  // Make sure the number of operands is correct.
  const MCInstrDesc &Desc = get(Opcode);
  if (!Desc.isVariadic() &&
      Desc.getNumOperands() != MI.getNumExplicitOperands()) {
    ErrInfo = "Instruction has wrong number of operands.";
    return false;
  }

  if (MI.isInlineAsm()) {
    // Verify register classes for inlineasm constraints.
    for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands();
         I != E; ++I) {
      const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI);
      if (!RC)
        continue;

      const MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg())
        continue;

      Register Reg = Op.getReg();
      if (!Reg.isVirtual() && !RC->contains(Reg)) {
        ErrInfo = "inlineasm operand has incorrect register class.";
        return false;
      }
    }

    return true;
  }

  if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) {
    ErrInfo = "missing memory operand from MIMG instruction.";
    return false;
  }

  // Make sure the register classes are correct.
  for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) {
    if (MI.getOperand(i).isFPImm()) {
      ErrInfo = "FPImm Machine Operands are not supported. ISel should "
                "bitcast all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    }
    case MCOI::OPERAND_IMMEDIATE:
    case AMDGPU::OPERAND_KIMM32:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      LLVM_FALLTHROUGH;
    default:
      continue;
    }

    if (!MI.getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      Register Reg = MI.getOperand(i).getReg();
      if (Reg == AMDGPU::NoRegister || Reg.isVirtual())
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify SDWA
  if (isSDWA(MI)) {
    if (!ST.hasSDWA()) {
      ErrInfo = "SDWA is not supported on this target";
      return false;
    }

    int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);

    const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        continue;
      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (!ST.hasSDWAScalar()) {
        // Only VGPRs on VI
        if (!MO.isReg() ||
            !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
          ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
          return false;
        }
      } else {
        // No immediates on GFX9
        if (!MO.isReg()) {
          ErrInfo =
            "Only reg allowed as operands in SDWA instructions on GFX9+";
          return false;
        }
      }
    }

    if (!ST.hasSDWAOmod()) {
      // No omod allowed on VI
      const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
      if (OMod != nullptr &&
          (!OMod->isImm() || OMod->getImm() != 0)) {
        ErrInfo = "OMod not allowed in SDWA instructions on VI";
        return false;
      }
    }

    uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
    if (isVOPC(BasicOpcode)) {
      if (!ST.hasSDWASdst() && DstIdx != -1) {
        // Only vcc allowed as dst on VI for VOPC
        const MachineOperand &Dst = MI.getOperand(DstIdx);
        if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
          ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
          return false;
        }
      } else if (!ST.hasSDWAOutModsVOPC()) {
        // No clamp allowed on GFX9 for VOPC
        const MachineOperand *Clamp =
            getNamedOperand(MI, AMDGPU::OpName::clamp);
        if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
          ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI";
          return false;
        }

        // No omod allowed on GFX9 for VOPC
        const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
        if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
          ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI";
          return false;
        }
      }
    }

    const MachineOperand *DstUnused =
        getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused && DstUnused->isImm() &&
        DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (!Dst.isReg() || !Dst.isTied()) {
        ErrInfo = "Dst register should have tied register";
        return false;
      }

      const MachineOperand &TiedMO =
          MI.getOperand(MI.findTiedOperandIdx(DstIdx));
      if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
        ErrInfo =
          "Dst register should be tied to implicit use of preserved register";
        return false;
      } else if (TiedMO.getReg().isPhysical() &&
                 Dst.getReg() != TiedMO.getReg()) {
        ErrInfo = "Dst register should use same physical register as preserved";
        return false;
      }
    }
  }

  // Verify MIMG
  if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
    // Ensure that the return type used is large enough for all the options
    // being used. TFE/LWE require an extra result register.
    const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
    if (DMask) {
      uint64_t DMaskImm = DMask->getImm();
      uint32_t RegCount =
          isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
      const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
      const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
      const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);

      // Adjust for packed 16 bit values
      if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
        RegCount >>= 1;

      // Adjust if using LWE or TFE
      if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
        RegCount += 1;

      const uint32_t DstIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (Dst.isReg()) {
        const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
        uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
        if (RegCount > DstSize) {
          ErrInfo = "MIMG instruction returns too many registers for dst "
                    "register class";
          return false;
        }
      }
    }
  }

  // Verify VOP*. Ignore multiple sgpr operands on writelane.
  if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 &&
      (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
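    // Roughly, the things counted against the constant bus below are: each
    // unique SGPR read by a true source operand, each literal (non-inline)
    // immediate, and an implicit read of m0/vcc if present (see
    // usesConstantBus and findImplicitSGPRRead above).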
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned LiteralCount = 0;

    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
      ++ConstantBusCount;

    SmallVector<Register, 2> SGPRsUsed;
    Register SGPRUsed;

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI.getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg()) {
          SGPRUsed = MO.getReg();
          if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) {
                return SGPRUsed != SGPR;
              })) {
            ++ConstantBusCount;
            SGPRsUsed.push_back(SGPRUsed);
          }
        } else {
          ++ConstantBusCount;
          ++LiteralCount;
        }
      }
    }

    SGPRUsed = findImplicitSGPRRead(MI);
    if (SGPRUsed != AMDGPU::NoRegister) {
      // Implicit uses may safely overlap true operands
      if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
            return !RI.regsOverlap(SGPRUsed, SGPR);
          })) {
        ++ConstantBusCount;
        SGPRsUsed.push_back(SGPRUsed);
      }
    }

    // v_writelane_b32 is an exception from the constant bus restriction:
    // vsrc0 can be an sgpr, a constant, or m0, and the lane select an sgpr,
    // m0, or an inline constant.
    if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
        Opcode != AMDGPU::V_WRITELANE_B32) {
      ErrInfo = "VOP* instruction violates constant bus restriction";
      return false;
    }

    if (isVOP3(MI) && LiteralCount) {
      if (!ST.hasVOP3Literal()) {
        ErrInfo = "VOP3 instruction uses literal";
        return false;
      }
      if (LiteralCount > 1) {
        ErrInfo = "VOP3 instruction uses more than one literal";
        return false;
      }
    }
  }

  // Special case for writelane - this can break the multiple constant bus
  // rule, but still can't use more than one SGPR register.
  if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
    unsigned SGPRCount = 0;
    Register SGPRUsed = AMDGPU::NoRegister;

    for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
      if (OpIdx == -1)
        break;

      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
          if (MO.getReg() != SGPRUsed)
            ++SGPRCount;
          SGPRUsed = MO.getReg();
        }
      }
      if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
        ErrInfo = "WRITELANE instruction violates constant bus restriction";
        return false;
      }
    }
  }

  // Verify misc. restrictions on specific instructions.
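  // For instance, a well-formed v_div_scale looks roughly like
  //   %vdst, %sdst = V_DIV_SCALE_F32_e64 %a, %b, %a, ...
  // where src0 repeats either src1 or src2 and no source uses an ABS
  // modifier; the checks below enforce exactly that shape.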
  if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 ||
      Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) {
    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &Src1 = MI.getOperand(Src1Idx);
    const MachineOperand &Src2 = MI.getOperand(Src2Idx);
    if (Src0.isReg() && Src1.isReg() && Src2.isReg()) {
      if (!compareMachineOp(Src0, Src1) &&
          !compareMachineOp(Src0, Src2)) {
        ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2";
        return false;
      }
    }
    if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() &
         SISrcMods::ABS) ||
        (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() &
         SISrcMods::ABS) ||
        (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() &
         SISrcMods::ABS)) {
      ErrInfo = "ABS not allowed in VOP3B instructions";
      return false;
    }
  }

  if (isSOP2(MI) || isSOPC(MI)) {
    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &Src1 = MI.getOperand(Src1Idx);
    unsigned Immediates = 0;

    if (!Src0.isReg() &&
        !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType))
      Immediates++;
    if (!Src1.isReg() &&
        !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType))
      Immediates++;

    if (Immediates > 1) {
      ErrInfo = "SOP2/SOPC instruction requires too many immediate constants";
      return false;
    }
  }

  if (isSOPK(MI)) {
    auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16);
    if (Desc.isBranch()) {
      if (!Op->isMBB()) {
        ErrInfo = "invalid branch target for SOPK instruction";
        return false;
      }
    } else {
      uint64_t Imm = Op->getImm();
      if (sopkIsZext(MI)) {
        if (!isUInt<16>(Imm)) {
          ErrInfo = "invalid immediate for SOPK instruction";
          return false;
        }
      } else {
        if (!isInt<16>(Imm)) {
          ErrInfo = "invalid immediate for SOPK instruction";
          return false;
        }
      }
    }
  }

  if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
      Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) {
    const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 ||
                       Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64;

    const unsigned StaticNumOps = Desc.getNumOperands() +
                                  Desc.getNumImplicitUses();
    const unsigned NumImplicitOps = IsDst ? 2 : 1;

    // Allow additional implicit operands. This allows a fixup done by the post
    // RA scheduler where the main implicit operand is killed and implicit-defs
    // are added for sub-registers that remain live after this instruction.
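    // A sketch of the expected layout (beyond the static operands, which
    // already include the implicit m0 index read):
    //   v_movrels: ..., implicit %vec                 (use of the full vector)
    //   v_movreld: ..., implicit-def %vec, implicit %vec   (tied together)
    // where src0 (or vdst for movreld) must be a sub-register of %vec.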
    if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) {
      ErrInfo = "missing implicit register operands";
      return false;
    }

    const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
    if (IsDst) {
      if (!Dst->isUse()) {
        ErrInfo = "v_movreld_b32 vdst should be a use operand";
        return false;
      }

      unsigned UseOpIdx;
      if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) ||
          UseOpIdx != StaticNumOps + 1) {
        ErrInfo = "movrel implicit operands should be tied";
        return false;
      }
    }

    const MachineOperand &Src0 = MI.getOperand(Src0Idx);
    const MachineOperand &ImpUse
      = MI.getOperand(StaticNumOps + NumImplicitOps - 1);
    if (!ImpUse.isReg() || !ImpUse.isUse() ||
        !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) {
      ErrInfo = "src0 should be subreg of implicit vector use";
      return false;
    }
  }

  // Make sure we aren't losing exec uses in the td files. This mostly requires
  // being careful when using let Uses to try to add other use registers.
  if (shouldReadExec(MI)) {
    if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) {
      ErrInfo = "VALU instruction does not implicitly read exec mask";
      return false;
    }
  }

  if (isSMRD(MI)) {
    if (MI.mayStore()) {
      // The register offset form of scalar stores may only use m0 as the
      // soffset register.
      const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff);
      if (Soff && Soff->getReg() != AMDGPU::M0) {
        ErrInfo = "scalar stores must use m0 as offset register";
        return false;
      }
    }
  }

  if (isFLAT(MI) && !ST.hasFlatInstOffsets()) {
    const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
    if (Offset->getImm() != 0) {
      ErrInfo = "subtarget does not support offsets in flat instructions";
      return false;
    }
  }

  if (isMIMG(MI)) {
    const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim);
    if (DimOp) {
      int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode,
                                                 AMDGPU::OpName::vaddr0);
      int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc);
      const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode);
      const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
          AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode);
      const AMDGPU::MIMGDimInfo *Dim =
          AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm());

      if (!Dim) {
        ErrInfo = "dim is out of range";
        return false;
      }

      bool IsA16 = false;
      if (ST.hasR128A16()) {
        const MachineOperand *R128A16 =
            getNamedOperand(MI, AMDGPU::OpName::r128);
        IsA16 = R128A16->getImm() != 0;
      } else if (ST.hasGFX10A16()) {
        const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16);
        IsA16 = A16->getImm() != 0;
      }

      bool PackDerivatives = IsA16 || BaseOpcode->G16;
      bool IsNSA = SRsrcIdx - VAddr0Idx > 1;

      unsigned AddrWords = BaseOpcode->NumExtraArgs;
      unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                                (BaseOpcode->LodOrClampOrMip ? 1 : 0);
      if (IsA16)
        AddrWords += (AddrComponents + 1) / 2;
      else
        AddrWords += AddrComponents;

      if (BaseOpcode->Gradients) {
        if (PackDerivatives)
          // There are two gradients per coordinate, we pack them separately.
          // For the 3d case,
          // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
          AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2;
        else
          AddrWords += Dim->NumGradients;
      }

      unsigned VAddrWords;
      if (IsNSA) {
        VAddrWords = SRsrcIdx - VAddr0Idx;
      } else {
        const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
        VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
        if (AddrWords > 8)
          AddrWords = 16;
        else if (AddrWords > 4)
          AddrWords = 8;
        else if (AddrWords == 4)
          AddrWords = 4;
        else if (AddrWords == 3)
          AddrWords = 3;
      }

      if (VAddrWords != AddrWords) {
        LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords
                          << " but got " << VAddrWords << "\n");
        ErrInfo = "bad vaddr size";
        return false;
      }
    }
  }

  const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
  if (DppCt) {
    using namespace AMDGPU::DPP;

    unsigned DC = DppCt->getImm();
    if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
        DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
        (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
        (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
      ErrInfo = "Invalid dpp_ctrl value";
      return false;
    }
    if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
        ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "wavefront shifts are not supported on GFX10+";
      return false;
    }
    if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
        ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "broadcasts are not supported on GFX10+";
      return false;
    }
    if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
        ST.getGeneration() < AMDGPUSubtarget::GFX10) {
      ErrInfo = "Invalid dpp_ctrl value: "
                "row_share and row_xmask are not supported before GFX10";
      return false;
    }
  }

  return true;
}

unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: return AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
  case AMDGPU::COPY: return AMDGPU::COPY;
  case AMDGPU::PHI: return AMDGPU::PHI;
  case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
  case AMDGPU::WQM: return AMDGPU::WQM;
  case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
  case AMDGPU::WWM: return AMDGPU::WWM;
  case AMDGPU::S_MOV_B32: {
    const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
    return MI.getOperand(1).isReg() ||
           RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
           AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
  }
  case AMDGPU::S_ADD_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64
                              : AMDGPU::V_ADD_CO_U32_e32;
  case AMDGPU::S_ADDC_U32:
    return AMDGPU::V_ADDC_U32_e32;
  case AMDGPU::S_SUB_I32:
    return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64
                              : AMDGPU::V_SUB_CO_U32_e32;
  // FIXME: These are not consistently handled, and selected when the carry is
  // used.
  case AMDGPU::S_ADD_U32:
    return AMDGPU::V_ADD_CO_U32_e32;
  case AMDGPU::S_SUB_U32:
    return AMDGPU::V_SUB_CO_U32_e32;
  case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64;
  case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64;
  case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64;
  case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64;
  case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64;
  case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64;
  case AMDGPU::S_XNOR_B32:
    return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64
                           : AMDGPU::INSTRUCTION_LIST_END;
  case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64;
  case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64;
  case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64;
  case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64;
  case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32;
  case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64;
  case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32;
  case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64;
  case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32;
  case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64;
  case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64;
  case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64;
  case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64;
  case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64;
  case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64;
  case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32;
  case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32;
  case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32;
  case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32;
  case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32;
  case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32;
  case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32;
  case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32;
  case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32;
  case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32;
  case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32;
  case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32;
  case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32;
  case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32;
  case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32;
  case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32;
  case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64;
  case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32;
  case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32;
  case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64;
  case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ;
  case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ;
  }
  llvm_unreachable(
      "Unexpected scalar opcode without corresponding vector one!");
}

const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI,
                                                      unsigned OpNo) const {
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  const MCInstrDesc &Desc = get(MI.getOpcode());
  if (MI.isVariadic() || OpNo >= Desc.getNumOperands() ||
      Desc.OpInfo[OpNo].RegClass == -1) {
    Register Reg = MI.getOperand(OpNo).getReg();

    if (Reg.isVirtual())
      return MRI.getRegClass(Reg);
    return RI.getPhysRegClass(Reg);
  }

  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return RI.getRegClass(RCID);
}

void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const {
  MachineBasicBlock::iterator I = MI;
  MachineBasicBlock *MBB = MI.getParent();
  MachineOperand &MO = MI.getOperand(OpIdx);
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass;
  const TargetRegisterClass *RC = RI.getRegClass(RCID);
  unsigned Size = RI.getRegSizeInBits(*RC);
  unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO
                                 : AMDGPU::V_MOV_B32_e32;
  if (MO.isReg())
    Opcode = AMDGPU::COPY;
  else if (RI.isSGPRClass(RC))
    Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;

  const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
  if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC))
    VRC = &AMDGPU::VReg_64RegClass;
  else
    VRC = &AMDGPU::VGPR_32RegClass;

  Register Reg = MRI.createVirtualRegister(VRC);
  DebugLoc DL = MBB->findDebugLoc(I);
  BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO);
  MO.ChangeToRegister(Reg, false);
}

unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI,
                                         MachineRegisterInfo &MRI,
                                         MachineOperand &SuperReg,
                                         const TargetRegisterClass *SuperRC,
                                         unsigned SubIdx,
                                         const TargetRegisterClass *SubRC)
                                         const {
  MachineBasicBlock *MBB = MI->getParent();
  DebugLoc DL = MI->getDebugLoc();
  Register SubReg = MRI.createVirtualRegister(SubRC);

  if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) {
    BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
      .addReg(SuperReg.getReg(), 0, SubIdx);
    return SubReg;
  }

  // Just in case the super register is itself a sub-register, copy it to a new
  // value so we don't need to worry about merging its subreg index with the
  // SubIdx passed to this function. The register coalescer should be able to
  // eliminate this extra copy.
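  // That is, roughly (illustrative MIR):
  //   %NewSuper:super_rc = COPY %Super.outer_subreg
  //   %Sub:sub_rc = COPY %NewSuper.SubIdx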
  Register NewSuperReg = MRI.createVirtualRegister(SuperRC);

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg)
    .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg());

  BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg)
    .addReg(NewSuperReg, 0, SubIdx);

  return SubReg;
}

MachineOperand SIInstrInfo::buildExtractSubRegOrImm(
  MachineBasicBlock::iterator MII,
  MachineRegisterInfo &MRI,
  MachineOperand &Op,
  const TargetRegisterClass *SuperRC,
  unsigned SubIdx,
  const TargetRegisterClass *SubRC) const {
  if (Op.isImm()) {
    if (SubIdx == AMDGPU::sub0)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm()));
    if (SubIdx == AMDGPU::sub1)
      return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32));

    llvm_unreachable("Unhandled register index for immediate");
  }

  unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC,
                                       SubIdx, SubRC);
  return MachineOperand::CreateReg(SubReg, false);
}

// Change the order of operands from (0, 1, 2) to (0, 2, 1)
void SIInstrInfo::swapOperands(MachineInstr &Inst) const {
  assert(Inst.getNumExplicitOperands() == 3);
  MachineOperand Op1 = Inst.getOperand(1);
  Inst.RemoveOperand(1);
  Inst.addOperand(Op1);
}

bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI,
                                    const MCOperandInfo &OpInfo,
                                    const MachineOperand &MO) const {
  if (!MO.isReg())
    return false;

  Register Reg = MO.getReg();

  const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass);
  if (Reg.isPhysical())
    return DRC->contains(Reg);

  const TargetRegisterClass *RC = MRI.getRegClass(Reg);

  if (MO.getSubReg()) {
    const MachineFunction *MF = MO.getParent()->getParent()->getParent();
    const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF);
    if (!SuperRC)
      return false;

    DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg());
    if (!DRC)
      return false;
  }
  return RC->hasSuperClassEq(DRC);
}

bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI,
                                     const MCOperandInfo &OpInfo,
                                     const MachineOperand &MO) const {
  if (MO.isReg())
    return isLegalRegOperand(MRI, OpInfo, MO);

  // Handle non-register types that are treated like immediates.
  assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal());
  return true;
}

bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx,
                                 const MachineOperand *MO) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MCInstrDesc &InstDesc = MI.getDesc();
  const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx];
  const TargetRegisterClass *DefinedRC =
      OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr;
  if (!MO)
    MO = &MI.getOperand(OpIdx);

  int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode());
  int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
  if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) {
    if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--)
      return false;

    SmallDenseSet<RegSubRegPair> SGPRsUsed;
    if (MO->isReg())
      SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg()));

    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      if (i == OpIdx)
        continue;
      const MachineOperand &Op = MI.getOperand(i);
      if (Op.isReg()) {
        RegSubRegPair SGPR(Op.getReg(), Op.getSubReg());
        if (!SGPRsUsed.count(SGPR) &&
            usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) {
          if (--ConstantBusLimit <= 0)
            return false;
          SGPRsUsed.insert(SGPR);
        }
      } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) {
        if (--ConstantBusLimit <= 0)
          return false;
      } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) &&
                 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) {
        if (!VOP3LiteralLimit--)
          return false;
        if (--ConstantBusLimit <= 0)
          return false;
      }
    }
  }

  if (MO->isReg()) {
    assert(DefinedRC);
    return isLegalRegOperand(MRI, OpInfo, *MO);
  }

  // Handle non-register types that are treated like immediates.
  assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal());

  if (!DefinedRC) {
    // This operand expects an immediate.
    return true;
  }

  return isImmOperandLegal(MI, OpIdx, *MO);
}

void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &InstrDesc = get(Opc);

  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  MachineOperand &Src0 = MI.getOperand(Src0Idx);

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  // If there is an implicit SGPR use such as the VCC use for
  // v_addc_u32/v_subb_u32, we may only have one constant bus use in total
  // before GFX10.
  bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister;
  if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 &&
      Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) ||
       isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx])))
    legalizeOpWithMove(MI, Src0Idx);

  // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for
  // both the value to write (src0) and lane select (src1). Fix up non-SGPR
  // src0/src1 with V_READFIRSTLANE.
  if (Opc == AMDGPU::V_WRITELANE_B32) {
    const DebugLoc &DL = MI.getDebugLoc();
    if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src0);
      Src0.ChangeToRegister(Reg, false);
    }
    if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src1);
      Src1.ChangeToRegister(Reg, false);
    }
    return;
  }

  // No VOP2 instructions support AGPRs.
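  // If one nonetheless appears here (e.g. as a raw accumulator input), it is
  // rewritten through a VGPR by legalizeOpWithMove.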
  if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
    legalizeOpWithMove(MI, Src0Idx);

  if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
    legalizeOpWithMove(MI, Src1Idx);

  // VOP2 instructions accept all operand types for src0, so we don't need to
  // check its legality. If src1 is already legal, we don't need to do
  // anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
  // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
  // select is uniform.
  if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
      RI.isVGPR(MRI, Src1.getReg())) {
    Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
    const DebugLoc &DL = MI.getDebugLoc();
    BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
      .add(Src1);
    Src1.ChangeToRegister(Reg, false);
    return;
  }

  // We do not use commuteInstruction here because it is too aggressive and
  // will commute whenever it is possible. We only want to commute here if it
  // improves legality. This can be called a fairly large number of times so
  // don't waste compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI.isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
  //
  // TODO: Other immediate-like operand kinds could be commuted if there was a
  // MachineOperand::ChangeTo* for them.
  if ((!Src1.isImm() && !Src1.isReg()) ||
      !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int CommutedOpc = commuteOpcode(MI);
  if (CommutedOpc == -1) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  MI.setDesc(get(CommutedOpc));

  Register Src0Reg = Src0.getReg();
  unsigned Src0SubReg = Src0.getSubReg();
  bool Src0Kill = Src0.isKill();

  if (Src1.isImm())
    Src0.ChangeToImmediate(Src1.getImm());
  else if (Src1.isReg()) {
    Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
    Src0.setSubReg(Src1.getSubReg());
  } else
    llvm_unreachable("Should only have register or immediate operands");

  Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
  Src1.setSubReg(Src0SubReg);
  fixImplicitOperands(MI);
}

// Legalize VOP3 operands. All operand types are supported for any operand
// but only one literal constant and only starting from GFX10.
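// In practice this means at most one unique SGPR among src0..src2 and no
// literal before GFX10; on GFX10+ two constant bus slots and a single
// literal are available (see ConstantBusLimit and LiteralLimit below).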
void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  int VOP3Idx[3] = {
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1),
    AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)
  };

  if (Opc == AMDGPU::V_PERMLANE16_B32_e64 ||
      Opc == AMDGPU::V_PERMLANEX16_B32_e64) {
    // src1 and src2 must be scalar
    MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]);
    MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]);
    const DebugLoc &DL = MI.getDebugLoc();
    if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src1);
      Src1.ChangeToRegister(Reg, false);
    }
    if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) {
      Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
      BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src2);
      Src2.ChangeToRegister(Reg, false);
    }
  }

  // Find the one SGPR operand we are allowed to use.
  int ConstantBusLimit = ST.getConstantBusLimit(Opc);
  int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0;
  SmallDenseSet<unsigned> SGPRsUsed;
  Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
  if (SGPRReg != AMDGPU::NoRegister) {
    SGPRsUsed.insert(SGPRReg);
    --ConstantBusLimit;
  }

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = VOP3Idx[i];
    if (Idx == -1)
      break;
    MachineOperand &MO = MI.getOperand(Idx);

    if (!MO.isReg()) {
      if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
        continue;

      if (LiteralLimit > 0 && ConstantBusLimit > 0) {
        --LiteralLimit;
        --ConstantBusLimit;
        continue;
      }

      --LiteralLimit;
      --ConstantBusLimit;
      legalizeOpWithMove(MI, Idx);
      continue;
    }

    if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) &&
        !isOperandLegal(MI, Idx, &MO)) {
      legalizeOpWithMove(MI, Idx);
      continue;
    }

    if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg())))
      continue; // VGPRs are legal

    // We can use one SGPR in each VOP3 instruction prior to GFX10
    // and two starting from GFX10.
    if (SGPRsUsed.count(MO.getReg()))
      continue;
    if (ConstantBusLimit > 0) {
      SGPRsUsed.insert(MO.getReg());
      --ConstantBusLimit;
      continue;
    }

    // If we make it this far, then the operand is not legal and we must
    // legalize it.
    legalizeOpWithMove(MI, Idx);
  }
}

Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
                                         MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
  const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
  Register DstReg = MRI.createVirtualRegister(SRC);
  unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;

  if (RI.hasAGPRs(VRC)) {
    VRC = RI.getEquivalentVGPRClass(VRC);
    Register NewSrcReg = MRI.createVirtualRegister(VRC);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(TargetOpcode::COPY), NewSrcReg)
      .addReg(SrcReg);
    SrcReg = NewSrcReg;
  }

  if (SubRegs == 1) {
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
      .addReg(SrcReg);
    return DstReg;
  }

  SmallVector<unsigned, 8> SRegs;
  for (unsigned i = 0; i < SubRegs; ++i) {
    Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
      .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
    SRegs.push_back(SGPR);
  }

  MachineInstrBuilder MIB =
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::REG_SEQUENCE), DstReg);
  for (unsigned i = 0; i < SubRegs; ++i) {
    MIB.addReg(SRegs[i]);
    MIB.addImm(RI.getSubRegFromChannel(i));
  }
  return DstReg;
}

void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {

  // If the pointer is stored in VGPRs, then we need to move it to
  // SGPRs using v_readfirstlane. This is safe because we only select
  // loads with uniform pointers to SMRD instructions, so we know the
  // pointer value is uniform.
  MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
  if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
    Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
    SBase->setReg(SGPR);
  }
  MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
  if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
    Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
    SOff->setReg(SGPR);
  }
}

// FIXME: Remove this when SelectionDAG is obsoleted.
void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {
  if (!isSegmentSpecificFLAT(MI))
    return;

  // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
  // thinks they are uniform, so a readfirstlane should be valid.
  MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
  if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
    return;

  Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
  SAddr->setReg(ToSGPR);
}

void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
                                         MachineBasicBlock::iterator I,
                                         const TargetRegisterClass *DstRC,
                                         MachineOperand &Op,
                                         MachineRegisterInfo &MRI,
                                         const DebugLoc &DL) const {
  Register OpReg = Op.getReg();
  unsigned OpSubReg = Op.getSubReg();

  const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
      RI.getRegClassForReg(MRI, OpReg), OpSubReg);

  // Check if operand is already the correct register class.
  if (DstRC == OpRC)
    return;

  Register DstReg = MRI.createVirtualRegister(DstRC);
  MachineInstr *Copy =
      BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);

  Op.setReg(DstReg);
  Op.setSubReg(0);

  MachineInstr *Def = MRI.getVRegDef(OpReg);
  if (!Def)
    return;

  // Try to eliminate the copy if it is copying an immediate value.
  if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
    FoldImmediate(*Copy, *Def, OpReg, &MRI);

  bool ImpDef = Def->isImplicitDef();
  while (!ImpDef && Def && Def->isCopy()) {
    if (Def->getOperand(1).getReg().isPhysical())
      break;
    Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
    ImpDef = Def && Def->isImplicitDef();
  }
  if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
      !ImpDef)
    Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
}

// Emit the actual waterfall loop, executing the wrapped instruction for each
// unique value of \p Rsrc across all lanes. In the best case we execute 1
// iteration, in the worst case we execute 64 (once per lane).
static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
                          MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
                          const DebugLoc &DL, MachineOperand &Rsrc) {
  MachineFunction &MF = *OrigBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned SaveExecOpc =
      ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
  unsigned XorTermOpc =
      ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
  unsigned AndOpc =
      ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  MachineBasicBlock::iterator I = LoopBB.begin();

  SmallVector<Register, 8> ReadlanePieces;
  Register CondReg = AMDGPU::NoRegister;

  Register VRsrc = Rsrc.getReg();
  unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());

  unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI);
  unsigned NumSubRegs = RegSize / 32;
  assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size");

  for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) {

    Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

    // Read the next variant <- also loop target.
    BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo)
      .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx));

    // Read the next variant.
    BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi)
      .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1));

    ReadlanePieces.push_back(CurRegLo);
    ReadlanePieces.push_back(CurRegHi);

    // Comparison is to be done as 64-bit.
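    // That is, roughly (illustrative virtual register names):
    //   %pair:sgpr_64 = REG_SEQUENCE %CurRegLo, sub0, %CurRegHi, sub1
    //   %cond = V_CMP_EQ_U64_e64 %pair, %vrsrc.subN_subN+1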
    Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
    BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
      .addReg(CurRegLo)
      .addImm(AMDGPU::sub0)
      .addReg(CurRegHi)
      .addImm(AMDGPU::sub1);

    Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
    auto Cmp =
        BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg)
            .addReg(CurReg);
    if (NumSubRegs <= 2)
      Cmp.addReg(VRsrc);
    else
      Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));

    // Combine the comparison results with AND.
    if (CondReg == AMDGPU::NoRegister) // First.
      CondReg = NewCondReg;
    else { // If not the first, we create an AND.
      Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg)
        .addReg(CondReg)
        .addReg(NewCondReg);
      CondReg = AndReg;
    }
  } // End for loop.

  auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc));
  Register SRsrc = MRI.createVirtualRegister(SRsrcRC);

  // Build scalar Rsrc.
  auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc);
  unsigned Channel = 0;
  for (Register Piece : ReadlanePieces) {
    Merge.addReg(Piece)
         .addImm(TRI->getSubRegFromChannel(Channel++));
  }

  // Update Rsrc operand to use the SGPR Rsrc.
  Rsrc.setReg(SRsrc);
  Rsrc.setIsKill(true);

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
  MRI.setSimpleHint(SaveExec, CondReg);

  // Update EXEC to matching lanes, saving original to SaveExec.
  BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
    .addReg(CondReg, RegState::Kill);

  // The original instruction is here; we insert the terminators after it.
  I = LoopBB.end();

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
    .addReg(Exec)
    .addReg(SaveExec);

  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
}

// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
// with SGPRs by iterating over all unique values across all lanes.
// Returns the loop basic block that now contains \p MI.
static MachineBasicBlock *
loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
                  MachineOperand &Rsrc, MachineDominatorTree *MDT,
                  MachineBasicBlock::iterator Begin = nullptr,
                  MachineBasicBlock::iterator End = nullptr) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (!Begin.isValid())
    Begin = &MI;
  if (!End.isValid()) {
    End = &MI;
    ++End;
  }
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);

  // Save the EXEC mask
  BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);

  // Killed uses in the instruction we are waterfalling around will be
  // incorrect due to the added control-flow.
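  // (Inside the loop the values are re-read on every iteration, so a kill
  // flag from the original straight-line code would be wrong; clearing the
  // flags is always conservatively correct.)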
  MachineBasicBlock::iterator AfterMI = MI;
  ++AfterMI;
  for (auto I = Begin; I != AfterMI; I++) {
    for (auto &MO : I->uses()) {
      if (MO.isReg() && MO.isUse()) {
        MRI.clearKillFlags(MO.getReg());
      }
    }
  }

  // To insert the loop we need to split the block. Move everything after this
  // point to a new block, and insert a new empty block between the two.
  MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock();
  MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock();
  MachineFunction::iterator MBBI(MBB);
  ++MBBI;

  MF.insert(MBBI, LoopBB);
  MF.insert(MBBI, RemainderBB);

  LoopBB->addSuccessor(LoopBB);
  LoopBB->addSuccessor(RemainderBB);

  // Move the instructions from Begin up to and including MI into LoopBB, and
  // the remainder of the block to RemainderBB.
  RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB);
  RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end());
  LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end());

  MBB.addSuccessor(LoopBB);

  // Update dominators. We know that MBB immediately dominates LoopBB, that
  // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately
  // dominates all of the successors transferred to it from MBB that MBB used
  // to properly dominate.
  if (MDT) {
    MDT->addNewBlock(LoopBB, &MBB);
    MDT->addNewBlock(RemainderBB, LoopBB);
    for (auto &Succ : RemainderBB->successors()) {
      if (MDT->properlyDominates(&MBB, Succ)) {
        MDT->changeImmediateDominator(Succ, RemainderBB);
      }
    }
  }

  emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);

  // Restore the EXEC mask
  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
  return LoopBB;
}
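
// For reference, the control flow produced above looks roughly like this
// (block names are illustrative only):
//
//   MBB:                            ; original block; EXEC saved here
//   LoopBB:                         ; holds [Begin, End) plus the readlane,
//     ...                           ; compare and exec-mask bookkeeping, and
//     S_CBRANCH_EXECNZ LoopBB       ; loops while any lane is unhandled
//   RemainderBB:                    ; rest of the original block; EXEC is
//     ...                           ; restored at its start
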
// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
static std::tuple<unsigned, unsigned>
extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the ptr from the resource descriptor.
  unsigned RsrcPtr =
      TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
                             AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

  // Create an empty resource descriptor
  Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
  uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();

  // Zero64 = 0
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
      .addImm(0);

  // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);

  // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
      .addImm(RsrcDataFormat >> 32);

  // NewSRsrc = {Zero64, SRsrcFormat}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
      .addReg(Zero64)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(SRsrcFormatLo)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcFormatHi)
      .addImm(AMDGPU::sub3);

  return std::make_tuple(RsrcPtr, NewSRsrc);
}
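
// The replacement descriptor built by extractRsrcPtr is, conceptually:
//
//   NewSRsrc = { base = 0 (64 bits),
//                RSRC_DATA_FORMAT[31:0], RSRC_DATA_FORMAT[63:32] }
//
// i.e. a null base pointer with the default data format. This is only correct
// because the callers fold the real base pointer (RsrcPtr) into the
// instruction's 64-bit VAddr instead, leaving the final address unchanged.
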
MachineBasicBlock *
SIInstrInfo::legalizeOperands(MachineInstr &MI,
                              MachineDominatorTree *MDT) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  MachineBasicBlock *CreatedBB = nullptr;

  // Legalize VOP2
  if (isVOP2(MI) || isVOPC(MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return CreatedBB;
  }

  // Legalize VOP3
  if (isVOP3(MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return CreatedBB;
  }

  // Legalize SMRD
  if (isSMRD(MI)) {
    legalizeOperandsSMRD(MRI, MI);
    return CreatedBB;
  }

  // Legalize FLAT
  if (isFLAT(MI)) {
    legalizeOperandsFLAT(MRI, MI);
    return CreatedBB;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI.getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
      if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI.getOperand(i).getReg());
      if (RI.hasVectorRegisters(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
      if (!VRC) {
        assert(SRC);
        if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
          VRC = &AMDGPU::VReg_1RegClass;
        } else
          VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                    ? RI.getEquivalentAGPRClass(SRC)
                    : RI.getEquivalentVGPRClass(SRC);
      } else {
        VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                  ? RI.getEquivalentAGPRClass(VRC)
                  : RI.getEquivalentVGPRClass(VRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
      MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg() || !Op.getReg().isVirtual())
        continue;

      // MI is a PHI instruction.
      MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
      MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();

      // Avoid creating no-op copies with the same src and dst reg class. These
      // confuse some of the machine passes.
      legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
    }
  }

  // REG_SEQUENCE doesn't really require operand legalization, but if one has a
  // VGPR dest type and SGPR sources, insert copies so all operands are
  // VGPRs. This seems to help operand folding / the register coalescer.
  if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
    MachineBasicBlock *MBB = MI.getParent();
    const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
    if (RI.hasVGPRs(DstRC)) {
      // Update all the operands so they are VGPR register classes. These may
      // not be the same register class because REG_SEQUENCE supports mixing
      // subregister index types e.g. sub0_sub1 + sub2 + sub3
      for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg() || !Op.getReg().isVirtual())
          continue;

        const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
        const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
        if (VRC == OpRC)
          continue;

        legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
        Op.setIsKill();
      }
    }

    return CreatedBB;
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
    Register Dst = MI.getOperand(0).getReg();
    Register Src0 = MI.getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock *MBB = MI.getParent();
      MachineOperand &Op = MI.getOperand(1);
      legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
    }
    return CreatedBB;
  }

  // Legalize SI_INIT_M0
  if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
    MachineOperand &Src = MI.getOperand(0);
    if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
      Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
    return CreatedBB;
  }

  // Legalize MIMG and MUBUF/MTBUF for shaders.
  //
  // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
  // scratch memory access. In both cases, the legalization never involves
  // conversion to the addr64 form.
  if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) &&
                     (isMUBUF(MI) || isMTBUF(MI)))) {
    MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
      CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);

    MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
      CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);

    return CreatedBB;
  }

  // Legalize SI_CALL
  if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
    MachineOperand *Dest = &MI.getOperand(0);
    if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
      // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN, together
      // with the surrounding copies from and to physical registers, into the
      // loop block.
      unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
      unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

      // Also move the copies to physical registers into the loop block
      MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::iterator Start(&MI);
      while (Start->getOpcode() != FrameSetupOpcode)
        --Start;
      MachineBasicBlock::iterator End(&MI);
      while (End->getOpcode() != FrameDestroyOpcode)
        ++End;
      // Also include following copies of the return value
      ++End;
      while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
             MI.definesRegister(End->getOperand(1).getReg()))
        ++End;
      CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
    }
  }

  // Legalize MUBUF* instructions.
  int RsrcIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
  if (RsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
                             RI.getRegClass(RsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return CreatedBB;
    }

    // Legalize a VGPR Rsrc.
    //
    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
    // a zero-value SRsrc.
    //
    // If the instruction is _OFFSET (both idxen and offen disabled), and we
    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
    // above.
    //
    // Otherwise we are on non-ADDR64 hardware, and/or we have
    // idxen/offen/bothen and we fall back to a waterfall loop.

    MachineBasicBlock &MBB = *MI.getParent();

    MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
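      // Roughly, the sequence emitted below is a plain 64-bit add done as two
      // 32-bit halves linked by a carry (register names illustrative):
      //
      //   NewVAddrLo, carry = V_ADD_CO_U32 RsrcPtr.sub0, VAddr.sub0
      //   NewVAddrHi        = V_ADDC_U32   RsrcPtr.sub1, VAddr.sub1, carry
      //   NewVAddr          = REG_SEQUENCE NewVAddrLo, NewVAddrHi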
      Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
      Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
      Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
          .addDef(CondReg0)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
          .addImm(0);

      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
          .addDef(CondReg1, RegState::Dead)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
          .addReg(CondReg0, RegState::Kill)
          .addImm(0);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);

      VAddr->setReg(NewVAddr);
      Rsrc->setReg(NewSRsrc);
    } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                .add(*VData)
                .addReg(NewVAddr)
                .addReg(NewSRsrc)
                .add(*SOffset)
                .add(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC =
                getNamedOperand(MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }
        if (const MachineOperand *DLC =
                getNamedOperand(MI, AMDGPU::OpName::dlc)) {
          MIB.addImm(DLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE =
                getNamedOperand(MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));

        MIB.cloneMemRefs(MI);
        Addr64 = MIB;
      } else {
        // Atomics with return.
        Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                     .add(*VData)
                     .add(*VDataIn)
                     .addReg(NewVAddr)
                     .addReg(NewSRsrc)
                     .add(*SOffset)
                     .add(*Offset)
                     .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
                     .cloneMemRefs(MI);
      }

      MI.removeFromParent();

      // NewVaddr = RsrcPtr (the base pointer extracted from the descriptor)
      BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addImm(AMDGPU::sub0)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addImm(AMDGPU::sub1);
    } else {
      // This is another variant; legalize Rsrc with waterfall loop from VGPRs
      // to SGPRs.
      CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
      return CreatedBB;
    }
  }
  return CreatedBB;
}
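
// A small worked example of the worklist algorithm below: starting from
//
//   %2:sgpr_64 = S_AND_B64 %0, %1
//
// the S_AND_B64 case splits the operation into two S_AND_B32 halves, which a
// later worklist iteration lowers to V_AND_B32 once their operands live in
// VGPRs, and any scalar users of %2 are queued in turn. (Illustrative only;
// the exact opcodes chosen depend on the subtarget.)
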
MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst,
                                           MachineDominatorTree *MDT) const {
  SetVectorType Worklist;
  Worklist.insert(&TopInst);
  MachineBasicBlock *CreatedBB = nullptr;
  MachineBasicBlock *CreatedBBTmp = nullptr;

  while (!Worklist.empty()) {
    MachineInstr &Inst = *Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst.getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst.getOpcode();
    unsigned NewOpcode = getVALUOp(Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      break;
    case AMDGPU::S_ADD_U64_PSEUDO:
    case AMDGPU::S_SUB_U64_PSEUDO:
      splitScalar64BitAddSub(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;
    case AMDGPU::S_ADD_I32:
    case AMDGPU::S_SUB_I32: {
      // FIXME: The u32 versions currently selected use the carry.
      bool Changed;
      std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT);
      if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
        CreatedBB = CreatedBBTmp;
      if (Changed)
        continue;

      // Default handling
      break;
    }
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NAND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XNOR_B64:
      if (ST.hasDLInsts())
        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
      else
        splitScalar64BitXnor(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ANDN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ORN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BFE_I64:
      splitScalar64BitBFE(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_LSHL_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHL_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B64_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I64_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B64_e64;
        swapOperands(Inst);
      }
      break;

    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_CBRANCH_SCC0:
    case AMDGPU::S_CBRANCH_SCC1:
      // Clear unused bits of vcc
      if (ST.isWave32())
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
                AMDGPU::VCC_LO)
            .addReg(AMDGPU::EXEC_LO)
            .addReg(AMDGPU::VCC_LO);
      else
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
                AMDGPU::VCC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);
      break;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");

    case AMDGPU::S_PACK_LL_B32_B16:
    case AMDGPU::S_PACK_LH_B32_B16:
    case AMDGPU::S_PACK_HH_B32_B16:
      movePackToVALU(Worklist, MRI, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XNOR_B32:
      lowerScalarXnor(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NAND_B32:
      splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOR_B32:
      splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ANDN2_B32:
      splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ORN2_B32:
      splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst.eraseFromParent();
      continue;

    // TODO: remove as soon as everything is ready
    // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
    // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
    // can only be selected from the uniform SDNode.
    case AMDGPU::S_ADD_CO_PSEUDO:
    case AMDGPU::S_SUB_CO_PSEUDO: {
      unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
                         ? AMDGPU::V_ADDC_U32_e64
                         : AMDGPU::V_SUBB_U32_e64;
      const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

      Register CarryInReg = Inst.getOperand(4).getReg();
      if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
        Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
        BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
            .addReg(CarryInReg);
      }

      Register CarryOutReg = Inst.getOperand(1).getReg();

      Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
          MRI.getRegClass(Inst.getOperand(0).getReg())));
      MachineInstr *CarryOp =
          BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
              .addReg(CarryOutReg, RegState::Define)
              .add(Inst.getOperand(2))
              .add(Inst.getOperand(3))
              .addReg(CarryInReg)
              .addImm(0);
      CreatedBBTmp = legalizeOperands(*CarryOp);
      if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
        CreatedBB = CreatedBBTmp;
      MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
      addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
      Inst.eraseFromParent();
    }
      continue;
    case AMDGPU::S_UADDO_PSEUDO:
    case AMDGPU::S_USUBO_PSEUDO: {
      const DebugLoc &DL = Inst.getDebugLoc();
      MachineOperand &Dest0 = Inst.getOperand(0);
      MachineOperand &Dest1 = Inst.getOperand(1);
      MachineOperand &Src0 = Inst.getOperand(2);
      MachineOperand &Src1 = Inst.getOperand(3);

      unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
                         ? AMDGPU::V_ADD_CO_U32_e64
                         : AMDGPU::V_SUB_CO_U32_e64;
      const TargetRegisterClass *NewRC =
          RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
      Register DestReg = MRI.createVirtualRegister(NewRC);
      MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
                                   .addReg(Dest1.getReg(), RegState::Define)
                                   .add(Src0)
                                   .add(Src1)
                                   .addImm(0); // clamp bit

      CreatedBBTmp = legalizeOperands(*NewInstr, MDT);
      if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
        CreatedBB = CreatedBBTmp;

      MRI.replaceRegWith(Dest0.getReg(), DestReg);
      addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
                                   Worklist);
      Inst.eraseFromParent();
    }
      continue;

    case AMDGPU::S_CSELECT_B32:
    case AMDGPU::S_CSELECT_B64:
      lowerSelect(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
      CreatedBBTmp = legalizeOperands(Inst, MDT);
      if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
        CreatedBB = CreatedBBTmp;
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst.setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it, and
    // we're just about to add the implicit use / defs of VCC, and we don't want
    // both.
    for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst.getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
        // Only propagate through live-def of SCC.
        if (Op.isDef() && !Op.isDead())
          addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
        Inst.RemoveOperand(i);
      }
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst.addOperand(MachineOperand::CreateImm(0));
      Inst.addOperand(MachineOperand::CreateImm(Size));

    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
      // The VALU version adds the second operand to the result, so insert an
      // extra 0 operand.
      Inst.addOperand(MachineOperand::CreateImm(0));
    }

    Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
    fixImplicitOperands(Inst);

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
      Inst.RemoveOperand(2);                      // Remove old immediate.
      Inst.addOperand(MachineOperand::CreateImm(Offset));
      Inst.addOperand(MachineOperand::CreateImm(BitWidth));
    }

    bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
    unsigned NewDstReg = AMDGPU::NoRegister;
    if (HasDst) {
      Register DstReg = Inst.getOperand(0).getReg();
      if (DstReg.isPhysical())
        continue;

      // Update the destination register class.
      const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
      if (!NewDstRC)
        continue;

      if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
          NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
        // Instead of creating a copy where src and dst are the same register
        // class, we just replace all uses of dst with src. These kinds of
        // copies interfere with the heuristics MachineSink uses to decide
        // whether or not to split a critical edge, since the pass assumes
        // that copies will end up as machine instructions and not be
        // eliminated.
        addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
        MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
        MRI.clearKillFlags(Inst.getOperand(1).getReg());
        Inst.getOperand(0).setReg(DstReg);

        // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
        // these are deleted later, but at -O0 it would leave a suspicious
        // looking illegal copy of an undef register.
        for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
          Inst.RemoveOperand(I);
        Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
        continue;
      }

      NewDstReg = MRI.createVirtualRegister(NewDstRC);
      MRI.replaceRegWith(DstReg, NewDstReg);
    }

    // Legalize the operands
    CreatedBBTmp = legalizeOperands(Inst, MDT);
    if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
      CreatedBB = CreatedBBTmp;

    if (HasDst)
      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
  }
  return CreatedBB;
}

// Add/sub require special handling to deal with carry outs.
std::pair<bool, MachineBasicBlock *>
SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                              MachineDominatorTree *MDT) const {
  if (ST.hasAddNoCarry()) {
    // Assume there is no user of scc since we don't select this in that case.
    // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
    // is used.

    MachineBasicBlock &MBB = *Inst.getParent();
    MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

    Register OldDstReg = Inst.getOperand(0).getReg();
    Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    unsigned Opc = Inst.getOpcode();
    assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);

    unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
      AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;

    assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
    Inst.RemoveOperand(3);

    Inst.setDesc(get(NewOpc));
    Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
    Inst.addImplicitDefUseOperands(*MBB.getParent());
    MRI.replaceRegWith(OldDstReg, ResultReg);
    MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT);

    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return std::make_pair(true, NewBB);
  }

  return std::make_pair(false, nullptr);
}

void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
                              MachineDominatorTree *MDT) const {

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  MachineOperand &Cond = Inst.getOperand(3);

  Register SCCSource = Cond.getReg();
  // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead.
  if (!Cond.isUndef()) {
    for (MachineInstr &CandI :
         make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
                    Inst.getParent()->rend())) {
      if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
          -1) {
        if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
          SCCSource = CandI.getOperand(1).getReg();
        }
        break;
      }
    }
  }

  // If this is a trivial select where the condition is effectively not SCC
  // (i.e. SCCSource is the source of a copy to SCC), then the select is
  // semantically equivalent to copying SCCSource. Hence, there is no need to
  // create V_CNDMASK; we can just use that and bail out.
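  // e.g. if the condition was materialized as %mask (SCC = COPY %mask), then
  //
  //   %dst = S_CSELECT_B64 -1, 0
  //
  // already yields %mask itself, so replacing all uses of %dst with %mask
  // suffices.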
  if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) &&
      Src1.isImm() && (Src1.getImm() == 0)) {
    MRI.replaceRegWith(Dest.getReg(), SCCSource);
    return;
  }

  const TargetRegisterClass *TC = ST.getWavefrontSize() == 64
                                      ? &AMDGPU::SReg_64_XEXECRegClass
                                      : &AMDGPU::SReg_32_XM0_XEXECRegClass;
  Register CopySCC = MRI.createVirtualRegister(TC);

  if (SCCSource == AMDGPU::SCC) {
    // Insert a trivial select instead of creating a copy, because a copy from
    // SCC would semantically mean just copying a single bit, but we may need
    // the result to be a vector condition mask that needs preserving.
    unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
                                                    : AMDGPU::S_CSELECT_B32;
    auto NewSelect =
        BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
    NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
  } else {
    BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
  }

  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  auto UpdatedInst =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
          .addImm(0)
          .add(Src1) // False
          .addImm(0)
          .add(Src0) // True
          .addReg(CopySCC);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  legalizeOperands(*UpdatedInst, MDT);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                 MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  unsigned SubOp = ST.hasAddNoCarry() ?
    AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;

  BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
      .addImm(0)
      .addReg(Src.getReg());

  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
      .addReg(Src.getReg())
      .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}
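
// A minimal worked example of the identity used in lowerScalarXnor below:
// taking just four bits, for x = 0b1010 and y = 0b0110, x ^ y = 0b1100 and
// ~(x ^ y) = 0b0011, while ~x = 0b0101 gives (~x) ^ y = 0b0011 as well, so
// the inversion may be folded into either operand.
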
void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  if (ST.hasDLInsts()) {
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
        .add(Src0)
        .add(Src1);

    MRI.replaceRegWith(Dest.getReg(), NewDest);
    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  } else {
    // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
    // invert either source and then perform the XOR. If either source is a
    // scalar register, then we can leave the inversion on the scalar unit to
    // achieve a better distribution of scalar and vector instructions.
    bool Src0IsSGPR = Src0.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
    bool Src1IsSGPR = Src1.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
    MachineInstr *Xor;
    Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    // Build a pair of scalar instructions and add them to the work list.
    // The next iteration over the work list will lower these to the vector
    // unit as necessary.
    if (Src0IsSGPR) {
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
                .addReg(Temp)
                .add(Src1);
    } else if (Src1IsSGPR) {
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
                .add(Src0)
                .addReg(Temp);
    } else {
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
                .add(Src0)
                .add(Src1);
      MachineInstr *Not =
          BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
      Worklist.insert(Not);
    }

    MRI.replaceRegWith(Dest.getReg(), NewDest);

    Worklist.insert(Xor);

    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  }
}

void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
                                      MachineInstr &Inst,
                                      unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
                          .add(Src0)
                          .add(Src1);

  MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
                           .addReg(Interm);

  Worklist.insert(&Op);
  Worklist.insert(&Not);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
}

void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist,
                                     MachineInstr &Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
                           .add(Src1);

  MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
                          .add(Src0)
                          .addReg(Interm);

  Worklist.insert(&Not);
  Worklist.insert(&Op);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitUnaryOp(
    SetVectorType &Worklist, MachineInstr &Inst,
    unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // We don't need to legalizeOperands here because for a single operand, src0
  // will support any kind of input.

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
                                         MachineInstr &Inst,
                                         MachineDominatorTree *MDT) const {
  bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  Register CarryReg = MRI.createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .add(SrcReg0Sub0)
          .add(SrcReg1Sub0)
          .addImm(0); // clamp bit

  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .add(SrcReg0Sub1)
          .add(SrcReg1Sub1)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                              .add(SrcReg0Sub0)
                              .add(SrcReg1Sub0);

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                              .add(SrcReg0Sub1)
                              .add(SrcReg1Sub1);

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
                                       MachineInstr &Inst,
                                       MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());

  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineOperand* Op0;
  MachineOperand* Op1;

  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
    Op0 = &Src0;
    Op1 = &Src1;
  } else {
    Op0 = &Src1;
    Op1 = &Src0;
  }

  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
      .add(*Op0);

  Register NewDest = MRI.createVirtualRegister(DestRC);

  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
                           .addReg(Interm)
                           .add(*Op1);

  MRI.replaceRegWith(Dest.getReg(), NewDest);

  Worklist.insert(&Xor);
}

void SIInstrInfo::splitScalar64BitBCNT(
    SetVectorType &Worklist, MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);

  BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can
  // be an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
                                      MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  uint32_t Imm = Inst.getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
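  // For example, Imm = 0x100000 encodes offset 0 and width 16, i.e. a
  // sign-extension of the low 16 bits of the source into 64 bits.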
  assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
         Offset == 0 && "Not implemented");

  if (BitWidth < 32) {
    Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
        .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
        .addImm(0)
        .addImm(BitWidth);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
        .addImm(31)
        .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
        .addReg(MidRegLo)
        .addImm(AMDGPU::sub0)
        .addReg(MidRegHi)
        .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
      .addImm(31)
      .addReg(Src.getReg(), 0, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(Src.getReg(), 0, AMDGPU::sub0)
      .addImm(AMDGPU::sub0)
      .addReg(TmpReg)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addUsersToMoveToVALUWorklist(
    Register DstReg,
    MachineRegisterInfo &MRI,
    SetVectorType &Worklist) const {
  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
         E = MRI.use_end(); I != E;) {
    MachineInstr &UseMI = *I->getParent();

    unsigned OpNo = 0;

    switch (UseMI.getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::WQM:
    case AMDGPU::SOFT_WQM:
    case AMDGPU::WWM:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::PHI:
    case AMDGPU::INSERT_SUBREG:
      break;
    default:
      OpNo = I.getOperandNo();
      break;
    }

    if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) {
      Worklist.insert(&UseMI);

      do {
        ++I;
      } while (I != E && I->getParent() == &UseMI);
    } else {
      ++I;
    }
  }
}

void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &Inst) const {
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineBasicBlock *MBB = Inst.getParent();
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  switch (Inst.getOpcode()) {
  case AMDGPU::S_PACK_LL_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
    // 0.
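    // The sequence below computes (Src1 << 16) | (Src0 & 0xffff); e.g. for
    // Src0 = 0x00001234 and Src1 = 0x0000abcd the packed result is
    // 0xabcd1234.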
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
        .addImm(0xffff);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
        .addReg(ImmReg, RegState::Kill)
        .add(Src0);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg)
        .add(Src1)
        .addImm(16)
        .addReg(TmpReg, RegState::Kill);
    break;
  }
  case AMDGPU::S_PACK_LH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
        .addImm(0xffff);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg)
        .addReg(ImmReg, RegState::Kill)
        .add(Src0)
        .add(Src1);
    break;
  }
  case AMDGPU::S_PACK_HH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
        .addImm(16)
        .add(Src0);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
        .addImm(0xffff0000);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg)
        .add(Src1)
        .addReg(ImmReg, RegState::Kill)
        .addReg(TmpReg, RegState::Kill);
    break;
  }
  default:
    llvm_unreachable("unhandled s_pack_* instruction");
  }

  MachineOperand &Dest = Inst.getOperand(0);
  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                               MachineInstr &SCCDefInst,
                                               SetVectorType &Worklist) const {
  bool SCCUsedImplicitly = false;

  // Ensure that def inst defines SCC, which is still live.
  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
         !Op.isDead() && Op.getParent() == &SCCDefInst);
  SmallVector<MachineInstr *, 4> CopyToDelete;
  // This assumes that all the users of SCC are in the same block
  // as the SCC def.
  for (MachineInstr &MI : // Skip the def inst itself.
       make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
                  SCCDefInst.getParent()->end())) {
    // Check if SCC is used first.
    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
      if (MI.isCopy()) {
        MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
        Register DestReg = MI.getOperand(0).getReg();

        for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
          if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
              (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
            User.getOperand(4).setReg(RI.getVCC());
            Worklist.insert(&User);
          } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) {
            User.getOperand(5).setReg(RI.getVCC());
            // No need to add to Worklist.
          }
        }
        CopyToDelete.push_back(&MI);
      } else {
        if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
            MI.getOpcode() == AMDGPU::S_CSELECT_B64) {
          // This is an implicit use of SCC that the SCC users are expected
          // to handle.
          // We cannot preserve the edge to the user so add the explicit
          // copy: SCC = COPY VCC.
          // The copy will be cleaned up during the processing of the user
          // in lowerSelect.
          SCCUsedImplicitly = true;
        }

        Worklist.insert(&MI);
      }
    }
    // Exit if we find another SCC def.
    if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
      break;
  }
  for (auto &Copy : CopyToDelete)
    Copy->eraseFromParent();

  if (SCCUsedImplicitly) {
    BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()),
            SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC)
        .addReg(RI.getVCC());
  }
}

const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
    const MachineInstr &Inst) const {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  switch (Inst.getOpcode()) {
  // For target instructions, getOpRegClass just returns the virtual register
  // class associated with the operand, so we need to find an equivalent VGPR
  // register class in order to move the instruction to the VALU.
  case AMDGPU::COPY:
  case AMDGPU::PHI:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
  case AMDGPU::WQM:
  case AMDGPU::SOFT_WQM:
  case AMDGPU::WWM: {
    const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
    if (RI.hasAGPRs(SrcRC)) {
      if (RI.hasAGPRs(NewDstRC))
        return nullptr;

      switch (Inst.getOpcode()) {
      case AMDGPU::PHI:
      case AMDGPU::REG_SEQUENCE:
      case AMDGPU::INSERT_SUBREG:
        NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
        break;
      default:
        NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      }

      if (!NewDstRC)
        return nullptr;
    } else {
      if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
        return nullptr;

      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        return nullptr;
    }

    return NewDstRC;
  }
  default:
    return NewDstRC;
  }
}

// Find the one SGPR operand we are allowed to use.
Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI.getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  Register SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  Register UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    Register Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
// Find the one SGPR operand we are allowed to use.
Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI.getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  Register SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  Register UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    Register Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1
  //
  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
    return (22ULL << 44) | // IMG_FORMAT_32_FLOAT
           (1ULL << 56) |  // RESOURCE_LEVEL = 1
           (3ULL << 60);   // OOB_SELECT = 3
  }

  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    // Set ATC = 1. GFX9 doesn't have this bit.
    if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (1ULL << 56);

    // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this bit. Note
    // that this setting disables the TC L2 cache and therefore decreases
    // performance.
    if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}
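// A worked example for the scratch descriptor packing below: with a maximum
// private element size of 16 bytes, ELEMENT_SIZE = Log2_32(16) - 1 = 3, and
// INDEX_STRIDE is encoded as 3 on wave64 subtargets (a stride of 64) or 2 on
// wave32 subtargets (a stride of 32).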
uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size

  // GFX9 doesn't have ELEMENT_SIZE.
  if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1;
    Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
  }

  // IndexStride = 64 for wave64, 32 for wave32.
  uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
  Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      ST.getGeneration() <= AMDGPUSubtarget::GFX9)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyDef(int Opc) const {
  return get(Opc).mayLoad() &&
         (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
}

unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
                                    int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;

  assert(!MI.memoperands_empty() &&
         (*MI.memoperands_begin())->getAddrSpace() ==
             AMDGPUAS::PRIVATE_ADDRESS);

  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
}

unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
                                        int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
  assert(Addr && Addr->isFI());
  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
}

unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (!MI.mayLoad())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (!MI.mayStore())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }

  return Size;
}
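// Size accounting for the logic below, as encoded in its returns: an
// instruction with only inline operands is DescSize bytes; a non-VOP3 VALU
// or SALU instruction carrying a 32-bit literal is DescSize + 4; a VOP3
// instruction with a literal operand is 12 bytes (the 8-byte encoding plus
// the trailing literal dword).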
unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
  unsigned DescSize = Desc.getSize();

  // If we have a definitive size, we can use it. Otherwise we need to inspect
  // the operands to know the size.
  if (isFixedSize(MI)) {
    unsigned Size = DescSize;

    // If we hit the buggy offset, an extra nop will be inserted in MC so
    // estimate the worst case.
    if (MI.isBranch() && ST.hasOffset3fBug())
      Size += 4;

    return Size;
  }

  // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
  if (isVALU(MI) || isSALU(MI)) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    if (Src0Idx == -1)
      return DescSize; // No operands.

    if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);

    int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
    if (Src1Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);

    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (Src2Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);

    return DescSize;
  }

  // Check whether we have extra NSA words.
  if (isMIMG(MI)) {
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx < 0)
      return 8;

    int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
  }

  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleSize(MI);
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
  }
  default:
    return DescSize;
  }
}

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}
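// The rewrite above produces the canonical exec-mask if structure (sketch;
// %dst is a fresh lane-mask register):
//
//   IfEntry:
//     SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %IfEnd
//   becomes
//   IfEntry:
//     %dst = SI_IF %cond, %IfEnd
//   IfEnd:
//     SI_END_CF %dst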
void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*MF, Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*MF, Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}
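// After the loop rewrite, control flow has the canonical wave-level shape
// (sketch; %dst and %backedge are fresh lane-mask registers):
//
//   LoopEntry:
//     %dst = PHI [0, <other preds>], [%backedge, LoopEnd]
//     ...
//   LoopEnd:
//     %backedge = SI_IF_BREAK %dst, %cond
//     SI_LOOP %backedge, LoopEntry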
ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" },
    { MO_ABS32_LO, "amdgpu-abs32-lo" },
    { MO_ABS32_HI, "amdgpu-abs32-hi" },
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           Register DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
      .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               const DebugLoc &DL,
                                               Register DestReg,
                                               RegScavenger &RS) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);

  // If available, prefer to use vcc.
  Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
                             ? Register(RI.getVCC())
                             : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);

  // TODO: Users need to deal with this.
  if (!UnusedCarry.isValid())
    return MachineInstrBuilder();

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
      .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}
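// A typical caller finishes the builder returned above by appending the two
// source operands (illustrative sketch; Src0 and Src1 are placeholders). On
// subtargets without carry-less adds, the dead carry-out def has already
// been added, so the sources land in the right slots either way:
//
//   TII->getAddNoCarry(MBB, I, DL, DestReg)
//       .addReg(Src0)
//       .addReg(Src1)
//       .addImm(0); // clamp bit on the _e64 forms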
bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &
SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
  if (!ST.isWave32())
    return;

  for (auto &Op : MI.implicit_operands()) {
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
      Op.setReg(AMDGPU::VCC_LO);
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}

bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
                                    bool Signed) const {
  // TODO: Should 0 be special cased?
  if (!ST.hasFlatInstOffsets())
    return false;

  if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
    return false;

  unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed);
  return Signed ? isIntN(N, Offset) : isUIntN(N, Offset);
}

std::pair<int64_t, int64_t> SIInstrInfo::splitFlatOffset(int64_t COffsetVal,
                                                         unsigned AddrSpace,
                                                         bool IsSigned) const {
  int64_t RemainderOffset = COffsetVal;
  int64_t ImmField = 0;
  const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, IsSigned);
  if (IsSigned) {
    // Use signed division by a power of two to truncate towards 0.
    int64_t D = 1LL << (NumBits - 1);
    RemainderOffset = (COffsetVal / D) * D;
    ImmField = COffsetVal - RemainderOffset;
  } else if (COffsetVal >= 0) {
    ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits);
    RemainderOffset = COffsetVal - ImmField;
  }

  assert(isLegalFLATOffset(ImmField, AddrSpace, IsSigned));
  assert(RemainderOffset + ImmField == COffsetVal);
  return {ImmField, RemainderOffset};
}
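// A worked example of the split above, assuming a 13-bit signed immediate
// field (so D == 1 << 12 == 4096): for COffsetVal == 9000 the signed division
// truncates toward zero, giving RemainderOffset == 8192 and ImmField == 808;
// for COffsetVal == -9000 it gives RemainderOffset == -8192 and
// ImmField == -808. Both immediates satisfy isIntN(13, ImmField).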
// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td.
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5,
  GFX10 = 6,
  SDWA10 = 7
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  case AMDGPUSubtarget::GFX10:
    return SIEncodingFamily::GFX10;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
  switch (MCOp) {
  // These opcodes use indirect register addressing, so they need special
  // handling by codegen (currently missing). Therefore it is too risky to
  // allow these opcodes to be selected by the DPP combiner or the SDWA
  // peephole pass.
  case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
    return true;
  default:
    return false;
  }
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() == AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
    switch (ST.getGeneration()) {
    default:
      Gen = SIEncodingFamily::SDWA;
      break;
    case AMDGPUSubtarget::GFX9:
      Gen = SIEncodingFamily::SDWA9;
      break;
    case AMDGPUSubtarget::GFX10:
      Gen = SIEncodingFamily::SDWA10;
      break;
    }
  }

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  if (isAsmOnlyOpcode(MCOp))
    return -1;

  return MCOp;
}

static TargetInstrInfo::RegSubRegPair
getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair()
                           : getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default:
    break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // Inserted the subreg we're looking for.
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // The subreg is in the rest of the reg.
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // A subreg of a subreg isn't supported.
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!P.Reg.isVirtual())
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Op1.getReg().isVirtual()) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}
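// Example of the walk above: given
//
//   %1:vgpr_32 = V_MOV_B32_e32 ...
//   %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
//   %3:vreg_64 = COPY %2
//
// getVRegSubRegDef({%3, sub1}, MRI) follows the COPY, then the REG_SEQUENCE
// operand paired with sub1, and returns the V_MOV_B32_e32 defining %1.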
bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan at the use.
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseScan = 10;
  int NumUse = 0;

  for (auto &Use : MRI.use_nodbg_operands(VReg)) {
    auto &UseInst = *Use.getParent();
    // Don't bother searching between blocks, although it is possible this
    // block doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUse > MaxUseScan)
      return true;
  }

  if (NumUse == 0)
    return false;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    assert(I != DefBB->end());

    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    for (const MachineOperand &Op : I->operands()) {
      // We don't check reg masks here as they're used only on calls:
      // 1. EXEC is only considered const within one BB.
      // 2. A call should be a terminator instruction if present in a BB.

      if (!Op.isReg())
        continue;

      Register Reg = Op.getReg();
      if (Op.isUse()) {
        if (Reg == VReg && --NumUse == 0)
          return false;
      } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
        return true;
    }
  }
}
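// Both scans above are deliberately conservative: they only look within the
// defining block and give up (reporting that exec may be modified) after a
// small fixed number of instructions. Callers use them before folding a VALU
// result across instructions, since a changed exec mask changes which lanes
// the def actually wrote.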
MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying
  // to a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  // A similar issue also exists with spilling and reloading $exec registers.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
        (DstReg.isVirtual() != SrcReg.isVirtual())) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
      const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
      if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
        return nullptr;
      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
        return nullptr;
      }
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}

unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions.
    return 0;
  }
}