//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}


// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_ACCVGPR_READ_B32:
  case AMDGPU::V_ACCVGPR_WRITE_B32:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      Width = getOpSize(LdSt, DataOpIdx);
    } else {
      // The 2 offset instructions use offset0 and offset1 instead. We can treat
      // these as a load with a single offset if the 2 offsets are consecutive.
      // We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm();
      unsigned Offset1 = Offset1Op->getImm();
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = getOpSize(LdSt, DataOpIdx);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width += getOpSize(LdSt, DataOpIdx);
      } else {
        Width = getOpSize(LdSt, DataOpIdx);
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg()) {
      // We can only handle this if it's a stack access, as any other resource
      // would require reporting multiple base registers.
      const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (AddrReg && !AddrReg->isFI())
        return false;

      const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      const SIMachineFunctionInfo *MFI
        = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
      if (RSrc->getReg() != MFI->getScratchRSrcReg())
        return false;

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseOps.push_back(RSrc);
      BaseOps.push_back(SOffset);
      Offset = OffsetImm->getImm();
    } else {
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
        return false;
      BaseOps.push_back(BaseOp);

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (BaseOp)
        BaseOps.push_back(BaseOp);

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      Offset = OffsetImm->getImm();
      if (SOffset) // soffset can be an inline immediate.
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isMIMG(LdSt)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  assert(!BaseOps1.empty() && !BaseOps2.empty());
  const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
  const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
    return false;

  // To avoid register pressure, the number of DWORDs loaded together by all
  // clustered mem ops should not, on average, exceed 8. This is an empirical
  // value based on certain observations and performance related experiments.
  // The good thing about this heuristic is that it avoids clustering too many
  // sub-word loads, and also avoids clustering wide loads. Below is a brief
  // summary of how the heuristic behaves for various `LoadSize`:
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to directly copy, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
      bool SafeToPropagate = true;
      for (auto I = Def; I != MI && SafeToPropagate; ++I)
        if (I->modifiesRegister(DefOp.getReg(), &RI))
          SafeToPropagate = false;

      if (!SafeToPropagate)
        break;

      DefOp.setIsKill(false);
    }

    MachineInstrBuilder Builder =
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .add(DefOp);
    if (ImpDefSuperReg)
      Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,
                     getKillRegState(KillSrc) | RegState::Implicit);
    }

    return;
  }

  RS.enterBasicBlock(MBB);
  RS.forward(MI);

  // Ideally we want to have three registers for a long reg_sequence copy
  // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
  unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                             *MBB.getParent());

  // Registers in the sequence are allocated contiguously so we can just
  // use register number to pick one of three round-robin temps.
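  // (Illustration: since consecutive destination registers have consecutive
  // register numbers, DestReg % 3 yields 0, 1, 2, 0, ..., so a long split copy
  // cycles through up to three scavenged temporaries.)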
  unsigned RegNo = DestReg % 3;
  Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
  if (!Tmp)
    report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  RS.setRegUsed(Tmp);
  // Only loop through if there are any free registers left, otherwise the
  // scavenger may report a fatal error when there is no emergency spill slot,
  // or it may spill using the slot.
  while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
    Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
    if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
      break;
    Tmp = Tmp2;
    RS.setRegUsed(Tmp);
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
    .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
      .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           const TargetRegisterClass *RC, bool Forward) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();
  ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
  MachineBasicBlock::iterator I = MI;
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register Reg = RI.getSubReg(DestReg, SubIdx);
    unsigned Opcode = AMDGPU::S_MOV_B32;

    // Is SGPR aligned? If so try to combine with next.
    Register Src = RI.getSubReg(SrcReg, SubIdx);
    bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
               .addReg(RI.getSubReg(SrcReg, SubIdx))
               .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16 bit and 32 bit
  // registers until all patterns are fixed.
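  // (Note: the XOR below makes this path trigger only when exactly one of the
  // source and destination registers is 16 bits wide; that register is then
  // widened to its 32-bit super-register.)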
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      // This copy can only be produced by patterns
      // with explicit SCC, which are known to be enabled
      // only for subtargets with S_CMP_LG_U64 present.
      assert(ST.hasScalarCompareEq64());
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    } else {
      assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    }

    return;
  }


  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block on
    // every AGPR spill.
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
                 .addImm(0) // src0_modifiers
                 .addReg(NewSrcReg)
                 .addImm(0) // clamp
                 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
                 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
    expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
    return;
  }

  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32;
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (destination is an AGPR), we need a scavenger.
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan the
  // whole block for every handled copy.
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS.reset(new RegScavenger());

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, 4);

  // If there is an overlap, we can't kill the super-register on the last
  // instruction, since it will also kill the components made live by this def.
  const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                         RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
                         ImpDefSuper, ImpUseSuper);
    } else {
      MachineInstrBuilder Builder =
        BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
          .addReg(RI.getSubReg(SrcReg, SubIdx));
      if (Idx == 0)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo(
  unsigned VecSize, unsigned EltSize, bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWritePseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWritePseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWritePseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_A96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_A160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_A192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_A256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end up
    // needing them, and need to ensure that the reserved registers are
    // correctly handled.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_A96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_A160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_A192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_A256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)           // vaddr
    .addReg(MFI->getScratchRSrcReg())    // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
    .addImm(0)                           // offset
    .addMemOperand(MMO);
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertNoops(MBB, MI, 1);
}

void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              unsigned Quantity) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
  }
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;
  case AMDGPU::S_OR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B64));
    break;
  case AMDGPU::S_OR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::S_ANDN2_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register Dst = MI.getOperand(0).getReg();
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
    expandMovDPP64(MI);
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1677 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1678 .addReg(Exec); 1679 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1680 MI.getOperand(0).getReg()) 1681 .add(MI.getOperand(2)); 1682 expandPostRAPseudo(*Copy); 1683 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1684 .addReg(Exec); 1685 MI.eraseFromParent(); 1686 break; 1687 } 1688 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1689 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1690 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1691 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1692 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1693 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1694 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1695 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1696 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1697 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1698 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1699 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1700 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1701 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1702 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1703 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1704 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1705 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1706 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1707 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1708 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1709 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1710 1711 unsigned Opc; 1712 if (RI.hasVGPRs(EltRC)) { 1713 Opc = ST.useVGPRIndexMode() ? 1714 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1715 } else { 1716 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1717 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1718 } 1719 1720 const MCInstrDesc &OpDesc = get(Opc); 1721 Register VecReg = MI.getOperand(0).getReg(); 1722 bool IsUndef = MI.getOperand(1).isUndef(); 1723 unsigned SubReg = MI.getOperand(3).getImm(); 1724 assert(VecReg == MI.getOperand(1).getReg()); 1725 1726 MachineInstrBuilder MIB = 1727 BuildMI(MBB, MI, DL, OpDesc) 1728 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1729 .add(MI.getOperand(2)) 1730 .addReg(VecReg, RegState::ImplicitDefine) 1731 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1732 1733 const int ImpDefIdx = 1734 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1735 const int ImpUseIdx = ImpDefIdx + 1; 1736 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1737 MI.eraseFromParent(); 1738 break; 1739 } 1740 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1741 MachineFunction &MF = *MBB.getParent(); 1742 Register Reg = MI.getOperand(0).getReg(); 1743 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1744 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1745 1746 // Create a bundle so these instructions won't be re-ordered by the 1747 // post-RA scheduler. 1748 MIBundleBuilder Bundler(MBB, MI); 1749 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1750 1751 // Add 32-bit offset from this instruction to the start of the 1752 // constant data. 1753 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1754 .addReg(RegLo) 1755 .add(MI.getOperand(1))); 1756 1757 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1758 .addReg(RegHi); 1759 MIB.add(MI.getOperand(2)); 1760 1761 Bundler.append(MIB); 1762 finalizeBundle(MBB, Bundler.begin()); 1763 1764 MI.eraseFromParent(); 1765 break; 1766 } 1767 case AMDGPU::ENTER_WWM: { 1768 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1769 // WWM is entered. 
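    // The expansion is just a descriptor swap: the pseudo already carries
    // operands matching s_or_saveexec, so the old exec mask is saved in the
    // destination and, with the usual all-ones source, every lane is enabled
    // for whole wave mode.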
1770 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1771 : AMDGPU::S_OR_SAVEEXEC_B64)); 1772 break; 1773 } 1774 case AMDGPU::EXIT_WWM: { 1775 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1776 // WWM is exited. 1777 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1778 break; 1779 } 1780 } 1781 return true; 1782 } 1783 1784 std::pair<MachineInstr*, MachineInstr*> 1785 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1786 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1787 1788 MachineBasicBlock &MBB = *MI.getParent(); 1789 DebugLoc DL = MBB.findDebugLoc(MI); 1790 MachineFunction *MF = MBB.getParent(); 1791 MachineRegisterInfo &MRI = MF->getRegInfo(); 1792 Register Dst = MI.getOperand(0).getReg(); 1793 unsigned Part = 0; 1794 MachineInstr *Split[2]; 1795 1796 1797 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1798 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1799 if (Dst.isPhysical()) { 1800 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1801 } else { 1802 assert(MRI.isSSA()); 1803 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1804 MovDPP.addDef(Tmp); 1805 } 1806 1807 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1808 const MachineOperand &SrcOp = MI.getOperand(I); 1809 assert(!SrcOp.isFPImm()); 1810 if (SrcOp.isImm()) { 1811 APInt Imm(64, SrcOp.getImm()); 1812 Imm.ashrInPlace(Part * 32); 1813 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1814 } else { 1815 assert(SrcOp.isReg()); 1816 Register Src = SrcOp.getReg(); 1817 if (Src.isPhysical()) 1818 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1819 else 1820 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub); 1821 } 1822 } 1823 1824 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1825 MovDPP.addImm(MI.getOperand(I).getImm()); 1826 1827 Split[Part] = MovDPP; 1828 ++Part; 1829 } 1830 1831 if (Dst.isVirtual()) 1832 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1833 .addReg(Split[0]->getOperand(0).getReg()) 1834 .addImm(AMDGPU::sub0) 1835 .addReg(Split[1]->getOperand(0).getReg()) 1836 .addImm(AMDGPU::sub1); 1837 1838 MI.eraseFromParent(); 1839 return std::make_pair(Split[0], Split[1]); 1840 } 1841 1842 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1843 MachineOperand &Src0, 1844 unsigned Src0OpName, 1845 MachineOperand &Src1, 1846 unsigned Src1OpName) const { 1847 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1848 if (!Src0Mods) 1849 return false; 1850 1851 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1852 assert(Src1Mods && 1853 "All commutable instructions have both src0 and src1 modifiers"); 1854 1855 int Src0ModsVal = Src0Mods->getImm(); 1856 int Src1ModsVal = Src1Mods->getImm(); 1857 1858 Src1Mods->setImm(Src0ModsVal); 1859 Src0Mods->setImm(Src1ModsVal); 1860 return true; 1861 } 1862 1863 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1864 MachineOperand &RegOp, 1865 MachineOperand &NonRegOp) { 1866 Register Reg = RegOp.getReg(); 1867 unsigned SubReg = RegOp.getSubReg(); 1868 bool IsKill = RegOp.isKill(); 1869 bool IsDead = RegOp.isDead(); 1870 bool IsUndef = RegOp.isUndef(); 1871 bool IsDebug = RegOp.isDebug(); 1872 1873 if (NonRegOp.isImm()) 1874 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1875 else if (NonRegOp.isFI()) 1876 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1877 else if (NonRegOp.isGlobal()) { 1878 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 1879 NonRegOp.getTargetFlags()); 1880 } else 1881 return nullptr; 
1882 1883 // Make sure we don't reinterpret a subreg index in the target flags. 1884 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 1885 1886 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1887 NonRegOp.setSubReg(SubReg); 1888 1889 return &MI; 1890 } 1891 1892 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1893 unsigned Src0Idx, 1894 unsigned Src1Idx) const { 1895 assert(!NewMI && "this should never be used"); 1896 1897 unsigned Opc = MI.getOpcode(); 1898 int CommutedOpcode = commuteOpcode(Opc); 1899 if (CommutedOpcode == -1) 1900 return nullptr; 1901 1902 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1903 static_cast<int>(Src0Idx) && 1904 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1905 static_cast<int>(Src1Idx) && 1906 "inconsistency with findCommutedOpIndices"); 1907 1908 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1909 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1910 1911 MachineInstr *CommutedMI = nullptr; 1912 if (Src0.isReg() && Src1.isReg()) { 1913 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1914 // Be sure to copy the source modifiers to the right place. 1915 CommutedMI 1916 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1917 } 1918 1919 } else if (Src0.isReg() && !Src1.isReg()) { 1920 // src0 should always be able to support any operand type, so no need to 1921 // check operand legality. 1922 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1923 } else if (!Src0.isReg() && Src1.isReg()) { 1924 if (isOperandLegal(MI, Src1Idx, &Src0)) 1925 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1926 } else { 1927 // FIXME: Found two non registers to commute. This does happen. 1928 return nullptr; 1929 } 1930 1931 if (CommutedMI) { 1932 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1933 Src1, AMDGPU::OpName::src1_modifiers); 1934 1935 CommutedMI->setDesc(get(CommutedOpcode)); 1936 } 1937 1938 return CommutedMI; 1939 } 1940 1941 // This needs to be implemented because the source modifiers may be inserted 1942 // between the true commutable operands, and the base 1943 // TargetInstrInfo::commuteInstruction uses it. 1944 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1945 unsigned &SrcOpIdx0, 1946 unsigned &SrcOpIdx1) const { 1947 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1948 } 1949 1950 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1951 unsigned &SrcOpIdx1) const { 1952 if (!Desc.isCommutable()) 1953 return false; 1954 1955 unsigned Opc = Desc.getOpcode(); 1956 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1957 if (Src0Idx == -1) 1958 return false; 1959 1960 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1961 if (Src1Idx == -1) 1962 return false; 1963 1964 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1965 } 1966 1967 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1968 int64_t BrOffset) const { 1969 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1970 // block is unanalyzable. 1971 assert(BranchOp != AMDGPU::S_SETPC_B64); 1972 1973 // Convert to dwords. 1974 BrOffset /= 4; 1975 1976 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1977 // from the next instruction. 
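  // For example, with the default 16-bit field a destination roughly within
  // +/-128 KiB is encodable: the dword offset computed above is decremented by
  // one below and must then fit in a signed 16-bit immediate.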
1978 BrOffset -= 1; 1979 1980 return isIntN(BranchOffsetBits, BrOffset); 1981 } 1982 1983 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1984 const MachineInstr &MI) const { 1985 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1986 // This would be a difficult analysis to perform, but can always be legal so 1987 // there's no need to analyze it. 1988 return nullptr; 1989 } 1990 1991 return MI.getOperand(0).getMBB(); 1992 } 1993 1994 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1995 MachineBasicBlock &DestBB, 1996 const DebugLoc &DL, 1997 int64_t BrOffset, 1998 RegScavenger *RS) const { 1999 assert(RS && "RegScavenger required for long branching"); 2000 assert(MBB.empty() && 2001 "new block should be inserted for expanding unconditional branch"); 2002 assert(MBB.pred_size() == 1); 2003 2004 MachineFunction *MF = MBB.getParent(); 2005 MachineRegisterInfo &MRI = MF->getRegInfo(); 2006 2007 // FIXME: Virtual register workaround for RegScavenger not working with empty 2008 // blocks. 2009 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2010 2011 auto I = MBB.end(); 2012 2013 // We need to compute the offset relative to the instruction immediately after 2014 // s_getpc_b64. Insert pc arithmetic code before last terminator. 2015 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2016 2017 // TODO: Handle > 32-bit block address. 2018 if (BrOffset >= 0) { 2019 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2020 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2021 .addReg(PCReg, 0, AMDGPU::sub0) 2022 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 2023 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2024 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2025 .addReg(PCReg, 0, AMDGPU::sub1) 2026 .addImm(0); 2027 } else { 2028 // Backwards branch. 2029 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) 2030 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2031 .addReg(PCReg, 0, AMDGPU::sub0) 2032 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); 2033 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) 2034 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2035 .addReg(PCReg, 0, AMDGPU::sub1) 2036 .addImm(0); 2037 } 2038 2039 // Insert the indirect branch after the other terminator. 2040 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2041 .addReg(PCReg); 2042 2043 // FIXME: If spilling is necessary, this will fail because this scavenger has 2044 // no emergency stack slots. It is non-trivial to spill in this situation, 2045 // because the restore code needs to be specially placed after the 2046 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2047 // block. 2048 // 2049 // If a spill is needed for the pc register pair, we need to insert a spill 2050 // restore block right before the destination block, and insert a short branch 2051 // into the old destination block's fallthrough predecessor. 2052 // e.g.: 2053 // 2054 // s_cbranch_scc0 skip_long_branch: 2055 // 2056 // long_branch_bb: 2057 // spill s[8:9] 2058 // s_getpc_b64 s[8:9] 2059 // s_add_u32 s8, s8, restore_bb 2060 // s_addc_u32 s9, s9, 0 2061 // s_setpc_b64 s[8:9] 2062 // 2063 // skip_long_branch: 2064 // foo; 2065 // 2066 // ..... 
2067 // 2068 // dest_bb_fallthrough_predecessor: 2069 // bar; 2070 // s_branch dest_bb 2071 // 2072 // restore_bb: 2073 // restore s[8:9] 2074 // fallthrough dest_bb 2075 /// 2076 // dest_bb: 2077 // buzz; 2078 2079 RS->enterBasicBlockEnd(MBB); 2080 Register Scav = RS->scavengeRegisterBackwards( 2081 AMDGPU::SReg_64RegClass, 2082 MachineBasicBlock::iterator(GetPC), false, 0); 2083 MRI.replaceRegWith(PCReg, Scav); 2084 MRI.clearVirtRegs(); 2085 RS->setRegUsed(Scav); 2086 2087 return 4 + 8 + 4 + 4; 2088 } 2089 2090 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2091 switch (Cond) { 2092 case SIInstrInfo::SCC_TRUE: 2093 return AMDGPU::S_CBRANCH_SCC1; 2094 case SIInstrInfo::SCC_FALSE: 2095 return AMDGPU::S_CBRANCH_SCC0; 2096 case SIInstrInfo::VCCNZ: 2097 return AMDGPU::S_CBRANCH_VCCNZ; 2098 case SIInstrInfo::VCCZ: 2099 return AMDGPU::S_CBRANCH_VCCZ; 2100 case SIInstrInfo::EXECNZ: 2101 return AMDGPU::S_CBRANCH_EXECNZ; 2102 case SIInstrInfo::EXECZ: 2103 return AMDGPU::S_CBRANCH_EXECZ; 2104 default: 2105 llvm_unreachable("invalid branch predicate"); 2106 } 2107 } 2108 2109 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2110 switch (Opcode) { 2111 case AMDGPU::S_CBRANCH_SCC0: 2112 return SCC_FALSE; 2113 case AMDGPU::S_CBRANCH_SCC1: 2114 return SCC_TRUE; 2115 case AMDGPU::S_CBRANCH_VCCNZ: 2116 return VCCNZ; 2117 case AMDGPU::S_CBRANCH_VCCZ: 2118 return VCCZ; 2119 case AMDGPU::S_CBRANCH_EXECNZ: 2120 return EXECNZ; 2121 case AMDGPU::S_CBRANCH_EXECZ: 2122 return EXECZ; 2123 default: 2124 return INVALID_BR; 2125 } 2126 } 2127 2128 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2129 MachineBasicBlock::iterator I, 2130 MachineBasicBlock *&TBB, 2131 MachineBasicBlock *&FBB, 2132 SmallVectorImpl<MachineOperand> &Cond, 2133 bool AllowModify) const { 2134 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2135 // Unconditional Branch 2136 TBB = I->getOperand(0).getMBB(); 2137 return false; 2138 } 2139 2140 MachineBasicBlock *CondBB = nullptr; 2141 2142 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2143 CondBB = I->getOperand(1).getMBB(); 2144 Cond.push_back(I->getOperand(0)); 2145 } else { 2146 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2147 if (Pred == INVALID_BR) 2148 return true; 2149 2150 CondBB = I->getOperand(0).getMBB(); 2151 Cond.push_back(MachineOperand::CreateImm(Pred)); 2152 Cond.push_back(I->getOperand(1)); // Save the branch register. 2153 } 2154 ++I; 2155 2156 if (I == MBB.end()) { 2157 // Conditional branch followed by fall-through. 2158 TBB = CondBB; 2159 return false; 2160 } 2161 2162 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2163 TBB = CondBB; 2164 FBB = I->getOperand(0).getMBB(); 2165 return false; 2166 } 2167 2168 return true; 2169 } 2170 2171 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2172 MachineBasicBlock *&FBB, 2173 SmallVectorImpl<MachineOperand> &Cond, 2174 bool AllowModify) const { 2175 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2176 auto E = MBB.end(); 2177 if (I == E) 2178 return false; 2179 2180 // Skip over the instructions that are artificially terminators for special 2181 // exec management. 
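  // The *_term pseudos handled below only manipulate the exec mask at the end
  // of the block and carry no control flow of their own, so they are stepped
  // over here to reach the real branch, if there is one.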
2182 while (I != E && !I->isBranch() && !I->isReturn() && 2183 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2184 switch (I->getOpcode()) { 2185 case AMDGPU::SI_MASK_BRANCH: 2186 case AMDGPU::S_MOV_B64_term: 2187 case AMDGPU::S_XOR_B64_term: 2188 case AMDGPU::S_OR_B64_term: 2189 case AMDGPU::S_ANDN2_B64_term: 2190 case AMDGPU::S_MOV_B32_term: 2191 case AMDGPU::S_XOR_B32_term: 2192 case AMDGPU::S_OR_B32_term: 2193 case AMDGPU::S_ANDN2_B32_term: 2194 break; 2195 case AMDGPU::SI_IF: 2196 case AMDGPU::SI_ELSE: 2197 case AMDGPU::SI_KILL_I1_TERMINATOR: 2198 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2199 // FIXME: It's messy that these need to be considered here at all. 2200 return true; 2201 default: 2202 llvm_unreachable("unexpected non-branch terminator inst"); 2203 } 2204 2205 ++I; 2206 } 2207 2208 if (I == E) 2209 return false; 2210 2211 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2212 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2213 2214 ++I; 2215 2216 // TODO: Should be able to treat as fallthrough? 2217 if (I == MBB.end()) 2218 return true; 2219 2220 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2221 return true; 2222 2223 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2224 2225 // Specifically handle the case where the conditional branch is to the same 2226 // destination as the mask branch. e.g. 2227 // 2228 // si_mask_branch BB8 2229 // s_cbranch_execz BB8 2230 // s_cbranch BB9 2231 // 2232 // This is required to understand divergent loops which may need the branches 2233 // to be relaxed. 2234 if (TBB != MaskBrDest || Cond.empty()) 2235 return true; 2236 2237 auto Pred = Cond[0].getImm(); 2238 return (Pred != EXECZ && Pred != EXECNZ); 2239 } 2240 2241 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2242 int *BytesRemoved) const { 2243 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2244 2245 unsigned Count = 0; 2246 unsigned RemovedSize = 0; 2247 while (I != MBB.end()) { 2248 MachineBasicBlock::iterator Next = std::next(I); 2249 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2250 I = Next; 2251 continue; 2252 } 2253 2254 RemovedSize += getInstSizeInBytes(*I); 2255 I->eraseFromParent(); 2256 ++Count; 2257 I = Next; 2258 } 2259 2260 if (BytesRemoved) 2261 *BytesRemoved = RemovedSize; 2262 2263 return Count; 2264 } 2265 2266 // Copy the flags onto the implicit condition register operand. 2267 static void preserveCondRegFlags(MachineOperand &CondReg, 2268 const MachineOperand &OrigCond) { 2269 CondReg.setIsUndef(OrigCond.isUndef()); 2270 CondReg.setIsKill(OrigCond.isKill()); 2271 } 2272 2273 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2274 MachineBasicBlock *TBB, 2275 MachineBasicBlock *FBB, 2276 ArrayRef<MachineOperand> Cond, 2277 const DebugLoc &DL, 2278 int *BytesAdded) const { 2279 if (!FBB && Cond.empty()) { 2280 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2281 .addMBB(TBB); 2282 if (BytesAdded) 2283 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2284 return 1; 2285 } 2286 2287 if(Cond.size() == 1 && Cond[0].isReg()) { 2288 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2289 .add(Cond[0]) 2290 .addMBB(TBB); 2291 return 1; 2292 } 2293 2294 assert(TBB && Cond[0].isImm()); 2295 2296 unsigned Opcode 2297 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2298 2299 if (!FBB) { 2300 Cond[1].isUndef(); 2301 MachineInstr *CondBr = 2302 BuildMI(&MBB, DL, get(Opcode)) 2303 .addMBB(TBB); 2304 2305 // Copy the flags onto the implicit condition register operand. 
2306 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2307 fixImplicitOperands(*CondBr); 2308 2309 if (BytesAdded) 2310 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2311 return 1; 2312 } 2313 2314 assert(TBB && FBB); 2315 2316 MachineInstr *CondBr = 2317 BuildMI(&MBB, DL, get(Opcode)) 2318 .addMBB(TBB); 2319 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2320 .addMBB(FBB); 2321 2322 MachineOperand &CondReg = CondBr->getOperand(1); 2323 CondReg.setIsUndef(Cond[1].isUndef()); 2324 CondReg.setIsKill(Cond[1].isKill()); 2325 2326 if (BytesAdded) 2327 *BytesAdded = ST.hasOffset3fBug() ? 16 : 8; 2328 2329 return 2; 2330 } 2331 2332 bool SIInstrInfo::reverseBranchCondition( 2333 SmallVectorImpl<MachineOperand> &Cond) const { 2334 if (Cond.size() != 2) { 2335 return true; 2336 } 2337 2338 if (Cond[0].isImm()) { 2339 Cond[0].setImm(-Cond[0].getImm()); 2340 return false; 2341 } 2342 2343 return true; 2344 } 2345 2346 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2347 ArrayRef<MachineOperand> Cond, 2348 Register DstReg, Register TrueReg, 2349 Register FalseReg, int &CondCycles, 2350 int &TrueCycles, int &FalseCycles) const { 2351 switch (Cond[0].getImm()) { 2352 case VCCNZ: 2353 case VCCZ: { 2354 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2355 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2356 if (MRI.getRegClass(FalseReg) != RC) 2357 return false; 2358 2359 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2360 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2361 2362 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2363 return RI.hasVGPRs(RC) && NumInsts <= 6; 2364 } 2365 case SCC_TRUE: 2366 case SCC_FALSE: { 2367 // FIXME: We could insert for VGPRs if we could replace the original compare 2368 // with a vector one. 2369 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2370 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2371 if (MRI.getRegClass(FalseReg) != RC) 2372 return false; 2373 2374 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2375 2376 // Multiples of 8 can do s_cselect_b64 2377 if (NumInsts % 2 == 0) 2378 NumInsts /= 2; 2379 2380 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2381 return RI.isSGPRClass(RC); 2382 } 2383 default: 2384 return false; 2385 } 2386 } 2387 2388 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2389 MachineBasicBlock::iterator I, const DebugLoc &DL, 2390 Register DstReg, ArrayRef<MachineOperand> Cond, 2391 Register TrueReg, Register FalseReg) const { 2392 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2393 if (Pred == VCCZ || Pred == SCC_FALSE) { 2394 Pred = static_cast<BranchPredicate>(-Pred); 2395 std::swap(TrueReg, FalseReg); 2396 } 2397 2398 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2399 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2400 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2401 2402 if (DstSize == 32) { 2403 MachineInstr *Select; 2404 if (Pred == SCC_TRUE) { 2405 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2406 .addReg(TrueReg) 2407 .addReg(FalseReg); 2408 } else { 2409 // Instruction's operands are backwards from what is expected. 
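      // v_cndmask_b32 selects src1 for lanes whose condition bit is set, so
      // the false value goes in src0 and the true value in src1.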
2410 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2411 .addReg(FalseReg) 2412 .addReg(TrueReg); 2413 } 2414 2415 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2416 return; 2417 } 2418 2419 if (DstSize == 64 && Pred == SCC_TRUE) { 2420 MachineInstr *Select = 2421 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2422 .addReg(TrueReg) 2423 .addReg(FalseReg); 2424 2425 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2426 return; 2427 } 2428 2429 static const int16_t Sub0_15[] = { 2430 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2431 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2432 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2433 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2434 }; 2435 2436 static const int16_t Sub0_15_64[] = { 2437 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2438 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2439 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2440 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2441 }; 2442 2443 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2444 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2445 const int16_t *SubIndices = Sub0_15; 2446 int NElts = DstSize / 32; 2447 2448 // 64-bit select is only available for SALU. 2449 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2450 if (Pred == SCC_TRUE) { 2451 if (NElts % 2) { 2452 SelOp = AMDGPU::S_CSELECT_B32; 2453 EltRC = &AMDGPU::SGPR_32RegClass; 2454 } else { 2455 SelOp = AMDGPU::S_CSELECT_B64; 2456 EltRC = &AMDGPU::SGPR_64RegClass; 2457 SubIndices = Sub0_15_64; 2458 NElts /= 2; 2459 } 2460 } 2461 2462 MachineInstrBuilder MIB = BuildMI( 2463 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2464 2465 I = MIB->getIterator(); 2466 2467 SmallVector<Register, 8> Regs; 2468 for (int Idx = 0; Idx != NElts; ++Idx) { 2469 Register DstElt = MRI.createVirtualRegister(EltRC); 2470 Regs.push_back(DstElt); 2471 2472 unsigned SubIdx = SubIndices[Idx]; 2473 2474 MachineInstr *Select; 2475 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2476 Select = 2477 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2478 .addReg(FalseReg, 0, SubIdx) 2479 .addReg(TrueReg, 0, SubIdx); 2480 } else { 2481 Select = 2482 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2483 .addReg(TrueReg, 0, SubIdx) 2484 .addReg(FalseReg, 0, SubIdx); 2485 } 2486 2487 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2488 fixImplicitOperands(*Select); 2489 2490 MIB.addReg(DstElt) 2491 .addImm(SubIdx); 2492 } 2493 } 2494 2495 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2496 switch (MI.getOpcode()) { 2497 case AMDGPU::V_MOV_B32_e32: 2498 case AMDGPU::V_MOV_B32_e64: 2499 case AMDGPU::V_MOV_B64_PSEUDO: { 2500 // If there are additional implicit register operands, this may be used for 2501 // register indexing so the source register operand isn't simply copied. 
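    // For example, a mov participating in an indirect register write (movrel
    // or gpr-idx mode) typically gains implicit operands for the full vector
    // register, and the operand-count check below filters such uses out.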
2502 unsigned NumOps = MI.getDesc().getNumOperands() + 2503 MI.getDesc().getNumImplicitUses(); 2504 2505 return MI.getNumOperands() == NumOps; 2506 } 2507 case AMDGPU::S_MOV_B32: 2508 case AMDGPU::S_MOV_B64: 2509 case AMDGPU::COPY: 2510 case AMDGPU::V_ACCVGPR_WRITE_B32: 2511 case AMDGPU::V_ACCVGPR_READ_B32: 2512 return true; 2513 default: 2514 return false; 2515 } 2516 } 2517 2518 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2519 unsigned Kind) const { 2520 switch(Kind) { 2521 case PseudoSourceValue::Stack: 2522 case PseudoSourceValue::FixedStack: 2523 return AMDGPUAS::PRIVATE_ADDRESS; 2524 case PseudoSourceValue::ConstantPool: 2525 case PseudoSourceValue::GOT: 2526 case PseudoSourceValue::JumpTable: 2527 case PseudoSourceValue::GlobalValueCallEntry: 2528 case PseudoSourceValue::ExternalSymbolCallEntry: 2529 case PseudoSourceValue::TargetCustom: 2530 return AMDGPUAS::CONSTANT_ADDRESS; 2531 } 2532 return AMDGPUAS::FLAT_ADDRESS; 2533 } 2534 2535 static void removeModOperands(MachineInstr &MI) { 2536 unsigned Opc = MI.getOpcode(); 2537 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2538 AMDGPU::OpName::src0_modifiers); 2539 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2540 AMDGPU::OpName::src1_modifiers); 2541 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2542 AMDGPU::OpName::src2_modifiers); 2543 2544 MI.RemoveOperand(Src2ModIdx); 2545 MI.RemoveOperand(Src1ModIdx); 2546 MI.RemoveOperand(Src0ModIdx); 2547 } 2548 2549 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2550 Register Reg, MachineRegisterInfo *MRI) const { 2551 if (!MRI->hasOneNonDBGUse(Reg)) 2552 return false; 2553 2554 switch (DefMI.getOpcode()) { 2555 default: 2556 return false; 2557 case AMDGPU::S_MOV_B64: 2558 // TODO: We could fold 64-bit immediates, but this gets complicated 2559 // when there are sub-registers. 2560 return false; 2561 2562 case AMDGPU::V_MOV_B32_e32: 2563 case AMDGPU::S_MOV_B32: 2564 case AMDGPU::V_ACCVGPR_WRITE_B32: 2565 break; 2566 } 2567 2568 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2569 assert(ImmOp); 2570 // FIXME: We could handle FrameIndex values here. 2571 if (!ImmOp->isImm()) 2572 return false; 2573 2574 unsigned Opc = UseMI.getOpcode(); 2575 if (Opc == AMDGPU::COPY) { 2576 Register DstReg = UseMI.getOperand(0).getReg(); 2577 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2578 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2579 unsigned NewOpc = isVGPRCopy ?
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2580 APInt Imm(32, ImmOp->getImm()); 2581 2582 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2583 Imm = Imm.ashr(16); 2584 2585 if (RI.isAGPR(*MRI, DstReg)) { 2586 if (!isInlineConstant(Imm)) 2587 return false; 2588 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32; 2589 } 2590 2591 if (Is16Bit) { 2592 if (isVGPRCopy) 2593 return false; // Do not clobber vgpr_hi16 2594 2595 if (DstReg.isVirtual() && 2596 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2597 return false; 2598 2599 UseMI.getOperand(0).setSubReg(0); 2600 if (DstReg.isPhysical()) { 2601 DstReg = RI.get32BitRegister(DstReg); 2602 UseMI.getOperand(0).setReg(DstReg); 2603 } 2604 assert(UseMI.getOperand(1).getReg().isVirtual()); 2605 } 2606 2607 UseMI.setDesc(get(NewOpc)); 2608 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2609 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2610 return true; 2611 } 2612 2613 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2614 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 || 2615 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2616 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) { 2617 // Don't fold if we are using source or output modifiers. The new VOP2 2618 // instructions don't have them. 2619 if (hasAnyModifiersSet(UseMI)) 2620 return false; 2621 2622 // If this is a free constant, there's no reason to do this. 2623 // TODO: We could fold this here instead of letting SIFoldOperands do it 2624 // later. 2625 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2626 2627 // Any src operand can be used for the legality check. 2628 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2629 return false; 2630 2631 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2632 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; 2633 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2634 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; 2635 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2636 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2637 2638 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2639 // We should only expect these to be on src0 due to canonicalizations. 2640 if (Src0->isReg() && Src0->getReg() == Reg) { 2641 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2642 return false; 2643 2644 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2645 return false; 2646 2647 unsigned NewOpc = 2648 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2649 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2650 if (pseudoToMCOpcode(NewOpc) == -1) 2651 return false; 2652 2653 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2654 2655 const int64_t Imm = ImmOp->getImm(); 2656 2657 // FIXME: This would be a lot easier if we could return a new instruction 2658 // instead of having to modify in place. 2659 2660 // Remove these first since they are at the end. 
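      // (omod is the last operand and clamp the one before it, so removing
      // them back-to-front keeps the remaining named-operand indices valid.)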
2661 UseMI.RemoveOperand( 2662 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2663 UseMI.RemoveOperand( 2664 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2665 2666 Register Src1Reg = Src1->getReg(); 2667 unsigned Src1SubReg = Src1->getSubReg(); 2668 Src0->setReg(Src1Reg); 2669 Src0->setSubReg(Src1SubReg); 2670 Src0->setIsKill(Src1->isKill()); 2671 2672 if (Opc == AMDGPU::V_MAC_F32_e64 || 2673 Opc == AMDGPU::V_MAC_F16_e64 || 2674 Opc == AMDGPU::V_FMAC_F32_e64 || 2675 Opc == AMDGPU::V_FMAC_F16_e64) 2676 UseMI.untieRegOperand( 2677 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2678 2679 Src1->ChangeToImmediate(Imm); 2680 2681 removeModOperands(UseMI); 2682 UseMI.setDesc(get(NewOpc)); 2683 2684 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2685 if (DeleteDef) 2686 DefMI.eraseFromParent(); 2687 2688 return true; 2689 } 2690 2691 // Added part is the constant: Use v_madak_{f16, f32}. 2692 if (Src2->isReg() && Src2->getReg() == Reg) { 2693 // Not allowed to use constant bus for another operand. 2694 // We can however allow an inline immediate as src0. 2695 bool Src0Inlined = false; 2696 if (Src0->isReg()) { 2697 // Try to inline constant if possible. 2698 // If the Def moves immediate and the use is single 2699 // We are saving VGPR here. 2700 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2701 if (Def && Def->isMoveImmediate() && 2702 isInlineConstant(Def->getOperand(1)) && 2703 MRI->hasOneUse(Src0->getReg())) { 2704 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2705 Src0Inlined = true; 2706 } else if ((Src0->getReg().isPhysical() && 2707 (ST.getConstantBusLimit(Opc) <= 1 && 2708 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2709 (Src0->getReg().isVirtual() && 2710 (ST.getConstantBusLimit(Opc) <= 1 && 2711 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2712 return false; 2713 // VGPR is okay as Src0 - fallthrough 2714 } 2715 2716 if (Src1->isReg() && !Src0Inlined ) { 2717 // We have one slot for inlinable constant so far - try to fill it 2718 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2719 if (Def && Def->isMoveImmediate() && 2720 isInlineConstant(Def->getOperand(1)) && 2721 MRI->hasOneUse(Src1->getReg()) && 2722 commuteInstruction(UseMI)) { 2723 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2724 } else if ((Src1->getReg().isPhysical() && 2725 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2726 (Src1->getReg().isVirtual() && 2727 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2728 return false; 2729 // VGPR is okay as Src1 - fallthrough 2730 } 2731 2732 unsigned NewOpc = 2733 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2734 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2735 if (pseudoToMCOpcode(NewOpc) == -1) 2736 return false; 2737 2738 const int64_t Imm = ImmOp->getImm(); 2739 2740 // FIXME: This would be a lot easier if we could return a new instruction 2741 // instead of having to modify in place. 2742 2743 // Remove these first since they are at the end. 2744 UseMI.RemoveOperand( 2745 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2746 UseMI.RemoveOperand( 2747 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2748 2749 if (Opc == AMDGPU::V_MAC_F32_e64 || 2750 Opc == AMDGPU::V_MAC_F16_e64 || 2751 Opc == AMDGPU::V_FMAC_F32_e64 || 2752 Opc == AMDGPU::V_FMAC_F16_e64) 2753 UseMI.untieRegOperand( 2754 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2755 2756 // ChangingToImmediate adds Src2 back to the instruction. 
2757 Src2->ChangeToImmediate(Imm); 2758 2759 // These come before src2. 2760 removeModOperands(UseMI); 2761 UseMI.setDesc(get(NewOpc)); 2762 // It might happen that UseMI was commuted 2763 // and we now have SGPR as SRC1. If so 2 inlined 2764 // constant and SGPR are illegal. 2765 legalizeOperands(UseMI); 2766 2767 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2768 if (DeleteDef) 2769 DefMI.eraseFromParent(); 2770 2771 return true; 2772 } 2773 } 2774 2775 return false; 2776 } 2777 2778 static bool 2779 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 2780 ArrayRef<const MachineOperand *> BaseOps2) { 2781 if (BaseOps1.size() != BaseOps2.size()) 2782 return false; 2783 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 2784 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 2785 return false; 2786 } 2787 return true; 2788 } 2789 2790 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2791 int WidthB, int OffsetB) { 2792 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2793 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2794 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 2795 return LowOffset + LowWidth <= HighOffset; 2796 } 2797 2798 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 2799 const MachineInstr &MIb) const { 2800 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 2801 int64_t Offset0, Offset1; 2802 unsigned Dummy0, Dummy1; 2803 bool Offset0IsScalable, Offset1IsScalable; 2804 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 2805 Dummy0, &RI) || 2806 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 2807 Dummy1, &RI)) 2808 return false; 2809 2810 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 2811 return false; 2812 2813 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2814 // FIXME: Handle ds_read2 / ds_write2. 2815 return false; 2816 } 2817 unsigned Width0 = MIa.memoperands().front()->getSize(); 2818 unsigned Width1 = MIb.memoperands().front()->getSize(); 2819 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 2820 } 2821 2822 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 2823 const MachineInstr &MIb) const { 2824 assert(MIa.mayLoadOrStore() && 2825 "MIa must load from or modify a memory location"); 2826 assert(MIb.mayLoadOrStore() && 2827 "MIb must load from or modify a memory location"); 2828 2829 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2830 return false; 2831 2832 // XXX - Can we relax this between address spaces? 2833 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2834 return false; 2835 2836 // TODO: Should we check the address space from the MachineMemOperand? That 2837 // would allow us to distinguish objects we know don't alias based on the 2838 // underlying address space, even if it was lowered to a different one, 2839 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2840 // buffer. 
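  // The checks below compare offsets only between accesses of the same kind
  // (DS/DS, buffer/buffer, SMRD/SMRD, FLAT/FLAT); across kinds, disjointness
  // is claimed only where the address spaces cannot overlap (e.g. DS vs. a
  // segment-specific FLAT access), and anything else is conservatively treated
  // as possibly aliasing.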
2841 if (isDS(MIa)) { 2842 if (isDS(MIb)) 2843 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2844 2845 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 2846 } 2847 2848 if (isMUBUF(MIa) || isMTBUF(MIa)) { 2849 if (isMUBUF(MIb) || isMTBUF(MIb)) 2850 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2851 2852 return !isFLAT(MIb) && !isSMRD(MIb); 2853 } 2854 2855 if (isSMRD(MIa)) { 2856 if (isSMRD(MIb)) 2857 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2858 2859 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 2860 } 2861 2862 if (isFLAT(MIa)) { 2863 if (isFLAT(MIb)) 2864 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2865 2866 return false; 2867 } 2868 2869 return false; 2870 } 2871 2872 static int64_t getFoldableImm(const MachineOperand* MO) { 2873 if (!MO->isReg()) 2874 return false; 2875 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 2876 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2877 auto Def = MRI.getUniqueVRegDef(MO->getReg()); 2878 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && 2879 Def->getOperand(1).isImm()) 2880 return Def->getOperand(1).getImm(); 2881 return AMDGPU::NoRegister; 2882 } 2883 2884 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 2885 MachineInstr &NewMI) { 2886 if (LV) { 2887 unsigned NumOps = MI.getNumOperands(); 2888 for (unsigned I = 1; I < NumOps; ++I) { 2889 MachineOperand &Op = MI.getOperand(I); 2890 if (Op.isReg() && Op.isKill()) 2891 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 2892 } 2893 } 2894 } 2895 2896 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2897 MachineInstr &MI, 2898 LiveVariables *LV) const { 2899 unsigned Opc = MI.getOpcode(); 2900 bool IsF16 = false; 2901 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2902 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 2903 2904 switch (Opc) { 2905 default: 2906 return nullptr; 2907 case AMDGPU::V_MAC_F16_e64: 2908 case AMDGPU::V_FMAC_F16_e64: 2909 IsF16 = true; 2910 LLVM_FALLTHROUGH; 2911 case AMDGPU::V_MAC_F32_e64: 2912 case AMDGPU::V_FMAC_F32_e64: 2913 break; 2914 case AMDGPU::V_MAC_F16_e32: 2915 case AMDGPU::V_FMAC_F16_e32: 2916 IsF16 = true; 2917 LLVM_FALLTHROUGH; 2918 case AMDGPU::V_MAC_F32_e32: 2919 case AMDGPU::V_FMAC_F32_e32: { 2920 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2921 AMDGPU::OpName::src0); 2922 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2923 if (!Src0->isReg() && !Src0->isImm()) 2924 return nullptr; 2925 2926 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2927 return nullptr; 2928 2929 break; 2930 } 2931 } 2932 2933 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2934 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2935 const MachineOperand *Src0Mods = 2936 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2937 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2938 const MachineOperand *Src1Mods = 2939 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2940 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2941 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2942 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2943 MachineInstrBuilder MIB; 2944 2945 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && 2946 // If we have an SGPR input, we will violate the constant bus restriction. 
2947 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 2948 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2949 if (auto Imm = getFoldableImm(Src2)) { 2950 unsigned NewOpc = 2951 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 2952 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2953 if (pseudoToMCOpcode(NewOpc) != -1) { 2954 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2955 .add(*Dst) 2956 .add(*Src0) 2957 .add(*Src1) 2958 .addImm(Imm); 2959 updateLiveVariables(LV, MI, *MIB); 2960 return MIB; 2961 } 2962 } 2963 unsigned NewOpc = IsFMA 2964 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2965 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2966 if (auto Imm = getFoldableImm(Src1)) { 2967 if (pseudoToMCOpcode(NewOpc) != -1) { 2968 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2969 .add(*Dst) 2970 .add(*Src0) 2971 .addImm(Imm) 2972 .add(*Src2); 2973 updateLiveVariables(LV, MI, *MIB); 2974 return MIB; 2975 } 2976 } 2977 if (auto Imm = getFoldableImm(Src0)) { 2978 if (pseudoToMCOpcode(NewOpc) != -1 && 2979 isOperandLegal( 2980 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 2981 Src1)) { 2982 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2983 .add(*Dst) 2984 .add(*Src1) 2985 .addImm(Imm) 2986 .add(*Src2); 2987 updateLiveVariables(LV, MI, *MIB); 2988 return MIB; 2989 } 2990 } 2991 } 2992 2993 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2994 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2995 if (pseudoToMCOpcode(NewOpc) == -1) 2996 return nullptr; 2997 2998 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2999 .add(*Dst) 3000 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3001 .add(*Src0) 3002 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3003 .add(*Src1) 3004 .addImm(0) // Src mods 3005 .add(*Src2) 3006 .addImm(Clamp ? Clamp->getImm() : 0) 3007 .addImm(Omod ? Omod->getImm() : 0); 3008 updateLiveVariables(LV, MI, *MIB); 3009 return MIB; 3010 } 3011 3012 // It's not generally safe to move VALU instructions across these since it will 3013 // start using the register as a base index rather than directly. 3014 // XXX - Why isn't hasSideEffects sufficient for these? 3015 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3016 switch (MI.getOpcode()) { 3017 case AMDGPU::S_SET_GPR_IDX_ON: 3018 case AMDGPU::S_SET_GPR_IDX_MODE: 3019 case AMDGPU::S_SET_GPR_IDX_OFF: 3020 return true; 3021 default: 3022 return false; 3023 } 3024 } 3025 3026 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3027 const MachineBasicBlock *MBB, 3028 const MachineFunction &MF) const { 3029 // Skipping the check for SP writes in the base implementation. The reason it 3030 // was added was apparently due to compile time concerns. 3031 // 3032 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3033 // but is probably avoidable. 3034 3035 // Copied from base implementation. 3036 // Terminators and labels can't be scheduled around. 3037 if (MI.isTerminator() || MI.isPosition()) 3038 return true; 3039 3040 // INLINEASM_BR can jump to another block 3041 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3042 return true; 3043 3044 // Target-independent instructions do not have an implicit-use of EXEC, even 3045 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3046 // boundaries prevents incorrect movements of such instructions. 
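  // S_SETREG and the VGPR-indexing opcodes are included for the same reason:
  // they change mode or index state that neighbouring VALU instructions depend
  // on implicitly.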
3047 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3048 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3049 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3050 changesVGPRIndexingMode(MI); 3051 } 3052 3053 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3054 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3055 Opcode == AMDGPU::DS_GWS_INIT || 3056 Opcode == AMDGPU::DS_GWS_SEMA_V || 3057 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3058 Opcode == AMDGPU::DS_GWS_SEMA_P || 3059 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3060 Opcode == AMDGPU::DS_GWS_BARRIER; 3061 } 3062 3063 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3064 // Skip the full operand and register alias search modifiesRegister 3065 // does. There's only a handful of instructions that touch this, it's only an 3066 // implicit def, and doesn't alias any other registers. 3067 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3068 for (; ImpDef && *ImpDef; ++ImpDef) { 3069 if (*ImpDef == AMDGPU::MODE) 3070 return true; 3071 } 3072 } 3073 3074 return false; 3075 } 3076 3077 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3078 unsigned Opcode = MI.getOpcode(); 3079 3080 if (MI.mayStore() && isSMRD(MI)) 3081 return true; // scalar store or atomic 3082 3083 // This will terminate the function when other lanes may need to continue. 3084 if (MI.isReturn()) 3085 return true; 3086 3087 // These instructions cause shader I/O that may cause hardware lockups 3088 // when executed with an empty EXEC mask. 3089 // 3090 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3091 // EXEC = 0, but checking for that case here seems not worth it 3092 // given the typical code patterns. 3093 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3094 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 3095 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3096 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3097 return true; 3098 3099 if (MI.isCall() || MI.isInlineAsm()) 3100 return true; // conservative assumption 3101 3102 // A mode change is a scalar operation that influences vector instructions. 3103 if (modifiesModeRegister(MI)) 3104 return true; 3105 3106 // These are like SALU instructions in terms of effects, so it's questionable 3107 // whether we should return true for those. 3108 // 3109 // However, executing them with EXEC = 0 causes them to operate on undefined 3110 // data, which we avoid by returning true here. 3111 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || 3112 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32) 3113 return true; 3114 3115 return false; 3116 } 3117 3118 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3119 const MachineInstr &MI) const { 3120 if (MI.isMetaInstruction()) 3121 return false; 3122 3123 // This won't read exec if this is an SGPR->SGPR copy. 3124 if (MI.isCopyLike()) { 3125 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3126 return true; 3127 3128 // Make sure this isn't copying exec as a normal operand 3129 return MI.readsRegister(AMDGPU::EXEC, &RI); 3130 } 3131 3132 // Make a conservative assumption about the callee. 3133 if (MI.isCall()) 3134 return true; 3135 3136 // Be conservative with any unhandled generic opcodes. 
3137 if (!isTargetSpecificOpcode(MI.getOpcode())) 3138 return true; 3139 3140 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3141 } 3142 3143 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3144 switch (Imm.getBitWidth()) { 3145 case 1: // This likely will be a condition code mask. 3146 return true; 3147 3148 case 32: 3149 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3150 ST.hasInv2PiInlineImm()); 3151 case 64: 3152 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3153 ST.hasInv2PiInlineImm()); 3154 case 16: 3155 return ST.has16BitInsts() && 3156 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3157 ST.hasInv2PiInlineImm()); 3158 default: 3159 llvm_unreachable("invalid bitwidth"); 3160 } 3161 } 3162 3163 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3164 uint8_t OperandType) const { 3165 if (!MO.isImm() || 3166 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3167 OperandType > AMDGPU::OPERAND_SRC_LAST) 3168 return false; 3169 3170 // MachineOperand provides no way to tell the true operand size, since it only 3171 // records a 64-bit value. We need to know the size to determine if a 32-bit 3172 // floating point immediate bit pattern is legal for an integer immediate. It 3173 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3174 3175 int64_t Imm = MO.getImm(); 3176 switch (OperandType) { 3177 case AMDGPU::OPERAND_REG_IMM_INT32: 3178 case AMDGPU::OPERAND_REG_IMM_FP32: 3179 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3180 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3181 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3182 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3183 int32_t Trunc = static_cast<int32_t>(Imm); 3184 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3185 } 3186 case AMDGPU::OPERAND_REG_IMM_INT64: 3187 case AMDGPU::OPERAND_REG_IMM_FP64: 3188 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3189 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3190 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3191 ST.hasInv2PiInlineImm()); 3192 case AMDGPU::OPERAND_REG_IMM_INT16: 3193 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3194 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3195 // We would expect inline immediates to not be concerned with an integer/fp 3196 // distinction. However, in the case of 16-bit integer operations, the 3197 // "floating point" values appear to not work. It seems read the low 16-bits 3198 // of 32-bit immediates, which happens to always work for the integer 3199 // values. 3200 // 3201 // See llvm bugzilla 46302. 3202 // 3203 // TODO: Theoretically we could use op-sel to use the high bits of the 3204 // 32-bit FP values. 3205 return AMDGPU::isInlinableIntLiteral(Imm); 3206 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3207 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3208 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3209 // This suffers the same problem as the scalar 16-bit cases. 3210 return AMDGPU::isInlinableIntLiteralV216(Imm); 3211 case AMDGPU::OPERAND_REG_IMM_FP16: 3212 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3213 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3214 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3215 // A few special case instructions have 16-bit operands on subtargets 3216 // where 16-bit instructions are not legal. 3217 // TODO: Do the 32-bit immediates work? 
We shouldn't really need to handle 3218 // constants in these cases 3219 int16_t Trunc = static_cast<int16_t>(Imm); 3220 return ST.has16BitInsts() && 3221 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3222 } 3223 3224 return false; 3225 } 3226 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3227 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3228 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3229 uint32_t Trunc = static_cast<uint32_t>(Imm); 3230 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3231 } 3232 default: 3233 llvm_unreachable("invalid bitwidth"); 3234 } 3235 } 3236 3237 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3238 const MCOperandInfo &OpInfo) const { 3239 switch (MO.getType()) { 3240 case MachineOperand::MO_Register: 3241 return false; 3242 case MachineOperand::MO_Immediate: 3243 return !isInlineConstant(MO, OpInfo); 3244 case MachineOperand::MO_FrameIndex: 3245 case MachineOperand::MO_MachineBasicBlock: 3246 case MachineOperand::MO_ExternalSymbol: 3247 case MachineOperand::MO_GlobalAddress: 3248 case MachineOperand::MO_MCSymbol: 3249 return true; 3250 default: 3251 llvm_unreachable("unexpected operand type"); 3252 } 3253 } 3254 3255 static bool compareMachineOp(const MachineOperand &Op0, 3256 const MachineOperand &Op1) { 3257 if (Op0.getType() != Op1.getType()) 3258 return false; 3259 3260 switch (Op0.getType()) { 3261 case MachineOperand::MO_Register: 3262 return Op0.getReg() == Op1.getReg(); 3263 case MachineOperand::MO_Immediate: 3264 return Op0.getImm() == Op1.getImm(); 3265 default: 3266 llvm_unreachable("Didn't expect to be comparing these operand types"); 3267 } 3268 } 3269 3270 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3271 const MachineOperand &MO) const { 3272 const MCInstrDesc &InstDesc = MI.getDesc(); 3273 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3274 3275 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3276 3277 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3278 return true; 3279 3280 if (OpInfo.RegClass < 0) 3281 return false; 3282 3283 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3284 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3285 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3286 AMDGPU::OpName::src2)) 3287 return false; 3288 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3289 } 3290 3291 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3292 return false; 3293 3294 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3295 return true; 3296 3297 return ST.hasVOP3Literal(); 3298 } 3299 3300 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3301 int Op32 = AMDGPU::getVOPe32(Opcode); 3302 if (Op32 == -1) 3303 return false; 3304 3305 return pseudoToMCOpcode(Op32) != -1; 3306 } 3307 3308 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3309 // The src0_modifier operand is present on all instructions 3310 // that have modifiers. 
3311 3312 return AMDGPU::getNamedOperandIdx(Opcode, 3313 AMDGPU::OpName::src0_modifiers) != -1; 3314 } 3315 3316 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3317 unsigned OpName) const { 3318 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3319 return Mods && Mods->getImm(); 3320 } 3321 3322 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3323 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3324 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3325 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3326 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3327 hasModifiersSet(MI, AMDGPU::OpName::omod); 3328 } 3329 3330 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3331 const MachineRegisterInfo &MRI) const { 3332 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3333 // Can't shrink instruction with three operands. 3334 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 3335 // a special case for it. It can only be shrunk if the third operand 3336 // is vcc, and src0_modifiers and src1_modifiers are not set. 3337 // We should handle this the same way we handle vopc, by adding 3338 // a register allocation hint pre-regalloc and then doing the shrinking 3339 // post-regalloc. 3340 if (Src2) { 3341 switch (MI.getOpcode()) { 3342 default: return false; 3343 3344 case AMDGPU::V_ADDC_U32_e64: 3345 case AMDGPU::V_SUBB_U32_e64: 3346 case AMDGPU::V_SUBBREV_U32_e64: { 3347 const MachineOperand *Src1 3348 = getNamedOperand(MI, AMDGPU::OpName::src1); 3349 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3350 return false; 3351 // Additional verification is needed for sdst/src2. 3352 return true; 3353 } 3354 case AMDGPU::V_MAC_F32_e64: 3355 case AMDGPU::V_MAC_F16_e64: 3356 case AMDGPU::V_FMAC_F32_e64: 3357 case AMDGPU::V_FMAC_F16_e64: 3358 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3359 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3360 return false; 3361 break; 3362 3363 case AMDGPU::V_CNDMASK_B32_e64: 3364 break; 3365 } 3366 } 3367 3368 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3369 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3370 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3371 return false; 3372 3373 // We don't need to check src0, all input types are legal, so just make sure 3374 // src0 isn't using any modifiers. 3375 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3376 return false; 3377 3378 // Can it be shrunk to a valid 32-bit opcode? 3379 if (!hasVALU32BitEncoding(MI.getOpcode())) 3380 return false; 3381 3382 // Check output modifiers 3383 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3384 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3385 } 3386 3387 // Set VCC operand with all flags from \p Orig, except for setting it as 3388 // implicit.
3389 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3390 const MachineOperand &Orig) { 3391 3392 for (MachineOperand &Use : MI.implicit_operands()) { 3393 if (Use.isUse() && 3394 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3395 Use.setIsUndef(Orig.isUndef()); 3396 Use.setIsKill(Orig.isKill()); 3397 return; 3398 } 3399 } 3400 } 3401 3402 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3403 unsigned Op32) const { 3404 MachineBasicBlock *MBB = MI.getParent();; 3405 MachineInstrBuilder Inst32 = 3406 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3407 .setMIFlags(MI.getFlags()); 3408 3409 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3410 // For VOPC instructions, this is replaced by an implicit def of vcc. 3411 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3412 if (Op32DstIdx != -1) { 3413 // dst 3414 Inst32.add(MI.getOperand(0)); 3415 } else { 3416 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3417 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3418 "Unexpected case"); 3419 } 3420 3421 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3422 3423 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3424 if (Src1) 3425 Inst32.add(*Src1); 3426 3427 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3428 3429 if (Src2) { 3430 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3431 if (Op32Src2Idx != -1) { 3432 Inst32.add(*Src2); 3433 } else { 3434 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3435 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3436 // of vcc was already added during the initial BuildMI, but we 3437 // 1) may need to change vcc to vcc_lo to preserve the original register 3438 // 2) have to preserve the original flags. 3439 fixImplicitOperands(*Inst32); 3440 copyFlagsToImplicitVCC(*Inst32, *Src2); 3441 } 3442 } 3443 3444 return Inst32; 3445 } 3446 3447 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3448 const MachineOperand &MO, 3449 const MCOperandInfo &OpInfo) const { 3450 // Literal constants use the constant bus. 3451 //if (isLiteralConstantLike(MO, OpInfo)) 3452 // return true; 3453 if (MO.isImm()) 3454 return !isInlineConstant(MO, OpInfo); 3455 3456 if (!MO.isReg()) 3457 return true; // Misc other operands like FrameIndex 3458 3459 if (!MO.isUse()) 3460 return false; 3461 3462 if (MO.getReg().isVirtual()) 3463 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3464 3465 // Null is free 3466 if (MO.getReg() == AMDGPU::SGPR_NULL) 3467 return false; 3468 3469 // SGPRs use the constant bus 3470 if (MO.isImplicit()) { 3471 return MO.getReg() == AMDGPU::M0 || 3472 MO.getReg() == AMDGPU::VCC || 3473 MO.getReg() == AMDGPU::VCC_LO; 3474 } else { 3475 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3476 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3477 } 3478 } 3479 3480 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3481 for (const MachineOperand &MO : MI.implicit_operands()) { 3482 // We only care about reads. 
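    // (Implicit defs, e.g. a VCC carry-out, do not consume a constant bus
    // read slot, which is what the callers of this helper are accounting for.)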
3483 if (MO.isDef()) 3484 continue; 3485 3486 switch (MO.getReg()) { 3487 case AMDGPU::VCC: 3488 case AMDGPU::VCC_LO: 3489 case AMDGPU::VCC_HI: 3490 case AMDGPU::M0: 3491 case AMDGPU::FLAT_SCR: 3492 return MO.getReg(); 3493 3494 default: 3495 break; 3496 } 3497 } 3498 3499 return AMDGPU::NoRegister; 3500 } 3501 3502 static bool shouldReadExec(const MachineInstr &MI) { 3503 if (SIInstrInfo::isVALU(MI)) { 3504 switch (MI.getOpcode()) { 3505 case AMDGPU::V_READLANE_B32: 3506 case AMDGPU::V_WRITELANE_B32: 3507 return false; 3508 } 3509 3510 return true; 3511 } 3512 3513 if (MI.isPreISelOpcode() || 3514 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3515 SIInstrInfo::isSALU(MI) || 3516 SIInstrInfo::isSMRD(MI)) 3517 return false; 3518 3519 return true; 3520 } 3521 3522 static bool isSubRegOf(const SIRegisterInfo &TRI, 3523 const MachineOperand &SuperVec, 3524 const MachineOperand &SubReg) { 3525 if (SubReg.getReg().isPhysical()) 3526 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3527 3528 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3529 SubReg.getReg() == SuperVec.getReg(); 3530 } 3531 3532 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3533 StringRef &ErrInfo) const { 3534 uint16_t Opcode = MI.getOpcode(); 3535 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3536 return true; 3537 3538 const MachineFunction *MF = MI.getParent()->getParent(); 3539 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3540 3541 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3542 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3543 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3544 3545 // Make sure the number of operands is correct. 3546 const MCInstrDesc &Desc = get(Opcode); 3547 if (!Desc.isVariadic() && 3548 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3549 ErrInfo = "Instruction has wrong number of operands."; 3550 return false; 3551 } 3552 3553 if (MI.isInlineAsm()) { 3554 // Verify register classes for inlineasm constraints. 3555 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3556 I != E; ++I) { 3557 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3558 if (!RC) 3559 continue; 3560 3561 const MachineOperand &Op = MI.getOperand(I); 3562 if (!Op.isReg()) 3563 continue; 3564 3565 Register Reg = Op.getReg(); 3566 if (!Reg.isVirtual() && !RC->contains(Reg)) { 3567 ErrInfo = "inlineasm operand has incorrect register class."; 3568 return false; 3569 } 3570 } 3571 3572 return true; 3573 } 3574 3575 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3576 ErrInfo = "missing memory operand from MIMG instruction."; 3577 return false; 3578 } 3579 3580 // Make sure the register classes are correct. 3581 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3582 if (MI.getOperand(i).isFPImm()) { 3583 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3584 "all fp values to integers."; 3585 return false; 3586 } 3587 3588 int RegClass = Desc.OpInfo[i].RegClass; 3589 3590 switch (Desc.OpInfo[i].OperandType) { 3591 case MCOI::OPERAND_REGISTER: 3592 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3593 ErrInfo = "Illegal immediate value for operand."; 3594 return false; 3595 } 3596 break; 3597 case AMDGPU::OPERAND_REG_IMM_INT32: 3598 case AMDGPU::OPERAND_REG_IMM_FP32: 3599 break; 3600 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3601 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3602 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3603 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3604 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3605 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3606 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3607 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3608 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3609 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3610 const MachineOperand &MO = MI.getOperand(i); 3611 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3612 ErrInfo = "Illegal immediate value for operand."; 3613 return false; 3614 } 3615 break; 3616 } 3617 case MCOI::OPERAND_IMMEDIATE: 3618 case AMDGPU::OPERAND_KIMM32: 3619 // Check if this operand is an immediate. 3620 // FrameIndex operands will be replaced by immediates, so they are 3621 // allowed. 3622 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3623 ErrInfo = "Expected immediate, but got non-immediate"; 3624 return false; 3625 } 3626 LLVM_FALLTHROUGH; 3627 default: 3628 continue; 3629 } 3630 3631 if (!MI.getOperand(i).isReg()) 3632 continue; 3633 3634 if (RegClass != -1) { 3635 Register Reg = MI.getOperand(i).getReg(); 3636 if (Reg == AMDGPU::NoRegister || Reg.isVirtual()) 3637 continue; 3638 3639 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3640 if (!RC->contains(Reg)) { 3641 ErrInfo = "Operand has incorrect register class."; 3642 return false; 3643 } 3644 } 3645 } 3646 3647 // Verify SDWA 3648 if (isSDWA(MI)) { 3649 if (!ST.hasSDWA()) { 3650 ErrInfo = "SDWA is not supported on this target"; 3651 return false; 3652 } 3653 3654 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3655 3656 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3657 3658 for (int OpIdx: OpIndicies) { 3659 if (OpIdx == -1) 3660 continue; 3661 const MachineOperand &MO = MI.getOperand(OpIdx); 3662 3663 if (!ST.hasSDWAScalar()) { 3664 // Only VGPRS on VI 3665 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3666 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3667 return false; 3668 } 3669 } else { 3670 // No immediates on GFX9 3671 if (!MO.isReg()) { 3672 ErrInfo = 3673 "Only reg allowed as operands in SDWA instructions on GFX9+"; 3674 return false; 3675 } 3676 } 3677 } 3678 3679 if (!ST.hasSDWAOmod()) { 3680 // No omod allowed on VI 3681 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3682 if (OMod != nullptr && 3683 (!OMod->isImm() || OMod->getImm() != 0)) { 3684 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3685 return false; 3686 } 3687 } 3688 3689 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3690 if (isVOPC(BasicOpcode)) { 3691 if (!ST.hasSDWASdst() && DstIdx != -1) { 3692 // Only vcc allowed as dst on VI for VOPC 3693 const MachineOperand &Dst = MI.getOperand(DstIdx); 3694 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3695 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3696 return false; 3697 } 
3698 } else if (!ST.hasSDWAOutModsVOPC()) { 3699 // No clamp allowed on GFX9 for VOPC 3700 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3701 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 3702 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 3703 return false; 3704 } 3705 3706 // No omod allowed on GFX9 for VOPC 3707 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3708 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 3709 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 3710 return false; 3711 } 3712 } 3713 } 3714 3715 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 3716 if (DstUnused && DstUnused->isImm() && 3717 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 3718 const MachineOperand &Dst = MI.getOperand(DstIdx); 3719 if (!Dst.isReg() || !Dst.isTied()) { 3720 ErrInfo = "Dst register should have tied register"; 3721 return false; 3722 } 3723 3724 const MachineOperand &TiedMO = 3725 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 3726 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 3727 ErrInfo = 3728 "Dst register should be tied to implicit use of preserved register"; 3729 return false; 3730 } else if (TiedMO.getReg().isPhysical() && 3731 Dst.getReg() != TiedMO.getReg()) { 3732 ErrInfo = "Dst register should use same physical register as preserved"; 3733 return false; 3734 } 3735 } 3736 } 3737 3738 // Verify MIMG 3739 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 3740 // Ensure that the return type used is large enough for all the options 3741 // being used TFE/LWE require an extra result register. 3742 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 3743 if (DMask) { 3744 uint64_t DMaskImm = DMask->getImm(); 3745 uint32_t RegCount = 3746 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 3747 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 3748 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 3749 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 3750 3751 // Adjust for packed 16 bit values 3752 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 3753 RegCount >>= 1; 3754 3755 // Adjust if using LWE or TFE 3756 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 3757 RegCount += 1; 3758 3759 const uint32_t DstIdx = 3760 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 3761 const MachineOperand &Dst = MI.getOperand(DstIdx); 3762 if (Dst.isReg()) { 3763 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 3764 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 3765 if (RegCount > DstSize) { 3766 ErrInfo = "MIMG instruction returns too many registers for dst " 3767 "register class"; 3768 return false; 3769 } 3770 } 3771 } 3772 } 3773 3774 // Verify VOP*. Ignore multiple sgpr operands on writelane. 3775 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 3776 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 3777 // Only look at the true operands. Only a real operand can use the constant 3778 // bus, and we don't want to check pseudo-operands like the source modifier 3779 // flags. 
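    // The "constant bus" is the scalar read port shared by SGPR operands,
    // literal constants and implicit scalar reads such as m0 or vcc. Targets
    // before GFX10 allow at most one such read per VALU instruction, GFX10
    // allows two (see ST.getConstantBusLimit()); e.g. v_add_f32_e64 with two
    // distinct SGPR sources is only encodable on GFX10+.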
    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    unsigned ConstantBusCount = 0;
    unsigned LiteralCount = 0;

    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1)
      ++ConstantBusCount;

    SmallVector<Register, 2> SGPRsUsed;
    Register SGPRUsed;

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        break;
      const MachineOperand &MO = MI.getOperand(OpIdx);
      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg()) {
          SGPRUsed = MO.getReg();
          if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) {
                return SGPRUsed != SGPR;
              })) {
            ++ConstantBusCount;
            SGPRsUsed.push_back(SGPRUsed);
          }
        } else {
          ++ConstantBusCount;
          ++LiteralCount;
        }
      }
    }

    SGPRUsed = findImplicitSGPRRead(MI);
    if (SGPRUsed != AMDGPU::NoRegister) {
      // Implicit uses may safely overlap true operands.
      if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) {
            return !RI.regsOverlap(SGPRUsed, SGPR);
          })) {
        ++ConstantBusCount;
        SGPRsUsed.push_back(SGPRUsed);
      }
    }

    // v_writelane_b32 is an exception from the constant bus restriction:
    // vsrc0 can be an SGPR, constant or m0, and the lane select an SGPR, m0
    // or inline constant.
    if (ConstantBusCount > ST.getConstantBusLimit(Opcode) &&
        Opcode != AMDGPU::V_WRITELANE_B32) {
      ErrInfo = "VOP* instruction violates constant bus restriction";
      return false;
    }

    if (isVOP3(MI) && LiteralCount) {
      if (!ST.hasVOP3Literal()) {
        ErrInfo = "VOP3 instruction uses literal";
        return false;
      }
      if (LiteralCount > 1) {
        ErrInfo = "VOP3 instruction uses more than one literal";
        return false;
      }
    }
  }

  // Special case for writelane - this can break the constant bus rule, but
  // still can't use more than one SGPR register.
  if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) {
    unsigned SGPRCount = 0;
    Register SGPRUsed = AMDGPU::NoRegister;

    for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) {
      if (OpIdx == -1)
        break;

      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) {
        if (MO.isReg() && MO.getReg() != AMDGPU::M0) {
          if (MO.getReg() != SGPRUsed)
            ++SGPRCount;
          SGPRUsed = MO.getReg();
        }
      }
      if (SGPRCount > ST.getConstantBusLimit(Opcode)) {
        ErrInfo = "WRITELANE instruction violates constant bus restriction";
        return false;
      }
    }
  }

  // Verify misc. restrictions on specific instructions.
3869 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3870 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3871 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3872 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3873 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3874 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3875 if (!compareMachineOp(Src0, Src1) && 3876 !compareMachineOp(Src0, Src2)) { 3877 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3878 return false; 3879 } 3880 } 3881 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 3882 SISrcMods::ABS) || 3883 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 3884 SISrcMods::ABS) || 3885 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 3886 SISrcMods::ABS)) { 3887 ErrInfo = "ABS not allowed in VOP3B instructions"; 3888 return false; 3889 } 3890 } 3891 3892 if (isSOP2(MI) || isSOPC(MI)) { 3893 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3894 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3895 unsigned Immediates = 0; 3896 3897 if (!Src0.isReg() && 3898 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3899 Immediates++; 3900 if (!Src1.isReg() && 3901 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3902 Immediates++; 3903 3904 if (Immediates > 1) { 3905 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3906 return false; 3907 } 3908 } 3909 3910 if (isSOPK(MI)) { 3911 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3912 if (Desc.isBranch()) { 3913 if (!Op->isMBB()) { 3914 ErrInfo = "invalid branch target for SOPK instruction"; 3915 return false; 3916 } 3917 } else { 3918 uint64_t Imm = Op->getImm(); 3919 if (sopkIsZext(MI)) { 3920 if (!isUInt<16>(Imm)) { 3921 ErrInfo = "invalid immediate for SOPK instruction"; 3922 return false; 3923 } 3924 } else { 3925 if (!isInt<16>(Imm)) { 3926 ErrInfo = "invalid immediate for SOPK instruction"; 3927 return false; 3928 } 3929 } 3930 } 3931 } 3932 3933 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3934 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3935 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3936 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3937 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3938 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3939 3940 const unsigned StaticNumOps = Desc.getNumOperands() + 3941 Desc.getNumImplicitUses(); 3942 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3943 3944 // Allow additional implicit operands. This allows a fixup done by the post 3945 // RA scheduler where the main implicit operand is killed and implicit-defs 3946 // are added for sub-registers that remain live after this instruction. 
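    // V_MOVRELS_B32 reads VGPR[src0 + M0] and V_MOVRELD_B32 writes
    // VGPR[vdst + M0], so the pseudos carry an implicit use (and, for movreld,
    // a tied implicit def) of the full candidate vector register so that
    // liveness is modelled correctly.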
3947 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3948 ErrInfo = "missing implicit register operands"; 3949 return false; 3950 } 3951 3952 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3953 if (IsDst) { 3954 if (!Dst->isUse()) { 3955 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3956 return false; 3957 } 3958 3959 unsigned UseOpIdx; 3960 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3961 UseOpIdx != StaticNumOps + 1) { 3962 ErrInfo = "movrel implicit operands should be tied"; 3963 return false; 3964 } 3965 } 3966 3967 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3968 const MachineOperand &ImpUse 3969 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3970 if (!ImpUse.isReg() || !ImpUse.isUse() || 3971 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3972 ErrInfo = "src0 should be subreg of implicit vector use"; 3973 return false; 3974 } 3975 } 3976 3977 // Make sure we aren't losing exec uses in the td files. This mostly requires 3978 // being careful when using let Uses to try to add other use registers. 3979 if (shouldReadExec(MI)) { 3980 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3981 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3982 return false; 3983 } 3984 } 3985 3986 if (isSMRD(MI)) { 3987 if (MI.mayStore()) { 3988 // The register offset form of scalar stores may only use m0 as the 3989 // soffset register. 3990 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3991 if (Soff && Soff->getReg() != AMDGPU::M0) { 3992 ErrInfo = "scalar stores must use m0 as offset register"; 3993 return false; 3994 } 3995 } 3996 } 3997 3998 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 3999 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4000 if (Offset->getImm() != 0) { 4001 ErrInfo = "subtarget does not support offsets in flat instructions"; 4002 return false; 4003 } 4004 } 4005 4006 if (isMIMG(MI)) { 4007 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4008 if (DimOp) { 4009 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4010 AMDGPU::OpName::vaddr0); 4011 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4012 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4013 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4014 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4015 const AMDGPU::MIMGDimInfo *Dim = 4016 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4017 4018 if (!Dim) { 4019 ErrInfo = "dim is out of range"; 4020 return false; 4021 } 4022 4023 bool IsA16 = false; 4024 if (ST.hasR128A16()) { 4025 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4026 IsA16 = R128A16->getImm() != 0; 4027 } else if (ST.hasGFX10A16()) { 4028 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4029 IsA16 = A16->getImm() != 0; 4030 } 4031 4032 bool PackDerivatives = IsA16 || BaseOpcode->G16; 4033 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4034 4035 unsigned AddrWords = BaseOpcode->NumExtraArgs; 4036 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 4037 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 4038 if (IsA16) 4039 AddrWords += (AddrComponents + 1) / 2; 4040 else 4041 AddrWords += AddrComponents; 4042 4043 if (BaseOpcode->Gradients) { 4044 if (PackDerivatives) 4045 // There are two gradients per coordinate, we pack them separately. 
4046 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 4047 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2; 4048 else 4049 AddrWords += Dim->NumGradients; 4050 } 4051 4052 unsigned VAddrWords; 4053 if (IsNSA) { 4054 VAddrWords = SRsrcIdx - VAddr0Idx; 4055 } else { 4056 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4057 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4058 if (AddrWords > 8) 4059 AddrWords = 16; 4060 else if (AddrWords > 4) 4061 AddrWords = 8; 4062 else if (AddrWords == 4) 4063 AddrWords = 4; 4064 else if (AddrWords == 3) 4065 AddrWords = 3; 4066 } 4067 4068 if (VAddrWords != AddrWords) { 4069 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4070 << " but got " << VAddrWords << "\n"); 4071 ErrInfo = "bad vaddr size"; 4072 return false; 4073 } 4074 } 4075 } 4076 4077 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4078 if (DppCt) { 4079 using namespace AMDGPU::DPP; 4080 4081 unsigned DC = DppCt->getImm(); 4082 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4083 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4084 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4085 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4086 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4087 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4088 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4089 ErrInfo = "Invalid dpp_ctrl value"; 4090 return false; 4091 } 4092 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4093 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4094 ErrInfo = "Invalid dpp_ctrl value: " 4095 "wavefront shifts are not supported on GFX10+"; 4096 return false; 4097 } 4098 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4099 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4100 ErrInfo = "Invalid dpp_ctrl value: " 4101 "broadcasts are not supported on GFX10+"; 4102 return false; 4103 } 4104 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4105 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4106 ErrInfo = "Invalid dpp_ctrl value: " 4107 "row_share and row_xmask are not supported before GFX10"; 4108 return false; 4109 } 4110 } 4111 4112 return true; 4113 } 4114 4115 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4116 switch (MI.getOpcode()) { 4117 default: return AMDGPU::INSTRUCTION_LIST_END; 4118 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4119 case AMDGPU::COPY: return AMDGPU::COPY; 4120 case AMDGPU::PHI: return AMDGPU::PHI; 4121 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4122 case AMDGPU::WQM: return AMDGPU::WQM; 4123 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4124 case AMDGPU::WWM: return AMDGPU::WWM; 4125 case AMDGPU::S_MOV_B32: { 4126 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4127 return MI.getOperand(1).isReg() || 4128 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4129 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4130 } 4131 case AMDGPU::S_ADD_I32: 4132 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4133 case AMDGPU::S_ADDC_U32: 4134 return AMDGPU::V_ADDC_U32_e32; 4135 case AMDGPU::S_SUB_I32: 4136 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4137 // FIXME: These are not consistently handled, and selected when the carry is 4138 // used. 
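  // S_ADD_U32/S_SUB_U32 define SCC as a carry-out rather than an overflow
  // bit; once moved to the VALU the carry is produced in VCC by the _CO_
  // opcodes, and any SCC readers have to be rewritten to read VCC instead.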
4139 case AMDGPU::S_ADD_U32: 4140 return AMDGPU::V_ADD_CO_U32_e32; 4141 case AMDGPU::S_SUB_U32: 4142 return AMDGPU::V_SUB_CO_U32_e32; 4143 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4144 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 4145 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 4146 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 4147 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4148 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4149 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4150 case AMDGPU::S_XNOR_B32: 4151 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4152 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4153 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4154 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4155 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4156 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4157 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 4158 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4159 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 4160 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4161 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 4162 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 4163 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 4164 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 4165 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 4166 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4167 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4168 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4169 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4170 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 4171 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 4172 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 4173 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 4174 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 4175 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 4176 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 4177 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 4178 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 4179 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 4180 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 4181 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 4182 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 4183 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 4184 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4185 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4186 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4187 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4188 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4189 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4190 } 4191 llvm_unreachable( 4192 "Unexpected scalar opcode without corresponding vector one!"); 4193 } 4194 4195 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4196 unsigned OpNo) const { 4197 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4198 const MCInstrDesc &Desc = get(MI.getOpcode()); 4199 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4200 Desc.OpInfo[OpNo].RegClass == -1) { 4201 Register Reg = MI.getOperand(OpNo).getReg(); 4202 4203 if 
(Reg.isVirtual()) 4204 return MRI.getRegClass(Reg); 4205 return RI.getPhysRegClass(Reg); 4206 } 4207 4208 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4209 return RI.getRegClass(RCID); 4210 } 4211 4212 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4213 MachineBasicBlock::iterator I = MI; 4214 MachineBasicBlock *MBB = MI.getParent(); 4215 MachineOperand &MO = MI.getOperand(OpIdx); 4216 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4217 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4218 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4219 unsigned Size = RI.getRegSizeInBits(*RC); 4220 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4221 if (MO.isReg()) 4222 Opcode = AMDGPU::COPY; 4223 else if (RI.isSGPRClass(RC)) 4224 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4225 4226 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4227 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 4228 VRC = &AMDGPU::VReg_64RegClass; 4229 else 4230 VRC = &AMDGPU::VGPR_32RegClass; 4231 4232 Register Reg = MRI.createVirtualRegister(VRC); 4233 DebugLoc DL = MBB->findDebugLoc(I); 4234 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4235 MO.ChangeToRegister(Reg, false); 4236 } 4237 4238 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4239 MachineRegisterInfo &MRI, 4240 MachineOperand &SuperReg, 4241 const TargetRegisterClass *SuperRC, 4242 unsigned SubIdx, 4243 const TargetRegisterClass *SubRC) 4244 const { 4245 MachineBasicBlock *MBB = MI->getParent(); 4246 DebugLoc DL = MI->getDebugLoc(); 4247 Register SubReg = MRI.createVirtualRegister(SubRC); 4248 4249 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4250 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4251 .addReg(SuperReg.getReg(), 0, SubIdx); 4252 return SubReg; 4253 } 4254 4255 // Just in case the super register is itself a sub-register, copy it to a new 4256 // value so we don't need to worry about merging its subreg index with the 4257 // SubIdx passed to this function. The register coalescer should be able to 4258 // eliminate this extra copy. 
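  // For example, when asked for sub0 of %4.sub2_sub3, we first copy
  // %4.sub2_sub3 into a fresh 64-bit register and then take sub0 of the copy,
  // rather than composing the two subregister indices here (%4 is just an
  // illustrative virtual register).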
4259 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4260 4261 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4262 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4263 4264 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4265 .addReg(NewSuperReg, 0, SubIdx); 4266 4267 return SubReg; 4268 } 4269 4270 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4271 MachineBasicBlock::iterator MII, 4272 MachineRegisterInfo &MRI, 4273 MachineOperand &Op, 4274 const TargetRegisterClass *SuperRC, 4275 unsigned SubIdx, 4276 const TargetRegisterClass *SubRC) const { 4277 if (Op.isImm()) { 4278 if (SubIdx == AMDGPU::sub0) 4279 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4280 if (SubIdx == AMDGPU::sub1) 4281 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4282 4283 llvm_unreachable("Unhandled register index for immediate"); 4284 } 4285 4286 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4287 SubIdx, SubRC); 4288 return MachineOperand::CreateReg(SubReg, false); 4289 } 4290 4291 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4292 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4293 assert(Inst.getNumExplicitOperands() == 3); 4294 MachineOperand Op1 = Inst.getOperand(1); 4295 Inst.RemoveOperand(1); 4296 Inst.addOperand(Op1); 4297 } 4298 4299 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4300 const MCOperandInfo &OpInfo, 4301 const MachineOperand &MO) const { 4302 if (!MO.isReg()) 4303 return false; 4304 4305 Register Reg = MO.getReg(); 4306 4307 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4308 if (Reg.isPhysical()) 4309 return DRC->contains(Reg); 4310 4311 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4312 4313 if (MO.getSubReg()) { 4314 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4315 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4316 if (!SuperRC) 4317 return false; 4318 4319 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4320 if (!DRC) 4321 return false; 4322 } 4323 return RC->hasSuperClassEq(DRC); 4324 } 4325 4326 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4327 const MCOperandInfo &OpInfo, 4328 const MachineOperand &MO) const { 4329 if (MO.isReg()) 4330 return isLegalRegOperand(MRI, OpInfo, MO); 4331 4332 // Handle non-register types that are treated like immediates. 4333 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4334 return true; 4335 } 4336 4337 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4338 const MachineOperand *MO) const { 4339 const MachineFunction &MF = *MI.getParent()->getParent(); 4340 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4341 const MCInstrDesc &InstDesc = MI.getDesc(); 4342 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4343 const TargetRegisterClass *DefinedRC = 4344 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4345 if (!MO) 4346 MO = &MI.getOperand(OpIdx); 4347 4348 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4349 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4350 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4351 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4352 return false; 4353 4354 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4355 if (MO->isReg()) 4356 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4357 4358 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4359 if (i == OpIdx) 4360 continue; 4361 const MachineOperand &Op = MI.getOperand(i); 4362 if (Op.isReg()) { 4363 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4364 if (!SGPRsUsed.count(SGPR) && 4365 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4366 if (--ConstantBusLimit <= 0) 4367 return false; 4368 SGPRsUsed.insert(SGPR); 4369 } 4370 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4371 if (--ConstantBusLimit <= 0) 4372 return false; 4373 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4374 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4375 if (!VOP3LiteralLimit--) 4376 return false; 4377 if (--ConstantBusLimit <= 0) 4378 return false; 4379 } 4380 } 4381 } 4382 4383 if (MO->isReg()) { 4384 assert(DefinedRC); 4385 return isLegalRegOperand(MRI, OpInfo, *MO); 4386 } 4387 4388 // Handle non-register types that are treated like immediates. 4389 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4390 4391 if (!DefinedRC) { 4392 // This operand expects an immediate. 4393 return true; 4394 } 4395 4396 return isImmOperandLegal(MI, OpIdx, *MO); 4397 } 4398 4399 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4400 MachineInstr &MI) const { 4401 unsigned Opc = MI.getOpcode(); 4402 const MCInstrDesc &InstrDesc = get(Opc); 4403 4404 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4405 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4406 4407 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4408 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4409 4410 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4411 // we need to only have one constant bus use before GFX10. 4412 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4413 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4414 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4415 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4416 legalizeOpWithMove(MI, Src0Idx); 4417 4418 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4419 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4420 // src0/src1 with V_READFIRSTLANE. 4421 if (Opc == AMDGPU::V_WRITELANE_B32) { 4422 const DebugLoc &DL = MI.getDebugLoc(); 4423 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4424 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4425 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4426 .add(Src0); 4427 Src0.ChangeToRegister(Reg, false); 4428 } 4429 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4430 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4431 const DebugLoc &DL = MI.getDebugLoc(); 4432 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4433 .add(Src1); 4434 Src1.ChangeToRegister(Reg, false); 4435 } 4436 return; 4437 } 4438 4439 // No VOP2 instructions support AGPRs. 
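  // (Only a few instructions, e.g. the MFMA and v_accvgpr ops, can read AGPRs
  // directly, so AGPR sources are first copied into ordinary VGPRs.)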
4440 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4441 legalizeOpWithMove(MI, Src0Idx); 4442 4443 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4444 legalizeOpWithMove(MI, Src1Idx); 4445 4446 // VOP2 src0 instructions support all operand types, so we don't need to check 4447 // their legality. If src1 is already legal, we don't need to do anything. 4448 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4449 return; 4450 4451 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4452 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4453 // select is uniform. 4454 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4455 RI.isVGPR(MRI, Src1.getReg())) { 4456 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4457 const DebugLoc &DL = MI.getDebugLoc(); 4458 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4459 .add(Src1); 4460 Src1.ChangeToRegister(Reg, false); 4461 return; 4462 } 4463 4464 // We do not use commuteInstruction here because it is too aggressive and will 4465 // commute if it is possible. We only want to commute here if it improves 4466 // legality. This can be called a fairly large number of times so don't waste 4467 // compile time pointlessly swapping and checking legality again. 4468 if (HasImplicitSGPR || !MI.isCommutable()) { 4469 legalizeOpWithMove(MI, Src1Idx); 4470 return; 4471 } 4472 4473 // If src0 can be used as src1, commuting will make the operands legal. 4474 // Otherwise we have to give up and insert a move. 4475 // 4476 // TODO: Other immediate-like operand kinds could be commuted if there was a 4477 // MachineOperand::ChangeTo* for them. 4478 if ((!Src1.isImm() && !Src1.isReg()) || 4479 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4480 legalizeOpWithMove(MI, Src1Idx); 4481 return; 4482 } 4483 4484 int CommutedOpc = commuteOpcode(MI); 4485 if (CommutedOpc == -1) { 4486 legalizeOpWithMove(MI, Src1Idx); 4487 return; 4488 } 4489 4490 MI.setDesc(get(CommutedOpc)); 4491 4492 Register Src0Reg = Src0.getReg(); 4493 unsigned Src0SubReg = Src0.getSubReg(); 4494 bool Src0Kill = Src0.isKill(); 4495 4496 if (Src1.isImm()) 4497 Src0.ChangeToImmediate(Src1.getImm()); 4498 else if (Src1.isReg()) { 4499 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4500 Src0.setSubReg(Src1.getSubReg()); 4501 } else 4502 llvm_unreachable("Should only have register or immediate operands"); 4503 4504 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4505 Src1.setSubReg(Src0SubReg); 4506 fixImplicitOperands(MI); 4507 } 4508 4509 // Legalize VOP3 operands. All operand types are supported for any operand 4510 // but only one literal constant and only starting from GFX10. 
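// For example, on GFX9 a VOP3 such as v_fma_f32 with two different SGPR
// sources exceeds the single constant bus slot, so one of the scalar sources
// is rewritten through a VGPR by legalizeOpWithMove.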
4511 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4512 MachineInstr &MI) const { 4513 unsigned Opc = MI.getOpcode(); 4514 4515 int VOP3Idx[3] = { 4516 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4517 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4518 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4519 }; 4520 4521 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4522 Opc == AMDGPU::V_PERMLANEX16_B32) { 4523 // src1 and src2 must be scalar 4524 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4525 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4526 const DebugLoc &DL = MI.getDebugLoc(); 4527 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4528 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4529 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4530 .add(Src1); 4531 Src1.ChangeToRegister(Reg, false); 4532 } 4533 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4534 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4535 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4536 .add(Src2); 4537 Src2.ChangeToRegister(Reg, false); 4538 } 4539 } 4540 4541 // Find the one SGPR operand we are allowed to use. 4542 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4543 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4544 SmallDenseSet<unsigned> SGPRsUsed; 4545 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 4546 if (SGPRReg != AMDGPU::NoRegister) { 4547 SGPRsUsed.insert(SGPRReg); 4548 --ConstantBusLimit; 4549 } 4550 4551 for (unsigned i = 0; i < 3; ++i) { 4552 int Idx = VOP3Idx[i]; 4553 if (Idx == -1) 4554 break; 4555 MachineOperand &MO = MI.getOperand(Idx); 4556 4557 if (!MO.isReg()) { 4558 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4559 continue; 4560 4561 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4562 --LiteralLimit; 4563 --ConstantBusLimit; 4564 continue; 4565 } 4566 4567 --LiteralLimit; 4568 --ConstantBusLimit; 4569 legalizeOpWithMove(MI, Idx); 4570 continue; 4571 } 4572 4573 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4574 !isOperandLegal(MI, Idx, &MO)) { 4575 legalizeOpWithMove(MI, Idx); 4576 continue; 4577 } 4578 4579 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4580 continue; // VGPRs are legal 4581 4582 // We can use one SGPR in each VOP3 instruction prior to GFX10 4583 // and two starting from GFX10. 4584 if (SGPRsUsed.count(MO.getReg())) 4585 continue; 4586 if (ConstantBusLimit > 0) { 4587 SGPRsUsed.insert(MO.getReg()); 4588 --ConstantBusLimit; 4589 continue; 4590 } 4591 4592 // If we make it this far, then the operand is not legal and we must 4593 // legalize it. 
4594 legalizeOpWithMove(MI, Idx); 4595 } 4596 } 4597 4598 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, 4599 MachineRegisterInfo &MRI) const { 4600 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 4601 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 4602 Register DstReg = MRI.createVirtualRegister(SRC); 4603 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 4604 4605 if (RI.hasAGPRs(VRC)) { 4606 VRC = RI.getEquivalentVGPRClass(VRC); 4607 Register NewSrcReg = MRI.createVirtualRegister(VRC); 4608 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4609 get(TargetOpcode::COPY), NewSrcReg) 4610 .addReg(SrcReg); 4611 SrcReg = NewSrcReg; 4612 } 4613 4614 if (SubRegs == 1) { 4615 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4616 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 4617 .addReg(SrcReg); 4618 return DstReg; 4619 } 4620 4621 SmallVector<unsigned, 8> SRegs; 4622 for (unsigned i = 0; i < SubRegs; ++i) { 4623 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4624 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4625 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 4626 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 4627 SRegs.push_back(SGPR); 4628 } 4629 4630 MachineInstrBuilder MIB = 4631 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4632 get(AMDGPU::REG_SEQUENCE), DstReg); 4633 for (unsigned i = 0; i < SubRegs; ++i) { 4634 MIB.addReg(SRegs[i]); 4635 MIB.addImm(RI.getSubRegFromChannel(i)); 4636 } 4637 return DstReg; 4638 } 4639 4640 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 4641 MachineInstr &MI) const { 4642 4643 // If the pointer is store in VGPRs, then we need to move them to 4644 // SGPRs using v_readfirstlane. This is safe because we only select 4645 // loads with uniform pointers to SMRD instruction so we know the 4646 // pointer value is uniform. 4647 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 4648 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 4649 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 4650 SBase->setReg(SGPR); 4651 } 4652 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff); 4653 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { 4654 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); 4655 SOff->setReg(SGPR); 4656 } 4657 } 4658 4659 // FIXME: Remove this when SelectionDAG is obsoleted. 4660 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 4661 MachineInstr &MI) const { 4662 if (!isSegmentSpecificFLAT(MI)) 4663 return; 4664 4665 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 4666 // thinks they are uniform, so a readfirstlane should be valid. 4667 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 4668 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 4669 return; 4670 4671 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 4672 SAddr->setReg(ToSGPR); 4673 } 4674 4675 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 4676 MachineBasicBlock::iterator I, 4677 const TargetRegisterClass *DstRC, 4678 MachineOperand &Op, 4679 MachineRegisterInfo &MRI, 4680 const DebugLoc &DL) const { 4681 Register OpReg = Op.getReg(); 4682 unsigned OpSubReg = Op.getSubReg(); 4683 4684 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 4685 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 4686 4687 // Check if operand is already the correct register class. 
4688 if (DstRC == OpRC) 4689 return; 4690 4691 Register DstReg = MRI.createVirtualRegister(DstRC); 4692 MachineInstr *Copy = 4693 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 4694 4695 Op.setReg(DstReg); 4696 Op.setSubReg(0); 4697 4698 MachineInstr *Def = MRI.getVRegDef(OpReg); 4699 if (!Def) 4700 return; 4701 4702 // Try to eliminate the copy if it is copying an immediate value. 4703 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 4704 FoldImmediate(*Copy, *Def, OpReg, &MRI); 4705 4706 bool ImpDef = Def->isImplicitDef(); 4707 while (!ImpDef && Def && Def->isCopy()) { 4708 if (Def->getOperand(1).getReg().isPhysical()) 4709 break; 4710 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4711 ImpDef = Def && Def->isImplicitDef(); 4712 } 4713 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 4714 !ImpDef) 4715 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 4716 } 4717 4718 // Emit the actual waterfall loop, executing the wrapped instruction for each 4719 // unique value of \p Rsrc across all lanes. In the best case we execute 1 4720 // iteration, in the worst case we execute 64 (once per lane). 4721 static void 4722 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 4723 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 4724 const DebugLoc &DL, MachineOperand &Rsrc) { 4725 MachineFunction &MF = *OrigBB.getParent(); 4726 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4727 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4728 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4729 unsigned SaveExecOpc = 4730 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 4731 unsigned XorTermOpc = 4732 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 4733 unsigned AndOpc = 4734 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 4735 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4736 4737 MachineBasicBlock::iterator I = LoopBB.begin(); 4738 4739 SmallVector<Register, 8> ReadlanePieces; 4740 Register CondReg = AMDGPU::NoRegister; 4741 4742 Register VRsrc = Rsrc.getReg(); 4743 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 4744 4745 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 4746 unsigned NumSubRegs = RegSize / 32; 4747 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 4748 4749 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 4750 4751 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4752 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4753 4754 // Read the next variant <- also loop target. 4755 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 4756 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 4757 4758 // Read the next variant <- also loop target. 4759 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 4760 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 4761 4762 ReadlanePieces.push_back(CurRegLo); 4763 ReadlanePieces.push_back(CurRegHi); 4764 4765 // Comparison is to be done as 64-bit. 
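    // Using v_cmp_eq_u64 on dword pairs halves the number of compares needed
    // for a wide descriptor: a 128-bit SRsrc takes two 64-bit compares that
    // are then ANDed together.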
    Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass);
    BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg)
        .addReg(CurRegLo)
        .addImm(AMDGPU::sub0)
        .addReg(CurRegHi)
        .addImm(AMDGPU::sub1);

    Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC);
    auto Cmp =
        BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg)
            .addReg(CurReg);
    if (NumSubRegs <= 2)
      Cmp.addReg(VRsrc);
    else
      Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2));

    // Combine the comparison results with AND.
    if (CondReg == AMDGPU::NoRegister) // First.
      CondReg = NewCondReg;
    else { // If not the first, we create an AND.
      Register AndReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg)
          .addReg(CondReg)
          .addReg(NewCondReg);
      CondReg = AndReg;
    }
  } // End for loop.

  auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc));
  Register SRsrc = MRI.createVirtualRegister(SRsrcRC);

  // Build scalar Rsrc.
  auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc);
  unsigned Channel = 0;
  for (Register Piece : ReadlanePieces) {
    Merge.addReg(Piece)
         .addImm(TRI->getSubRegFromChannel(Channel++));
  }

  // Update Rsrc operand to use the SGPR Rsrc.
  Rsrc.setReg(SRsrc);
  Rsrc.setIsKill(true);

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
  MRI.setSimpleHint(SaveExec, CondReg);

  // Update EXEC to matching lanes, saving original to SaveExec.
  BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec)
      .addReg(CondReg, RegState::Kill);

  // The original instruction is here; we insert the terminators after it.
  I = LoopBB.end();

  // Update EXEC, switch all done bits to 0 and all todo bits to 1.
  BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec)
      .addReg(Exec)
      .addReg(SaveExec);

  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB);
}

// Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register
// with SGPRs by iterating over all unique values across all lanes.
static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI,
                              MachineOperand &Rsrc, MachineDominatorTree *MDT,
                              MachineBasicBlock::iterator Begin = nullptr,
                              MachineBasicBlock::iterator End = nullptr) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (!Begin.isValid())
    Begin = &MI;
  if (!End.isValid()) {
    End = &MI;
    ++End;
  }
  const DebugLoc &DL = MI.getDebugLoc();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);

  // Save the EXEC mask
  BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec);

  // Killed uses in the instruction we are waterfalling around will be
  // incorrect due to the added control-flow.
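  // (Once the loop back-edge exists those registers are re-read on every
  // iteration, so kill flags computed for straight-line code would be stale;
  // clear them conservatively.)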
4856 MachineBasicBlock::iterator AfterMI = MI; 4857 ++AfterMI; 4858 for (auto I = Begin; I != AfterMI; I++) { 4859 for (auto &MO : I->uses()) { 4860 if (MO.isReg() && MO.isUse()) { 4861 MRI.clearKillFlags(MO.getReg()); 4862 } 4863 } 4864 } 4865 4866 // To insert the loop we need to split the block. Move everything after this 4867 // point to a new block, and insert a new empty block between the two. 4868 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4869 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4870 MachineFunction::iterator MBBI(MBB); 4871 ++MBBI; 4872 4873 MF.insert(MBBI, LoopBB); 4874 MF.insert(MBBI, RemainderBB); 4875 4876 LoopBB->addSuccessor(LoopBB); 4877 LoopBB->addSuccessor(RemainderBB); 4878 4879 // Move Begin to MI to the LoopBB, and the remainder of the block to 4880 // RemainderBB. 4881 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4882 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 4883 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end()); 4884 4885 MBB.addSuccessor(LoopBB); 4886 4887 // Update dominators. We know that MBB immediately dominates LoopBB, that 4888 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4889 // dominates all of the successors transferred to it from MBB that MBB used 4890 // to properly dominate. 4891 if (MDT) { 4892 MDT->addNewBlock(LoopBB, &MBB); 4893 MDT->addNewBlock(RemainderBB, LoopBB); 4894 for (auto &Succ : RemainderBB->successors()) { 4895 if (MDT->properlyDominates(&MBB, Succ)) { 4896 MDT->changeImmediateDominator(Succ, RemainderBB); 4897 } 4898 } 4899 } 4900 4901 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 4902 4903 // Restore the EXEC mask 4904 MachineBasicBlock::iterator First = RemainderBB->begin(); 4905 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 4906 } 4907 4908 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 4909 static std::tuple<unsigned, unsigned> 4910 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 4911 MachineBasicBlock &MBB = *MI.getParent(); 4912 MachineFunction &MF = *MBB.getParent(); 4913 MachineRegisterInfo &MRI = MF.getRegInfo(); 4914 4915 // Extract the ptr from the resource descriptor. 
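  // In ADDR64 mode the 64-bit base pointer is supplied through VAddr instead,
  // so the replacement descriptor built below keeps only the default data
  // format and a zero base address; the final address is unchanged.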
  unsigned RsrcPtr =
      TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
                             AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

  // Create an empty resource descriptor
  Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
  uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();

  // Zero64 = 0
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
      .addImm(0);

  // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);

  // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
      .addImm(RsrcDataFormat >> 32);

  // NewSRsrc = {Zero64, SRsrcFormat}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
      .addReg(Zero64)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(SRsrcFormatLo)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcFormatHi)
      .addImm(AMDGPU::sub3);

  return std::make_tuple(RsrcPtr, NewSRsrc);
}

void SIInstrInfo::legalizeOperands(MachineInstr &MI,
                                   MachineDominatorTree *MDT) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Legalize VOP2
  if (isVOP2(MI) || isVOPC(MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return;
  }

  // Legalize VOP3
  if (isVOP3(MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Legalize SMRD
  if (isSMRD(MI)) {
    legalizeOperandsSMRD(MRI, MI);
    return;
  }

  // Legalize FLAT
  if (isFLAT(MI)) {
    legalizeOperandsFLAT(MRI, MI);
    return;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
  if (MI.getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
      if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI.getOperand(i).getReg());
      if (RI.hasVectorRegisters(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they must all be VGPRs;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
      if (!VRC) {
        assert(SRC);
        if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
          VRC = &AMDGPU::VReg_1RegClass;
        } else
          VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                    ? RI.getEquivalentAGPRClass(SRC)
                    : RI.getEquivalentVGPRClass(SRC);
      } else {
        VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                  ? RI.getEquivalentAGPRClass(VRC)
                  : RI.getEquivalentVGPRClass(VRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
5020 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5021 MachineOperand &Op = MI.getOperand(I); 5022 if (!Op.isReg() || !Op.getReg().isVirtual()) 5023 continue; 5024 5025 // MI is a PHI instruction. 5026 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 5027 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 5028 5029 // Avoid creating no-op copies with the same src and dst reg class. These 5030 // confuse some of the machine passes. 5031 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 5032 } 5033 } 5034 5035 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 5036 // VGPR dest type and SGPR sources, insert copies so all operands are 5037 // VGPRs. This seems to help operand folding / the register coalescer. 5038 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 5039 MachineBasicBlock *MBB = MI.getParent(); 5040 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 5041 if (RI.hasVGPRs(DstRC)) { 5042 // Update all the operands so they are VGPR register classes. These may 5043 // not be the same register class because REG_SEQUENCE supports mixing 5044 // subregister index types e.g. sub0_sub1 + sub2 + sub3 5045 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5046 MachineOperand &Op = MI.getOperand(I); 5047 if (!Op.isReg() || !Op.getReg().isVirtual()) 5048 continue; 5049 5050 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5051 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5052 if (VRC == OpRC) 5053 continue; 5054 5055 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5056 Op.setIsKill(); 5057 } 5058 } 5059 5060 return; 5061 } 5062 5063 // Legalize INSERT_SUBREG 5064 // src0 must have the same register class as dst 5065 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5066 Register Dst = MI.getOperand(0).getReg(); 5067 Register Src0 = MI.getOperand(1).getReg(); 5068 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5069 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5070 if (DstRC != Src0RC) { 5071 MachineBasicBlock *MBB = MI.getParent(); 5072 MachineOperand &Op = MI.getOperand(1); 5073 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5074 } 5075 return; 5076 } 5077 5078 // Legalize SI_INIT_M0 5079 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5080 MachineOperand &Src = MI.getOperand(0); 5081 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5082 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5083 return; 5084 } 5085 5086 // Legalize MIMG and MUBUF/MTBUF for shaders. 5087 // 5088 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5089 // scratch memory access. In both cases, the legalization never involves 5090 // conversion to the addr64 form. 
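  // In rough terms: a non-SGPR srsrc/ssamp on these instructions cannot be
  // fixed by rewriting the address, so loadSRsrcFromVGPR wraps the
  // instruction in a waterfall loop that reads one uniform descriptor value
  // per iteration under an adjusted EXEC mask.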
  if (isMIMG(MI) ||
      (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
       (isMUBUF(MI) || isMTBUF(MI)))) {
    MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
      loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);

    MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
      loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);

    return;
  }

  // Legalize SI_CALL
  if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
    MachineOperand *Dest = &MI.getOperand(0);
    if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
      // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN, as well
      // as the following copies, into the loop block; copies from and to
      // physical registers also need to end up inside the loop.
      unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
      unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

      // Also move the copies to physical registers into the loop block
      MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::iterator Start(&MI);
      while (Start->getOpcode() != FrameSetupOpcode)
        --Start;
      MachineBasicBlock::iterator End(&MI);
      while (End->getOpcode() != FrameDestroyOpcode)
        ++End;
      // Also include following copies of the return value
      ++End;
      while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
             MI.definesRegister(End->getOperand(1).getReg()))
        ++End;
      loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
    }
  }

  // Legalize MUBUF* instructions.
  int RsrcIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
  if (RsrcIdx != -1) {
    // We have a MUBUF instruction
    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
                             RI.getRegClass(RsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    // Legalize a VGPR Rsrc.
    //
    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
    // a zero-value SRsrc.
    //
    // If the instruction is _OFFSET (both idxen and offen disabled), and we
    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
    // above.
    //
    // Otherwise we are on non-ADDR64 hardware, and/or we have
    // idxen/offen/bothen and we fall back to a waterfall loop.

    MachineBasicBlock &MBB = *MI.getParent();

    MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
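      // The 64-bit pointer add is done as a 32-bit add that defines a carry
      // (V_ADD_CO_U32) feeding an add-with-carry (V_ADDC_U32), with the two
      // halves recombined by a REG_SEQUENCE.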
      Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
      Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
      Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
          .addDef(CondReg0)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
          .addImm(0);

      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
          .addDef(CondReg1, RegState::Dead)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
          .addReg(CondReg0, RegState::Kill)
          .addImm(0);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);

      VAddr->setReg(NewVAddr);
      Rsrc->setReg(NewSRsrc);
    } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                .add(*VData)
                .addReg(NewVAddr)
                .addReg(NewSRsrc)
                .add(*SOffset)
                .add(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC =
                getNamedOperand(MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }
        if (const MachineOperand *DLC =
                getNamedOperand(MI, AMDGPU::OpName::dlc)) {
          MIB.addImm(DLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE =
                getNamedOperand(MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));

        MIB.cloneMemRefs(MI);
        Addr64 = MIB;
      } else {
        // Atomics with return.
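        // vdata_in is tied to the result, and the atomic-with-return ADDR64
        // form only takes the slc bit in addition to the usual address and
        // offset operands, so that is all that gets copied below.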
5254 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5255 .add(*VData) 5256 .add(*VDataIn) 5257 .addReg(NewVAddr) 5258 .addReg(NewSRsrc) 5259 .add(*SOffset) 5260 .add(*Offset) 5261 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 5262 .cloneMemRefs(MI); 5263 } 5264 5265 MI.removeFromParent(); 5266 5267 // NewVaddr = {NewVaddrHi, NewVaddrLo} 5268 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 5269 NewVAddr) 5270 .addReg(RsrcPtr, 0, AMDGPU::sub0) 5271 .addImm(AMDGPU::sub0) 5272 .addReg(RsrcPtr, 0, AMDGPU::sub1) 5273 .addImm(AMDGPU::sub1); 5274 } else { 5275 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 5276 // to SGPRs. 5277 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 5278 } 5279 } 5280 } 5281 5282 void SIInstrInfo::moveToVALU(MachineInstr &TopInst, 5283 MachineDominatorTree *MDT) const { 5284 SetVectorType Worklist; 5285 Worklist.insert(&TopInst); 5286 5287 while (!Worklist.empty()) { 5288 MachineInstr &Inst = *Worklist.pop_back_val(); 5289 MachineBasicBlock *MBB = Inst.getParent(); 5290 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 5291 5292 unsigned Opcode = Inst.getOpcode(); 5293 unsigned NewOpcode = getVALUOp(Inst); 5294 5295 // Handle some special cases 5296 switch (Opcode) { 5297 default: 5298 break; 5299 case AMDGPU::S_ADD_U64_PSEUDO: 5300 case AMDGPU::S_SUB_U64_PSEUDO: 5301 splitScalar64BitAddSub(Worklist, Inst, MDT); 5302 Inst.eraseFromParent(); 5303 continue; 5304 case AMDGPU::S_ADD_I32: 5305 case AMDGPU::S_SUB_I32: 5306 // FIXME: The u32 versions currently selected use the carry. 5307 if (moveScalarAddSub(Worklist, Inst, MDT)) 5308 continue; 5309 5310 // Default handling 5311 break; 5312 case AMDGPU::S_AND_B64: 5313 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5314 Inst.eraseFromParent(); 5315 continue; 5316 5317 case AMDGPU::S_OR_B64: 5318 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5319 Inst.eraseFromParent(); 5320 continue; 5321 5322 case AMDGPU::S_XOR_B64: 5323 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5324 Inst.eraseFromParent(); 5325 continue; 5326 5327 case AMDGPU::S_NAND_B64: 5328 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5329 Inst.eraseFromParent(); 5330 continue; 5331 5332 case AMDGPU::S_NOR_B64: 5333 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5334 Inst.eraseFromParent(); 5335 continue; 5336 5337 case AMDGPU::S_XNOR_B64: 5338 if (ST.hasDLInsts()) 5339 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5340 else 5341 splitScalar64BitXnor(Worklist, Inst, MDT); 5342 Inst.eraseFromParent(); 5343 continue; 5344 5345 case AMDGPU::S_ANDN2_B64: 5346 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5347 Inst.eraseFromParent(); 5348 continue; 5349 5350 case AMDGPU::S_ORN2_B64: 5351 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5352 Inst.eraseFromParent(); 5353 continue; 5354 5355 case AMDGPU::S_NOT_B64: 5356 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5357 Inst.eraseFromParent(); 5358 continue; 5359 5360 case AMDGPU::S_BCNT1_I32_B64: 5361 splitScalar64BitBCNT(Worklist, Inst); 5362 Inst.eraseFromParent(); 5363 continue; 5364 5365 case AMDGPU::S_BFE_I64: 5366 splitScalar64BitBFE(Worklist, Inst); 5367 Inst.eraseFromParent(); 5368 continue; 5369 5370 case AMDGPU::S_LSHL_B32: 5371 if (ST.hasOnlyRevVALUShifts()) { 5372 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5373 swapOperands(Inst); 5374 } 5375 break; 5376 case 
AMDGPU::S_ASHR_I32: 5377 if (ST.hasOnlyRevVALUShifts()) { 5378 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5379 swapOperands(Inst); 5380 } 5381 break; 5382 case AMDGPU::S_LSHR_B32: 5383 if (ST.hasOnlyRevVALUShifts()) { 5384 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5385 swapOperands(Inst); 5386 } 5387 break; 5388 case AMDGPU::S_LSHL_B64: 5389 if (ST.hasOnlyRevVALUShifts()) { 5390 NewOpcode = AMDGPU::V_LSHLREV_B64; 5391 swapOperands(Inst); 5392 } 5393 break; 5394 case AMDGPU::S_ASHR_I64: 5395 if (ST.hasOnlyRevVALUShifts()) { 5396 NewOpcode = AMDGPU::V_ASHRREV_I64; 5397 swapOperands(Inst); 5398 } 5399 break; 5400 case AMDGPU::S_LSHR_B64: 5401 if (ST.hasOnlyRevVALUShifts()) { 5402 NewOpcode = AMDGPU::V_LSHRREV_B64; 5403 swapOperands(Inst); 5404 } 5405 break; 5406 5407 case AMDGPU::S_ABS_I32: 5408 lowerScalarAbs(Worklist, Inst); 5409 Inst.eraseFromParent(); 5410 continue; 5411 5412 case AMDGPU::S_CBRANCH_SCC0: 5413 case AMDGPU::S_CBRANCH_SCC1: 5414 // Clear unused bits of vcc 5415 if (ST.isWave32()) 5416 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), 5417 AMDGPU::VCC_LO) 5418 .addReg(AMDGPU::EXEC_LO) 5419 .addReg(AMDGPU::VCC_LO); 5420 else 5421 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 5422 AMDGPU::VCC) 5423 .addReg(AMDGPU::EXEC) 5424 .addReg(AMDGPU::VCC); 5425 break; 5426 5427 case AMDGPU::S_BFE_U64: 5428 case AMDGPU::S_BFM_B64: 5429 llvm_unreachable("Moving this op to VALU not implemented"); 5430 5431 case AMDGPU::S_PACK_LL_B32_B16: 5432 case AMDGPU::S_PACK_LH_B32_B16: 5433 case AMDGPU::S_PACK_HH_B32_B16: 5434 movePackToVALU(Worklist, MRI, Inst); 5435 Inst.eraseFromParent(); 5436 continue; 5437 5438 case AMDGPU::S_XNOR_B32: 5439 lowerScalarXnor(Worklist, Inst); 5440 Inst.eraseFromParent(); 5441 continue; 5442 5443 case AMDGPU::S_NAND_B32: 5444 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5445 Inst.eraseFromParent(); 5446 continue; 5447 5448 case AMDGPU::S_NOR_B32: 5449 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5450 Inst.eraseFromParent(); 5451 continue; 5452 5453 case AMDGPU::S_ANDN2_B32: 5454 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5455 Inst.eraseFromParent(); 5456 continue; 5457 5458 case AMDGPU::S_ORN2_B32: 5459 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5460 Inst.eraseFromParent(); 5461 continue; 5462 5463 // TODO: remove as soon as everything is ready 5464 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5465 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5466 // can only be selected from the uniform SDNode. 5467 case AMDGPU::S_ADD_CO_PSEUDO: 5468 case AMDGPU::S_SUB_CO_PSEUDO: { 5469 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5470 ? 
AMDGPU::V_ADDC_U32_e64 5471 : AMDGPU::V_SUBB_U32_e64; 5472 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5473 5474 Register CarryInReg = Inst.getOperand(4).getReg(); 5475 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 5476 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 5477 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 5478 .addReg(CarryInReg); 5479 } 5480 5481 Register CarryOutReg = Inst.getOperand(1).getReg(); 5482 5483 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5484 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5485 MachineInstr *CarryOp = 5486 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5487 .addReg(CarryOutReg, RegState::Define) 5488 .add(Inst.getOperand(2)) 5489 .add(Inst.getOperand(3)) 5490 .addReg(CarryInReg) 5491 .addImm(0); 5492 legalizeOperands(*CarryOp); 5493 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 5494 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 5495 Inst.eraseFromParent(); 5496 } 5497 continue; 5498 case AMDGPU::S_UADDO_PSEUDO: 5499 case AMDGPU::S_USUBO_PSEUDO: { 5500 const DebugLoc &DL = Inst.getDebugLoc(); 5501 MachineOperand &Dest0 = Inst.getOperand(0); 5502 MachineOperand &Dest1 = Inst.getOperand(1); 5503 MachineOperand &Src0 = Inst.getOperand(2); 5504 MachineOperand &Src1 = Inst.getOperand(3); 5505 5506 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 5507 ? AMDGPU::V_ADD_CO_U32_e64 5508 : AMDGPU::V_SUB_CO_U32_e64; 5509 const TargetRegisterClass *NewRC = 5510 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 5511 Register DestReg = MRI.createVirtualRegister(NewRC); 5512 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 5513 .addReg(Dest1.getReg(), RegState::Define) 5514 .add(Src0) 5515 .add(Src1) 5516 .addImm(0); // clamp bit 5517 5518 legalizeOperands(*NewInstr, MDT); 5519 5520 MRI.replaceRegWith(Dest0.getReg(), DestReg); 5521 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 5522 Worklist); 5523 Inst.eraseFromParent(); 5524 } 5525 continue; 5526 5527 case AMDGPU::S_CSELECT_B32: 5528 case AMDGPU::S_CSELECT_B64: 5529 lowerSelect(Worklist, Inst, MDT); 5530 Inst.eraseFromParent(); 5531 continue; 5532 } 5533 5534 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 5535 // We cannot move this instruction to the VALU, so we should try to 5536 // legalize its operands instead. 5537 legalizeOperands(Inst, MDT); 5538 continue; 5539 } 5540 5541 // Use the new VALU Opcode. 5542 const MCInstrDesc &NewDesc = get(NewOpcode); 5543 Inst.setDesc(NewDesc); 5544 5545 // Remove any references to SCC. Vector instructions can't read from it, and 5546 // We're just about to add the implicit use / defs of VCC, and we don't want 5547 // both. 5548 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 5549 MachineOperand &Op = Inst.getOperand(i); 5550 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 5551 // Only propagate through live-def of SCC. 5552 if (Op.isDef() && !Op.isDead()) 5553 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 5554 Inst.RemoveOperand(i); 5555 } 5556 } 5557 5558 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 5559 // We are converting these to a BFE, so we need to add the missing 5560 // operands for the size and offset. 5561 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 
8 : 16; 5562 Inst.addOperand(MachineOperand::CreateImm(0)); 5563 Inst.addOperand(MachineOperand::CreateImm(Size)); 5564 5565 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 5566 // The VALU version adds the second operand to the result, so insert an 5567 // extra 0 operand. 5568 Inst.addOperand(MachineOperand::CreateImm(0)); 5569 } 5570 5571 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 5572 fixImplicitOperands(Inst); 5573 5574 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 5575 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 5576 // If we need to move this to VGPRs, we need to unpack the second operand 5577 // back into the 2 separate ones for bit offset and width. 5578 assert(OffsetWidthOp.isImm() && 5579 "Scalar BFE is only implemented for constant width and offset"); 5580 uint32_t Imm = OffsetWidthOp.getImm(); 5581 5582 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 5583 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 5584 Inst.RemoveOperand(2); // Remove old immediate. 5585 Inst.addOperand(MachineOperand::CreateImm(Offset)); 5586 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 5587 } 5588 5589 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 5590 unsigned NewDstReg = AMDGPU::NoRegister; 5591 if (HasDst) { 5592 Register DstReg = Inst.getOperand(0).getReg(); 5593 if (DstReg.isPhysical()) 5594 continue; 5595 5596 // Update the destination register class. 5597 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 5598 if (!NewDstRC) 5599 continue; 5600 5601 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() && 5602 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 5603 // Instead of creating a copy where src and dst are the same register 5604 // class, we just replace all uses of dst with src. These kinds of 5605 // copies interfere with the heuristics MachineSink uses to decide 5606 // whether or not to split a critical edge. Since the pass assumes 5607 // that copies will end up as machine instructions and not be 5608 // eliminated. 5609 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 5610 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 5611 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 5612 Inst.getOperand(0).setReg(DstReg); 5613 5614 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 5615 // these are deleted later, but at -O0 it would leave a suspicious 5616 // looking illegal copy of an undef register. 5617 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 5618 Inst.RemoveOperand(I); 5619 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 5620 continue; 5621 } 5622 5623 NewDstReg = MRI.createVirtualRegister(NewDstRC); 5624 MRI.replaceRegWith(DstReg, NewDstReg); 5625 } 5626 5627 // Legalize the operands 5628 legalizeOperands(Inst, MDT); 5629 5630 if (HasDst) 5631 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 5632 } 5633 } 5634 5635 // Add/sub require special handling to deal with carry outs. 5636 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 5637 MachineDominatorTree *MDT) const { 5638 if (ST.hasAddNoCarry()) { 5639 // Assume there is no user of scc since we don't select this in that case. 5640 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 5641 // is used. 
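    // Concretely, the S_ADD_I32 / S_SUB_I32 is retagged as V_ADD_U32_e64 /
    // V_SUB_U32_e64, the dead SCC def is dropped, and a zero clamp-bit
    // operand is appended.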
5642 5643 MachineBasicBlock &MBB = *Inst.getParent(); 5644 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5645 5646 Register OldDstReg = Inst.getOperand(0).getReg(); 5647 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5648 5649 unsigned Opc = Inst.getOpcode(); 5650 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 5651 5652 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 5653 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 5654 5655 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 5656 Inst.RemoveOperand(3); 5657 5658 Inst.setDesc(get(NewOpc)); 5659 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 5660 Inst.addImplicitDefUseOperands(*MBB.getParent()); 5661 MRI.replaceRegWith(OldDstReg, ResultReg); 5662 legalizeOperands(Inst, MDT); 5663 5664 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5665 return true; 5666 } 5667 5668 return false; 5669 } 5670 5671 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst, 5672 MachineDominatorTree *MDT) const { 5673 5674 MachineBasicBlock &MBB = *Inst.getParent(); 5675 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5676 MachineBasicBlock::iterator MII = Inst; 5677 DebugLoc DL = Inst.getDebugLoc(); 5678 5679 MachineOperand &Dest = Inst.getOperand(0); 5680 MachineOperand &Src0 = Inst.getOperand(1); 5681 MachineOperand &Src1 = Inst.getOperand(2); 5682 MachineOperand &Cond = Inst.getOperand(3); 5683 5684 Register SCCSource = Cond.getReg(); 5685 // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead. 5686 if (!Cond.isUndef()) { 5687 for (MachineInstr &CandI : 5688 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 5689 Inst.getParent()->rend())) { 5690 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 5691 -1) { 5692 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 5693 SCCSource = CandI.getOperand(1).getReg(); 5694 } 5695 break; 5696 } 5697 } 5698 } 5699 5700 // If this is a trivial select where the condition is effectively not SCC 5701 // (SCCSource is a source of copy to SCC), then the select is semantically 5702 // equivalent to copying SCCSource. Hence, there is no need to create 5703 // V_CNDMASK, we can just use that and bail out. 5704 if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) && 5705 Src1.isImm() && (Src1.getImm() == 0)) { 5706 MRI.replaceRegWith(Dest.getReg(), SCCSource); 5707 return; 5708 } 5709 5710 const TargetRegisterClass *TC = ST.getWavefrontSize() == 64 5711 ? &AMDGPU::SReg_64_XEXECRegClass 5712 : &AMDGPU::SReg_32_XM0_XEXECRegClass; 5713 Register CopySCC = MRI.createVirtualRegister(TC); 5714 5715 if (SCCSource == AMDGPU::SCC) { 5716 // Insert a trivial select instead of creating a copy, because a copy from 5717 // SCC would semantically mean just copying a single bit, but we may need 5718 // the result to be a vector condition mask that needs preserving. 5719 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64
                                      : AMDGPU::S_CSELECT_B32;
    auto NewSelect =
        BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
    NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
  } else {
    BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
  }

  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  auto UpdatedInst =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
          .addImm(0)
          .add(Src1) // False
          .addImm(0)
          .add(Src0) // True
          .addReg(CopySCC);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  legalizeOperands(*UpdatedInst, MDT);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                 MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  unsigned SubOp = ST.hasAddNoCarry() ?
    AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;

  BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
      .addImm(0)
      .addReg(Src.getReg());

  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
      .addReg(Src.getReg())
      .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  if (ST.hasDLInsts()) {
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
        .add(Src0)
        .add(Src1);

    MRI.replaceRegWith(Dest.getReg(), NewDest);
    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  } else {
    // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
    // invert either source and then perform the XOR. If either source is a
    // scalar register, then we can leave the inversion on the scalar unit to
    // achieve a better distribution of scalar and vector instructions.
    bool Src0IsSGPR = Src0.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
    bool Src1IsSGPR = Src1.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
    MachineInstr *Xor;
    Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    // Build a pair of scalar instructions and add them to the work list.
    // The next iteration over the work list will lower these to the vector
    // unit as necessary.
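    // Roughly, for x in an SGPR and y in a VGPR this becomes:
    //   s_not_b32 t, x
    //   s_xor_b32 d, t, y   ; re-queued and moved to v_xor_b32 if needed
    // which yields ~(x ^ y) without requiring v_xnor_b32.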
5808 if (Src0IsSGPR) { 5809 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5810 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5811 .addReg(Temp) 5812 .add(Src1); 5813 } else if (Src1IsSGPR) { 5814 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5815 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5816 .add(Src0) 5817 .addReg(Temp); 5818 } else { 5819 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5820 .add(Src0) 5821 .add(Src1); 5822 MachineInstr *Not = 5823 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5824 Worklist.insert(Not); 5825 } 5826 5827 MRI.replaceRegWith(Dest.getReg(), NewDest); 5828 5829 Worklist.insert(Xor); 5830 5831 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5832 } 5833 } 5834 5835 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5836 MachineInstr &Inst, 5837 unsigned Opcode) const { 5838 MachineBasicBlock &MBB = *Inst.getParent(); 5839 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5840 MachineBasicBlock::iterator MII = Inst; 5841 const DebugLoc &DL = Inst.getDebugLoc(); 5842 5843 MachineOperand &Dest = Inst.getOperand(0); 5844 MachineOperand &Src0 = Inst.getOperand(1); 5845 MachineOperand &Src1 = Inst.getOperand(2); 5846 5847 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5848 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5849 5850 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5851 .add(Src0) 5852 .add(Src1); 5853 5854 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5855 .addReg(Interm); 5856 5857 Worklist.insert(&Op); 5858 Worklist.insert(&Not); 5859 5860 MRI.replaceRegWith(Dest.getReg(), NewDest); 5861 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5862 } 5863 5864 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5865 MachineInstr &Inst, 5866 unsigned Opcode) const { 5867 MachineBasicBlock &MBB = *Inst.getParent(); 5868 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5869 MachineBasicBlock::iterator MII = Inst; 5870 const DebugLoc &DL = Inst.getDebugLoc(); 5871 5872 MachineOperand &Dest = Inst.getOperand(0); 5873 MachineOperand &Src0 = Inst.getOperand(1); 5874 MachineOperand &Src1 = Inst.getOperand(2); 5875 5876 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5877 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5878 5879 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5880 .add(Src1); 5881 5882 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5883 .add(Src0) 5884 .addReg(Interm); 5885 5886 Worklist.insert(&Not); 5887 Worklist.insert(&Op); 5888 5889 MRI.replaceRegWith(Dest.getReg(), NewDest); 5890 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5891 } 5892 5893 void SIInstrInfo::splitScalar64BitUnaryOp( 5894 SetVectorType &Worklist, MachineInstr &Inst, 5895 unsigned Opcode) const { 5896 MachineBasicBlock &MBB = *Inst.getParent(); 5897 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5898 5899 MachineOperand &Dest = Inst.getOperand(0); 5900 MachineOperand &Src0 = Inst.getOperand(1); 5901 DebugLoc DL = Inst.getDebugLoc(); 5902 5903 MachineBasicBlock::iterator MII = Inst; 5904 5905 const MCInstrDesc &InstDesc = get(Opcode); 5906 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
5907 MRI.getRegClass(Src0.getReg()) : 5908 &AMDGPU::SGPR_32RegClass; 5909 5910 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5911 5912 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5913 AMDGPU::sub0, Src0SubRC); 5914 5915 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5916 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5917 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5918 5919 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5920 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5921 5922 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5923 AMDGPU::sub1, Src0SubRC); 5924 5925 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5926 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5927 5928 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5929 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5930 .addReg(DestSub0) 5931 .addImm(AMDGPU::sub0) 5932 .addReg(DestSub1) 5933 .addImm(AMDGPU::sub1); 5934 5935 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5936 5937 Worklist.insert(&LoHalf); 5938 Worklist.insert(&HiHalf); 5939 5940 // We don't need to legalizeOperands here because for a single operand, src0 5941 // will support any kind of input. 5942 5943 // Move all users of this moved value. 5944 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5945 } 5946 5947 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 5948 MachineInstr &Inst, 5949 MachineDominatorTree *MDT) const { 5950 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 5951 5952 MachineBasicBlock &MBB = *Inst.getParent(); 5953 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5954 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5955 5956 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5957 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5958 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5959 5960 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5961 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 5962 5963 MachineOperand &Dest = Inst.getOperand(0); 5964 MachineOperand &Src0 = Inst.getOperand(1); 5965 MachineOperand &Src1 = Inst.getOperand(2); 5966 const DebugLoc &DL = Inst.getDebugLoc(); 5967 MachineBasicBlock::iterator MII = Inst; 5968 5969 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 5970 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 5971 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5972 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5973 5974 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5975 AMDGPU::sub0, Src0SubRC); 5976 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5977 AMDGPU::sub0, Src1SubRC); 5978 5979 5980 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5981 AMDGPU::sub1, Src0SubRC); 5982 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5983 AMDGPU::sub1, Src1SubRC); 5984 5985 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .add(SrcReg0Sub0)
          .add(SrcReg1Sub0)
          .addImm(0); // clamp bit

  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .add(SrcReg0Sub1)
          .add(SrcReg1Sub1)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
6039 MRI.getRegClass(Src1.getReg()) : 6040 &AMDGPU::SGPR_32RegClass; 6041 6042 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6043 6044 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6045 AMDGPU::sub0, Src0SubRC); 6046 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6047 AMDGPU::sub0, Src1SubRC); 6048 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6049 AMDGPU::sub1, Src0SubRC); 6050 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6051 AMDGPU::sub1, Src1SubRC); 6052 6053 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6054 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6055 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6056 6057 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6058 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 6059 .add(SrcReg0Sub0) 6060 .add(SrcReg1Sub0); 6061 6062 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6063 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 6064 .add(SrcReg0Sub1) 6065 .add(SrcReg1Sub1); 6066 6067 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6068 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6069 .addReg(DestSub0) 6070 .addImm(AMDGPU::sub0) 6071 .addReg(DestSub1) 6072 .addImm(AMDGPU::sub1); 6073 6074 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6075 6076 Worklist.insert(&LoHalf); 6077 Worklist.insert(&HiHalf); 6078 6079 // Move all users of this moved vlaue. 6080 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6081 } 6082 6083 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 6084 MachineInstr &Inst, 6085 MachineDominatorTree *MDT) const { 6086 MachineBasicBlock &MBB = *Inst.getParent(); 6087 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6088 6089 MachineOperand &Dest = Inst.getOperand(0); 6090 MachineOperand &Src0 = Inst.getOperand(1); 6091 MachineOperand &Src1 = Inst.getOperand(2); 6092 const DebugLoc &DL = Inst.getDebugLoc(); 6093 6094 MachineBasicBlock::iterator MII = Inst; 6095 6096 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6097 6098 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 6099 6100 MachineOperand* Op0; 6101 MachineOperand* Op1; 6102 6103 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 6104 Op0 = &Src0; 6105 Op1 = &Src1; 6106 } else { 6107 Op0 = &Src1; 6108 Op1 = &Src0; 6109 } 6110 6111 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 6112 .add(*Op0); 6113 6114 Register NewDest = MRI.createVirtualRegister(DestRC); 6115 6116 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 6117 .addReg(Interm) 6118 .add(*Op1); 6119 6120 MRI.replaceRegWith(Dest.getReg(), NewDest); 6121 6122 Worklist.insert(&Xor); 6123 } 6124 6125 void SIInstrInfo::splitScalar64BitBCNT( 6126 SetVectorType &Worklist, MachineInstr &Inst) const { 6127 MachineBasicBlock &MBB = *Inst.getParent(); 6128 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6129 6130 MachineBasicBlock::iterator MII = Inst; 6131 const DebugLoc &DL = Inst.getDebugLoc(); 6132 6133 MachineOperand &Dest = Inst.getOperand(0); 6134 MachineOperand &Src = Inst.getOperand(1); 6135 6136 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 6137 const TargetRegisterClass *SrcRC = Src.isReg() ? 
6138 MRI.getRegClass(Src.getReg()) : 6139 &AMDGPU::SGPR_32RegClass; 6140 6141 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6142 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6143 6144 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 6145 6146 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6147 AMDGPU::sub0, SrcSubRC); 6148 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6149 AMDGPU::sub1, SrcSubRC); 6150 6151 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 6152 6153 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 6154 6155 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6156 6157 // We don't need to legalize operands here. src0 for etiher instruction can be 6158 // an SGPR, and the second input is unused or determined here. 6159 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6160 } 6161 6162 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 6163 MachineInstr &Inst) const { 6164 MachineBasicBlock &MBB = *Inst.getParent(); 6165 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6166 MachineBasicBlock::iterator MII = Inst; 6167 const DebugLoc &DL = Inst.getDebugLoc(); 6168 6169 MachineOperand &Dest = Inst.getOperand(0); 6170 uint32_t Imm = Inst.getOperand(2).getImm(); 6171 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6172 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6173 6174 (void) Offset; 6175 6176 // Only sext_inreg cases handled. 6177 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 6178 Offset == 0 && "Not implemented"); 6179 6180 if (BitWidth < 32) { 6181 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6182 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6183 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6184 6185 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 6186 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 6187 .addImm(0) 6188 .addImm(BitWidth); 6189 6190 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 6191 .addImm(31) 6192 .addReg(MidRegLo); 6193 6194 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6195 .addReg(MidRegLo) 6196 .addImm(AMDGPU::sub0) 6197 .addReg(MidRegHi) 6198 .addImm(AMDGPU::sub1); 6199 6200 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6201 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6202 return; 6203 } 6204 6205 MachineOperand &Src = Inst.getOperand(1); 6206 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6207 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6208 6209 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 6210 .addImm(31) 6211 .addReg(Src.getReg(), 0, AMDGPU::sub0); 6212 6213 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6214 .addReg(Src.getReg(), 0, AMDGPU::sub0) 6215 .addImm(AMDGPU::sub0) 6216 .addReg(TmpReg) 6217 .addImm(AMDGPU::sub1); 6218 6219 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6220 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6221 } 6222 6223 void SIInstrInfo::addUsersToMoveToVALUWorklist( 6224 Register DstReg, 6225 MachineRegisterInfo &MRI, 6226 SetVectorType &Worklist) const { 6227 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 6228 E = MRI.use_end(); I != E;) { 6229 MachineInstr &UseMI = *I->getParent(); 6230 6231 unsigned OpNo = 0; 
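    // For the pass-through pseudos below, the register class to check is the
    // user's own result (operand 0); for any other instruction it is the
    // class required at the operand that actually reads DstReg.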
6232 6233 switch (UseMI.getOpcode()) { 6234 case AMDGPU::COPY: 6235 case AMDGPU::WQM: 6236 case AMDGPU::SOFT_WQM: 6237 case AMDGPU::WWM: 6238 case AMDGPU::REG_SEQUENCE: 6239 case AMDGPU::PHI: 6240 case AMDGPU::INSERT_SUBREG: 6241 break; 6242 default: 6243 OpNo = I.getOperandNo(); 6244 break; 6245 } 6246 6247 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 6248 Worklist.insert(&UseMI); 6249 6250 do { 6251 ++I; 6252 } while (I != E && I->getParent() == &UseMI); 6253 } else { 6254 ++I; 6255 } 6256 } 6257 } 6258 6259 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 6260 MachineRegisterInfo &MRI, 6261 MachineInstr &Inst) const { 6262 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6263 MachineBasicBlock *MBB = Inst.getParent(); 6264 MachineOperand &Src0 = Inst.getOperand(1); 6265 MachineOperand &Src1 = Inst.getOperand(2); 6266 const DebugLoc &DL = Inst.getDebugLoc(); 6267 6268 switch (Inst.getOpcode()) { 6269 case AMDGPU::S_PACK_LL_B32_B16: { 6270 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6271 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6272 6273 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 6274 // 0. 6275 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6276 .addImm(0xffff); 6277 6278 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 6279 .addReg(ImmReg, RegState::Kill) 6280 .add(Src0); 6281 6282 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 6283 .add(Src1) 6284 .addImm(16) 6285 .addReg(TmpReg, RegState::Kill); 6286 break; 6287 } 6288 case AMDGPU::S_PACK_LH_B32_B16: { 6289 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6290 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6291 .addImm(0xffff); 6292 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 6293 .addReg(ImmReg, RegState::Kill) 6294 .add(Src0) 6295 .add(Src1); 6296 break; 6297 } 6298 case AMDGPU::S_PACK_HH_B32_B16: { 6299 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6300 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6301 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 6302 .addImm(16) 6303 .add(Src0); 6304 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6305 .addImm(0xffff0000); 6306 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 6307 .add(Src1) 6308 .addReg(ImmReg, RegState::Kill) 6309 .addReg(TmpReg, RegState::Kill); 6310 break; 6311 } 6312 default: 6313 llvm_unreachable("unhandled s_pack_* instruction"); 6314 } 6315 6316 MachineOperand &Dest = Inst.getOperand(0); 6317 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6318 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6319 } 6320 6321 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 6322 MachineInstr &SCCDefInst, 6323 SetVectorType &Worklist) const { 6324 bool SCCUsedImplicitly = false; 6325 6326 // Ensure that def inst defines SCC, which is still live. 6327 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 6328 !Op.isDead() && Op.getParent() == &SCCDefInst); 6329 SmallVector<MachineInstr *, 4> CopyToDelete; 6330 // This assumes that all the users of SCC are in the same block 6331 // as the SCC def. 6332 for (MachineInstr &MI : // Skip the def inst itself. 6333 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 6334 SCCDefInst.getParent()->end())) { 6335 // Check if SCC is used first. 
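    // Users of SCC split into two buckets: copies of SCC, whose consumers are
    // rewritten to read VCC directly and the copy deleted, and ordinary
    // readers, which are queued for moveToVALU. The scan stops at the next
    // redefinition of SCC.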
6336 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) { 6337 if (MI.isCopy()) { 6338 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6339 Register DestReg = MI.getOperand(0).getReg(); 6340 6341 for (auto &User : MRI.use_nodbg_instructions(DestReg)) { 6342 if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) || 6343 (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) { 6344 User.getOperand(4).setReg(RI.getVCC()); 6345 Worklist.insert(&User); 6346 } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) { 6347 User.getOperand(5).setReg(RI.getVCC()); 6348 // No need to add to Worklist. 6349 } 6350 } 6351 CopyToDelete.push_back(&MI); 6352 } else { 6353 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 || 6354 MI.getOpcode() == AMDGPU::S_CSELECT_B64) { 6355 // This is an implicit use of SCC and it is really expected by 6356 // the SCC users to handle. 6357 // We cannot preserve the edge to the user so add the explicit 6358 // copy: SCC = COPY VCC. 6359 // The copy will be cleaned up during the processing of the user 6360 // in lowerSelect. 6361 SCCUsedImplicitly = true; 6362 } 6363 6364 Worklist.insert(&MI); 6365 } 6366 } 6367 // Exit if we find another SCC def. 6368 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 6369 break; 6370 } 6371 for (auto &Copy : CopyToDelete) 6372 Copy->eraseFromParent(); 6373 6374 if (SCCUsedImplicitly) { 6375 BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()), 6376 SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC) 6377 .addReg(RI.getVCC()); 6378 } 6379 } 6380 6381 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 6382 const MachineInstr &Inst) const { 6383 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 6384 6385 switch (Inst.getOpcode()) { 6386 // For target instructions, getOpRegClass just returns the virtual register 6387 // class associated with the operand, so we need to find an equivalent VGPR 6388 // register class in order to move the instruction to the VALU. 6389 case AMDGPU::COPY: 6390 case AMDGPU::PHI: 6391 case AMDGPU::REG_SEQUENCE: 6392 case AMDGPU::INSERT_SUBREG: 6393 case AMDGPU::WQM: 6394 case AMDGPU::SOFT_WQM: 6395 case AMDGPU::WWM: { 6396 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 6397 if (RI.hasAGPRs(SrcRC)) { 6398 if (RI.hasAGPRs(NewDstRC)) 6399 return nullptr; 6400 6401 switch (Inst.getOpcode()) { 6402 case AMDGPU::PHI: 6403 case AMDGPU::REG_SEQUENCE: 6404 case AMDGPU::INSERT_SUBREG: 6405 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 6406 break; 6407 default: 6408 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6409 } 6410 6411 if (!NewDstRC) 6412 return nullptr; 6413 } else { 6414 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 6415 return nullptr; 6416 6417 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6418 if (!NewDstRC) 6419 return nullptr; 6420 } 6421 6422 return NewDstRC; 6423 } 6424 default: 6425 return NewDstRC; 6426 } 6427 } 6428 6429 // Find the one SGPR operand we are allowed to use. 6430 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 6431 int OpIndices[3]) const { 6432 const MCInstrDesc &Desc = MI.getDesc(); 6433 6434 // Find the one SGPR operand we are allowed to use. 6435 // 6436 // First we need to consider the instruction's operand requirements before 6437 // legalizing. Some operands are required to be SGPRs, such as implicit uses 6438 // of VCC, but we are still bound by the constant bus requirement to only use 6439 // one. 
6440 // 6441 // If the operand's class is an SGPR, we can never move it. 6442 6443 Register SGPRReg = findImplicitSGPRRead(MI); 6444 if (SGPRReg != AMDGPU::NoRegister) 6445 return SGPRReg; 6446 6447 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 6448 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6449 6450 for (unsigned i = 0; i < 3; ++i) { 6451 int Idx = OpIndices[i]; 6452 if (Idx == -1) 6453 break; 6454 6455 const MachineOperand &MO = MI.getOperand(Idx); 6456 if (!MO.isReg()) 6457 continue; 6458 6459 // Is this operand statically required to be an SGPR based on the operand 6460 // constraints? 6461 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 6462 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 6463 if (IsRequiredSGPR) 6464 return MO.getReg(); 6465 6466 // If this could be a VGPR or an SGPR, Check the dynamic register class. 6467 Register Reg = MO.getReg(); 6468 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 6469 if (RI.isSGPRClass(RegRC)) 6470 UsedSGPRs[i] = Reg; 6471 } 6472 6473 // We don't have a required SGPR operand, so we have a bit more freedom in 6474 // selecting operands to move. 6475 6476 // Try to select the most used SGPR. If an SGPR is equal to one of the 6477 // others, we choose that. 6478 // 6479 // e.g. 6480 // V_FMA_F32 v0, s0, s0, s0 -> No moves 6481 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 6482 6483 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 6484 // prefer those. 6485 6486 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 6487 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 6488 SGPRReg = UsedSGPRs[0]; 6489 } 6490 6491 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 6492 if (UsedSGPRs[1] == UsedSGPRs[2]) 6493 SGPRReg = UsedSGPRs[1]; 6494 } 6495 6496 return SGPRReg; 6497 } 6498 6499 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 6500 unsigned OperandName) const { 6501 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 6502 if (Idx == -1) 6503 return nullptr; 6504 6505 return &MI.getOperand(Idx); 6506 } 6507 6508 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 6509 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 6510 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT 6511 (1ULL << 56) | // RESOURCE_LEVEL = 1 6512 (3ULL << 60); // OOB_SELECT = 3 6513 } 6514 6515 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 6516 if (ST.isAmdHsaOS()) { 6517 // Set ATC = 1. GFX9 doesn't have this bit. 6518 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6519 RsrcDataFormat |= (1ULL << 56); 6520 6521 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 6522 // BTW, it disables TC L2 and therefore decreases performance. 6523 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 6524 RsrcDataFormat |= (2ULL << 59); 6525 } 6526 6527 return RsrcDataFormat; 6528 } 6529 6530 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 6531 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 6532 AMDGPU::RSRC_TID_ENABLE | 6533 0xffffffff; // Size; 6534 6535 // GFX9 doesn't have ELEMENT_SIZE. 6536 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 6537 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; 6538 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 6539 } 6540 6541 // IndexStride = 64 / 32. 6542 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 
3 : 2; 6543 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 6544 6545 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 6546 // Clear them unless we want a huge stride. 6547 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 6548 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 6549 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 6550 6551 return Rsrc23; 6552 } 6553 6554 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 6555 unsigned Opc = MI.getOpcode(); 6556 6557 return isSMRD(Opc); 6558 } 6559 6560 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 6561 return get(Opc).mayLoad() && 6562 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 6563 } 6564 6565 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 6566 int &FrameIndex) const { 6567 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 6568 if (!Addr || !Addr->isFI()) 6569 return AMDGPU::NoRegister; 6570 6571 assert(!MI.memoperands_empty() && 6572 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 6573 6574 FrameIndex = Addr->getIndex(); 6575 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 6576 } 6577 6578 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 6579 int &FrameIndex) const { 6580 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 6581 assert(Addr && Addr->isFI()); 6582 FrameIndex = Addr->getIndex(); 6583 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 6584 } 6585 6586 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 6587 int &FrameIndex) const { 6588 if (!MI.mayLoad()) 6589 return AMDGPU::NoRegister; 6590 6591 if (isMUBUF(MI) || isVGPRSpill(MI)) 6592 return isStackAccess(MI, FrameIndex); 6593 6594 if (isSGPRSpill(MI)) 6595 return isSGPRStackAccess(MI, FrameIndex); 6596 6597 return AMDGPU::NoRegister; 6598 } 6599 6600 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 6601 int &FrameIndex) const { 6602 if (!MI.mayStore()) 6603 return AMDGPU::NoRegister; 6604 6605 if (isMUBUF(MI) || isVGPRSpill(MI)) 6606 return isStackAccess(MI, FrameIndex); 6607 6608 if (isSGPRSpill(MI)) 6609 return isSGPRStackAccess(MI, FrameIndex); 6610 6611 return AMDGPU::NoRegister; 6612 } 6613 6614 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 6615 unsigned Size = 0; 6616 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 6617 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 6618 while (++I != E && I->isInsideBundle()) { 6619 assert(!I->isBundle() && "No nested bundle!"); 6620 Size += getInstSizeInBytes(*I); 6621 } 6622 6623 return Size; 6624 } 6625 6626 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 6627 unsigned Opc = MI.getOpcode(); 6628 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 6629 unsigned DescSize = Desc.getSize(); 6630 6631 // If we have a definitive size, we can use it. Otherwise we need to inspect 6632 // the operands to know the size. 6633 if (isFixedSize(MI)) { 6634 unsigned Size = DescSize; 6635 6636 // If we hit the buggy offset, an extra nop will be inserted in MC so 6637 // estimate the worst case. 6638 if (MI.isBranch() && ST.hasOffset3fBug()) 6639 Size += 4; 6640 6641 return Size; 6642 } 6643 6644 // 4-byte instructions may have a 32-bit literal encoded after them. Check 6645 // operands that coud ever be literals. 
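  // For example, a VOP2 add with only inline-constant/register sources keeps
  // its 4-byte DescSize, one carrying a 32-bit literal is reported as
  // DescSize + 4 = 8, and a VOP3 encoding with a literal comes out as 12.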
6646 if (isVALU(MI) || isSALU(MI)) { 6647 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 6648 if (Src0Idx == -1) 6649 return DescSize; // No operands. 6650 6651 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx])) 6652 return isVOP3(MI) ? 12 : (DescSize + 4); 6653 6654 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 6655 if (Src1Idx == -1) 6656 return DescSize; 6657 6658 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) 6659 return isVOP3(MI) ? 12 : (DescSize + 4); 6660 6661 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); 6662 if (Src2Idx == -1) 6663 return DescSize; 6664 6665 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) 6666 return isVOP3(MI) ? 12 : (DescSize + 4); 6667 6668 return DescSize; 6669 } 6670 6671 // Check whether we have extra NSA words. 6672 if (isMIMG(MI)) { 6673 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 6674 if (VAddr0Idx < 0) 6675 return 8; 6676 6677 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 6678 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 6679 } 6680 6681 switch (Opc) { 6682 case TargetOpcode::IMPLICIT_DEF: 6683 case TargetOpcode::KILL: 6684 case TargetOpcode::DBG_VALUE: 6685 case TargetOpcode::EH_LABEL: 6686 return 0; 6687 case TargetOpcode::BUNDLE: 6688 return getInstBundleSize(MI); 6689 case TargetOpcode::INLINEASM: 6690 case TargetOpcode::INLINEASM_BR: { 6691 const MachineFunction *MF = MI.getParent()->getParent(); 6692 const char *AsmStr = MI.getOperand(0).getSymbolName(); 6693 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST); 6694 } 6695 default: 6696 return DescSize; 6697 } 6698 } 6699 6700 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 6701 if (!isFLAT(MI)) 6702 return false; 6703 6704 if (MI.memoperands_empty()) 6705 return true; 6706 6707 for (const MachineMemOperand *MMO : MI.memoperands()) { 6708 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 6709 return true; 6710 } 6711 return false; 6712 } 6713 6714 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 6715 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 6716 } 6717 6718 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 6719 MachineBasicBlock *IfEnd) const { 6720 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 6721 assert(TI != IfEntry->end()); 6722 6723 MachineInstr *Branch = &(*TI); 6724 MachineFunction *MF = IfEntry->getParent(); 6725 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 6726 6727 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6728 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6729 MachineInstr *SIIF = 6730 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 6731 .add(Branch->getOperand(0)) 6732 .add(Branch->getOperand(1)); 6733 MachineInstr *SIEND = 6734 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 6735 .addReg(DstReg); 6736 6737 IfEntry->erase(TI); 6738 IfEntry->insert(IfEntry->end(), SIIF); 6739 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 6740 } 6741 } 6742 6743 void SIInstrInfo::convertNonUniformLoopRegion( 6744 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 6745 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 6746 // We expect 2 terminators, one conditional and one unconditional. 
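// Only the conditional branch (SI_NON_UNIFORM_BRCOND_PSEUDO) is rewritten
// here: it becomes an SI_IF_BREAK feeding an SI_LOOP at the end of the
// block, with a PHI in the loop header accumulating the break mask from the
// back edge. The unconditional terminator is left untouched.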
6747 assert(TI != LoopEnd->end()); 6748 6749 MachineInstr *Branch = &(*TI); 6750 MachineFunction *MF = LoopEnd->getParent(); 6751 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 6752 6753 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6754 6755 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6756 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 6757 MachineInstrBuilder HeaderPHIBuilder = 6758 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 6759 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 6760 E = LoopEntry->pred_end(); 6761 PI != E; ++PI) { 6762 if (*PI == LoopEnd) { 6763 HeaderPHIBuilder.addReg(BackEdgeReg); 6764 } else { 6765 MachineBasicBlock *PMBB = *PI; 6766 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 6767 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 6768 ZeroReg, 0); 6769 HeaderPHIBuilder.addReg(ZeroReg); 6770 } 6771 HeaderPHIBuilder.addMBB(*PI); 6772 } 6773 MachineInstr *HeaderPhi = HeaderPHIBuilder; 6774 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 6775 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 6776 .addReg(DstReg) 6777 .add(Branch->getOperand(0)); 6778 MachineInstr *SILOOP = 6779 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 6780 .addReg(BackEdgeReg) 6781 .addMBB(LoopEntry); 6782 6783 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 6784 LoopEnd->erase(TI); 6785 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 6786 LoopEnd->insert(LoopEnd->end(), SILOOP); 6787 } 6788 } 6789 6790 ArrayRef<std::pair<int, const char *>> 6791 SIInstrInfo::getSerializableTargetIndices() const { 6792 static const std::pair<int, const char *> TargetIndices[] = { 6793 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 6794 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 6795 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 6796 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 6797 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 6798 return makeArrayRef(TargetIndices); 6799 } 6800 6801 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 6802 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 6803 ScheduleHazardRecognizer * 6804 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 6805 const ScheduleDAG *DAG) const { 6806 return new GCNHazardRecognizer(DAG->MF); 6807 } 6808 6809 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 6810 /// pass. 
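/// Unlike the variant above that is handed a ScheduleDAG, this one is
/// constructed directly from the MachineFunction.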
6811 ScheduleHazardRecognizer * 6812 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 6813 return new GCNHazardRecognizer(MF); 6814 } 6815 6816 std::pair<unsigned, unsigned> 6817 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 6818 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 6819 } 6820 6821 ArrayRef<std::pair<unsigned, const char *>> 6822 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 6823 static const std::pair<unsigned, const char *> TargetFlags[] = { 6824 { MO_GOTPCREL, "amdgpu-gotprel" }, 6825 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 6826 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 6827 { MO_REL32_LO, "amdgpu-rel32-lo" }, 6828 { MO_REL32_HI, "amdgpu-rel32-hi" }, 6829 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 6830 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 6831 }; 6832 6833 return makeArrayRef(TargetFlags); 6834 } 6835 6836 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 6837 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 6838 MI.modifiesRegister(AMDGPU::EXEC, &RI); 6839 } 6840 6841 MachineInstrBuilder 6842 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6843 MachineBasicBlock::iterator I, 6844 const DebugLoc &DL, 6845 Register DestReg) const { 6846 if (ST.hasAddNoCarry()) 6847 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 6848 6849 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6850 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 6851 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 6852 6853 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 6854 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6855 } 6856 6857 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6858 MachineBasicBlock::iterator I, 6859 const DebugLoc &DL, 6860 Register DestReg, 6861 RegScavenger &RS) const { 6862 if (ST.hasAddNoCarry()) 6863 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 6864 6865 // If available, prefer to use vcc. 6866 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 6867 ? Register(RI.getVCC()) 6868 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 6869 6870 // TODO: Users need to deal with this. 6871 if (!UnusedCarry.isValid()) 6872 return MachineInstrBuilder(); 6873 6874 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 6875 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6876 } 6877 6878 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 6879 switch (Opcode) { 6880 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 6881 case AMDGPU::SI_KILL_I1_TERMINATOR: 6882 return true; 6883 default: 6884 return false; 6885 } 6886 } 6887 6888 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 6889 switch (Opcode) { 6890 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 6891 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 6892 case AMDGPU::SI_KILL_I1_PSEUDO: 6893 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 6894 default: 6895 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 6896 } 6897 } 6898 6899 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 6900 if (!ST.isWave32()) 6901 return; 6902 6903 for (auto &Op : MI.implicit_operands()) { 6904 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 6905 Op.setReg(AMDGPU::VCC_LO); 6906 } 6907 } 6908 6909 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 6910 if (!isSMRD(MI)) 6911 return false; 6912 6913 // Check that it is using a buffer resource. 
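// Buffer SMRD forms take a 128-bit resource descriptor in sbase rather than
// a 64-bit base address, so the operand's register class distinguishes them.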
6914 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 6915 if (Idx == -1) // e.g. s_memtime 6916 return false; 6917 6918 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 6919 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 6920 } 6921 6922 unsigned SIInstrInfo::getNumFlatOffsetBits(bool Signed) const { 6923 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) 6924 return Signed ? 12 : 11; 6925 6926 return Signed ? 13 : 12; 6927 } 6928 6929 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 6930 bool Signed) const { 6931 // TODO: Should 0 be special cased? 6932 if (!ST.hasFlatInstOffsets()) 6933 return false; 6934 6935 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS) 6936 return false; 6937 6938 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) 6939 return Signed ? isInt<12>(Offset) : isUInt<11>(Offset); 6940 6941 return Signed ? isInt<13>(Offset) :isUInt<12>(Offset); 6942 } 6943 6944 6945 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 6946 enum SIEncodingFamily { 6947 SI = 0, 6948 VI = 1, 6949 SDWA = 2, 6950 SDWA9 = 3, 6951 GFX80 = 4, 6952 GFX9 = 5, 6953 GFX10 = 6, 6954 SDWA10 = 7 6955 }; 6956 6957 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 6958 switch (ST.getGeneration()) { 6959 default: 6960 break; 6961 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 6962 case AMDGPUSubtarget::SEA_ISLANDS: 6963 return SIEncodingFamily::SI; 6964 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 6965 case AMDGPUSubtarget::GFX9: 6966 return SIEncodingFamily::VI; 6967 case AMDGPUSubtarget::GFX10: 6968 return SIEncodingFamily::GFX10; 6969 } 6970 llvm_unreachable("Unknown subtarget generation!"); 6971 } 6972 6973 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 6974 switch(MCOp) { 6975 // These opcodes use indirect register addressing so 6976 // they need special handling by codegen (currently missing). 6977 // Therefore it is too risky to allow these opcodes 6978 // to be selected by dpp combiner or sdwa peepholer. 6979 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 6980 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 6981 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 6982 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 6983 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 6984 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 6985 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 6986 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 6987 return true; 6988 default: 6989 return false; 6990 } 6991 } 6992 6993 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 6994 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 6995 6996 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 6997 ST.getGeneration() == AMDGPUSubtarget::GFX9) 6998 Gen = SIEncodingFamily::GFX9; 6999 7000 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 7001 // subtarget has UnpackedD16VMem feature. 7002 // TODO: remove this when we discard GFX80 encoding. 7003 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 7004 Gen = SIEncodingFamily::GFX80; 7005 7006 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 7007 switch (ST.getGeneration()) { 7008 default: 7009 Gen = SIEncodingFamily::SDWA; 7010 break; 7011 case AMDGPUSubtarget::GFX9: 7012 Gen = SIEncodingFamily::SDWA9; 7013 break; 7014 case AMDGPUSubtarget::GFX10: 7015 Gen = SIEncodingFamily::SDWA10; 7016 break; 7017 } 7018 } 7019 7020 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 7021 7022 // -1 means that Opcode is already a native instruction. 
7023 if (MCOp == -1)
7024 return Opcode;
7025
7026 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
7027 // no encoding in the given subtarget generation.
7028 if (MCOp == (uint16_t)-1)
7029 return -1;
7030
7031 if (isAsmOnlyOpcode(MCOp))
7032 return -1;
7033
7034 return MCOp;
7035 }
7036
7037 static
7038 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
7039 assert(RegOpnd.isReg());
7040 return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
7041 getRegSubRegPair(RegOpnd);
7042 }
7043
7044 TargetInstrInfo::RegSubRegPair
7045 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7046 assert(MI.isRegSequence());
7047 for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
7048 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7049 auto &RegOp = MI.getOperand(1 + 2 * I);
7050 return getRegOrUndef(RegOp);
7051 }
7052 return TargetInstrInfo::RegSubRegPair();
7053 }
7054
7055 // Try to find the definition of reg:subreg in subreg-manipulation pseudos.
7056 // Following a subreg of reg:subreg isn't supported.
7057 static bool followSubRegDef(MachineInstr &MI,
7058 TargetInstrInfo::RegSubRegPair &RSR) {
7059 if (!RSR.SubReg)
7060 return false;
7061 switch (MI.getOpcode()) {
7062 default: break;
7063 case AMDGPU::REG_SEQUENCE:
7064 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7065 return true;
7066 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
7067 case AMDGPU::INSERT_SUBREG:
7068 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7069 // inserted the subreg we're looking for
7070 RSR = getRegOrUndef(MI.getOperand(2));
7071 else { // the subreg in the rest of the reg
7072 auto R1 = getRegOrUndef(MI.getOperand(1));
7073 if (R1.SubReg) // subreg of subreg isn't supported
7074 return false;
7075 RSR.Reg = R1.Reg;
7076 }
7077 return true;
7078 }
7079 return false;
7080 }
7081
7082 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
7083 MachineRegisterInfo &MRI) {
7084 assert(MRI.isSSA());
7085 if (!P.Reg.isVirtual())
7086 return nullptr;
7087
7088 auto RSR = P;
7089 auto *DefInst = MRI.getVRegDef(RSR.Reg);
7090 while (auto *MI = DefInst) {
7091 DefInst = nullptr;
7092 switch (MI->getOpcode()) {
7093 case AMDGPU::COPY:
7094 case AMDGPU::V_MOV_B32_e32: {
7095 auto &Op1 = MI->getOperand(1);
7096 if (Op1.isReg() && Op1.getReg().isVirtual()) {
7097 if (Op1.isUndef())
7098 return nullptr;
7099 RSR = getRegSubRegPair(Op1);
7100 DefInst = MRI.getVRegDef(RSR.Reg);
7101 }
7102 break;
7103 }
7104 default:
7105 if (followSubRegDef(*MI, RSR)) {
7106 if (!RSR.Reg)
7107 return nullptr;
7108 DefInst = MRI.getVRegDef(RSR.Reg);
7109 }
7110 }
7111 if (!DefInst)
7112 return MI;
7113 }
7114 return nullptr;
7115 }
7116
7117 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
7118 Register VReg,
7119 const MachineInstr &DefMI,
7120 const MachineInstr &UseMI) {
7121 assert(MRI.isSSA() && "Must be run on SSA");
7122
7123 auto *TRI = MRI.getTargetRegisterInfo();
7124 auto *DefBB = DefMI.getParent();
7125
7126 // Don't bother searching between blocks, although it is possible this block
7127 // doesn't modify exec.
7128 if (UseMI.getParent() != DefBB)
7129 return true;
7130
7131 const int MaxInstScan = 20;
7132 int NumInst = 0;
7133
7134 // Stop scan at the use.
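// Conservatively report a possible EXEC clobber if the scan budget is
// exhausted before the use is reached.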
7135 auto E = UseMI.getIterator(); 7136 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 7137 if (I->isDebugInstr()) 7138 continue; 7139 7140 if (++NumInst > MaxInstScan) 7141 return true; 7142 7143 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 7144 return true; 7145 } 7146 7147 return false; 7148 } 7149 7150 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 7151 Register VReg, 7152 const MachineInstr &DefMI) { 7153 assert(MRI.isSSA() && "Must be run on SSA"); 7154 7155 auto *TRI = MRI.getTargetRegisterInfo(); 7156 auto *DefBB = DefMI.getParent(); 7157 7158 const int MaxUseScan = 10; 7159 int NumUse = 0; 7160 7161 for (auto &Use : MRI.use_nodbg_operands(VReg)) { 7162 auto &UseInst = *Use.getParent(); 7163 // Don't bother searching between blocks, although it is possible this block 7164 // doesn't modify exec. 7165 if (UseInst.getParent() != DefBB) 7166 return true; 7167 7168 if (++NumUse > MaxUseScan) 7169 return true; 7170 } 7171 7172 if (NumUse == 0) 7173 return false; 7174 7175 const int MaxInstScan = 20; 7176 int NumInst = 0; 7177 7178 // Stop scan when we have seen all the uses. 7179 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 7180 assert(I != DefBB->end()); 7181 7182 if (I->isDebugInstr()) 7183 continue; 7184 7185 if (++NumInst > MaxInstScan) 7186 return true; 7187 7188 for (const MachineOperand &Op : I->operands()) { 7189 // We don't check reg masks here as they're used only on calls: 7190 // 1. EXEC is only considered const within one BB 7191 // 2. Call should be a terminator instruction if present in a BB 7192 7193 if (!Op.isReg()) 7194 continue; 7195 7196 Register Reg = Op.getReg(); 7197 if (Op.isUse()) { 7198 if (Reg == VReg && --NumUse == 0) 7199 return false; 7200 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC)) 7201 return true; 7202 } 7203 } 7204 } 7205 7206 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 7207 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 7208 const DebugLoc &DL, Register Src, Register Dst) const { 7209 auto Cur = MBB.begin(); 7210 if (Cur != MBB.end()) 7211 do { 7212 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 7213 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 7214 ++Cur; 7215 } while (Cur != MBB.end() && Cur != LastPHIIt); 7216 7217 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 7218 Dst); 7219 } 7220 7221 MachineInstr *SIInstrInfo::createPHISourceCopy( 7222 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 7223 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const { 7224 if (InsPt != MBB.end() && 7225 (InsPt->getOpcode() == AMDGPU::SI_IF || 7226 InsPt->getOpcode() == AMDGPU::SI_ELSE || 7227 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 7228 InsPt->definesRegister(Src)) { 7229 InsPt++; 7230 return BuildMI(MBB, InsPt, DL, 7231 get(ST.isWave32() ? AMDGPU::S_MOV_B32_term 7232 : AMDGPU::S_MOV_B64_term), 7233 Dst) 7234 .addReg(Src, 0, SrcSubReg) 7235 .addReg(AMDGPU::EXEC, RegState::Implicit); 7236 } 7237 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 7238 Dst); 7239 } 7240 7241 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 7242 7243 MachineInstr *SIInstrInfo::foldMemoryOperandImpl( 7244 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 7245 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, 7246 VirtRegMap *VRM) const { 7247 // This is a bit of a hack (copied from AArch64). 
Consider this instruction: 7248 // 7249 // %0:sreg_32 = COPY $m0 7250 // 7251 // We explicitly chose SReg_32 for the virtual register so such a copy might 7252 // be eliminated by RegisterCoalescer. However, that may not be possible, and 7253 // %0 may even spill. We can't spill $m0 normally (it would require copying to 7254 // a numbered SGPR anyway), and since it is in the SReg_32 register class, 7255 // TargetInstrInfo::foldMemoryOperand() is going to try. 7256 // A similar issue also exists with spilling and reloading $exec registers. 7257 // 7258 // To prevent that, constrain the %0 register class here. 7259 if (MI.isFullCopy()) { 7260 Register DstReg = MI.getOperand(0).getReg(); 7261 Register SrcReg = MI.getOperand(1).getReg(); 7262 if ((DstReg.isVirtual() || SrcReg.isVirtual()) && 7263 (DstReg.isVirtual() != SrcReg.isVirtual())) { 7264 MachineRegisterInfo &MRI = MF.getRegInfo(); 7265 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg; 7266 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg); 7267 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) { 7268 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 7269 return nullptr; 7270 } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) { 7271 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass); 7272 return nullptr; 7273 } 7274 } 7275 } 7276 7277 return nullptr; 7278 } 7279 7280 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 7281 const MachineInstr &MI, 7282 unsigned *PredCost) const { 7283 if (MI.isBundle()) { 7284 MachineBasicBlock::const_instr_iterator I(MI.getIterator()); 7285 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end()); 7286 unsigned Lat = 0, Count = 0; 7287 for (++I; I != E && I->isBundledWithPred(); ++I) { 7288 ++Count; 7289 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I)); 7290 } 7291 return Lat + Count - 1; 7292 } 7293 7294 return SchedModel.computeInstrLatency(&MI); 7295 } 7296 7297 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) { 7298 switch (MF.getFunction().getCallingConv()) { 7299 case CallingConv::AMDGPU_PS: 7300 return 1; 7301 case CallingConv::AMDGPU_VS: 7302 return 2; 7303 case CallingConv::AMDGPU_GS: 7304 return 3; 7305 case CallingConv::AMDGPU_HS: 7306 case CallingConv::AMDGPU_LS: 7307 case CallingConv::AMDGPU_ES: 7308 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 7309 case CallingConv::AMDGPU_CS: 7310 case CallingConv::AMDGPU_KERNEL: 7311 case CallingConv::C: 7312 case CallingConv::Fast: 7313 default: 7314 // Assume other calling conventions are various compute callable functions 7315 return 0; 7316 } 7317 } 7318