//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_ACCVGPR_READ_B32:
  case AMDGPU::V_ACCVGPR_WRITE_B32:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluded
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      Width = getOpSize(LdSt, DataOpIdx);
    } else {
      // The 2 offset instructions use offset0 and offset1 instead. We can treat
      // these as a load with a single offset if the 2 offsets are consecutive.
      // We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm();
      unsigned Offset1 = Offset1Op->getImm();
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = getOpSize(LdSt, DataOpIdx);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width += getOpSize(LdSt, DataOpIdx);
      } else {
        Width = getOpSize(LdSt, DataOpIdx);
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg()) {
      // We can only handle this if it's a stack access, as any other resource
      // would require reporting multiple base registers.
      const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (AddrReg && !AddrReg->isFI())
        return false;

      const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      const SIMachineFunctionInfo *MFI
        = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
      if (RSrc->getReg() != MFI->getScratchRSrcReg())
        return false;

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseOps.push_back(RSrc);
      BaseOps.push_back(SOffset);
      Offset = OffsetImm->getImm();
    } else {
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
        return false;
      BaseOps.push_back(BaseOp);

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (BaseOp)
        BaseOps.push_back(BaseOp);

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      Offset = OffsetImm->getImm();
      if (SOffset) // soffset can be an inline immediate.
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isMIMG(LdSt)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  assert(!BaseOps1.empty() && !BaseOps2.empty());
  const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
  const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
    return false;

  // In order to avoid register pressure, on average, the number of DWORDS
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is a
  // brief summary of how the heuristic behaves for various `LoadSize`.
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to directly copy, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
      bool SafeToPropagate = true;
      for (auto I = Def; I != MI && SafeToPropagate; ++I)
        if (I->modifiesRegister(DefOp.getReg(), &RI))
          SafeToPropagate = false;

      if (!SafeToPropagate)
        break;

      DefOp.setIsKill(false);
    }

    MachineInstrBuilder Builder =
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .add(DefOp);
    if (ImpDefSuperReg)
      Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,
                     getKillRegState(KillSrc) | RegState::Implicit);
    }

    return;
  }

  RS.enterBasicBlock(MBB);
  RS.forward(MI);

  // Ideally we want to have three registers for a long reg_sequence copy
  // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
  unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                             *MBB.getParent());

  // Registers in the sequence are allocated contiguously so we can just
  // use register number to pick one of three round-robin temps.
  unsigned RegNo = DestReg % 3;
  Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
  if (!Tmp)
    report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  RS.setRegUsed(Tmp);
  // Only loop through if there are any free registers left, otherwise
  // scavenger may report a fatal error without emergency spill slot
  // or spill with the slot.
  while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
    Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
    if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
      break;
    Tmp = Tmp2;
    RS.setRegUsed(Tmp);
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
    .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
      .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           const TargetRegisterClass *RC, bool Forward) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();
  ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
  MachineBasicBlock::iterator I = MI;
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register Reg = RI.getSubReg(DestReg, SubIdx);
    unsigned Opcode = AMDGPU::S_MOV_B32;

    // Is SGPR aligned? If so try to combine with next.
    Register Src = RI.getSubReg(SrcReg, SubIdx);
    bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
               .addReg(RI.getSubReg(SrcReg, SubIdx))
               .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16 bit and 32 bit
  // registers until all patterns are fixed.
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      // This copy can only be produced by patterns
      // with explicit SCC, which are known to be enabled
      // only for subtargets with S_CMP_LG_U64 present.
      assert(ST.hasScalarCompareEq64());
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    } else {
      assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    }

    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block on
    // every AGPR spill.
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
                 .addImm(0) // src0_modifiers
                 .addReg(NewSrcReg)
                 .addImm(0) // clamp
                 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
                 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
    expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
    return;
  }

  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32;
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (destination is an AGPR), we need a scavenger.
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan the
  // whole block for every handled copy.
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS.reset(new RegScavenger());

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, 4);

  // If there is an overlap, we can't kill the super-register on the last
  // instruction, since it will also kill the components made live by this def.
  const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                         RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
                         ImpDefSuper, ImpUseSuper);
    } else {
      MachineInstrBuilder Builder =
        BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
          .addReg(RI.getSubReg(SrcReg, SubIdx));
      if (Idx == 0)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo(
  unsigned VecSize, unsigned EltSize, bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWritePseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWritePseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWritePseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_A96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_A160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_A192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_A256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on number sgprs, so we
    // need to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.
    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_A96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_A160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_A192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_A256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)           // vaddr
    .addReg(MFI->getScratchRSrcReg())    // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
    .addImm(0)                           // offset
    .addMemOperand(MMO);
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertNoops(MBB, MI, 1);
}

void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              unsigned Quantity) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
  }
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;
  case AMDGPU::S_OR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B64));
    break;
  case AMDGPU::S_OR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::S_ANDN2_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register Dst = MI.getOperand(0).getReg();
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
    expandMovDPP64(MI);
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
      .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(NotOpc), Exec)
      .addReg(Exec);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1677 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1678 .addReg(Exec); 1679 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1680 MI.getOperand(0).getReg()) 1681 .add(MI.getOperand(2)); 1682 expandPostRAPseudo(*Copy); 1683 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1684 .addReg(Exec); 1685 MI.eraseFromParent(); 1686 break; 1687 } 1688 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1689 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1690 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1691 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1692 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1693 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1694 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1695 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1696 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1697 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1698 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1699 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1700 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1701 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1702 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1703 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1704 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1705 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1706 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1707 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1708 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1709 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1710 1711 unsigned Opc; 1712 if (RI.hasVGPRs(EltRC)) { 1713 Opc = ST.useVGPRIndexMode() ? 1714 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1715 } else { 1716 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1717 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1718 } 1719 1720 const MCInstrDesc &OpDesc = get(Opc); 1721 Register VecReg = MI.getOperand(0).getReg(); 1722 bool IsUndef = MI.getOperand(1).isUndef(); 1723 unsigned SubReg = MI.getOperand(3).getImm(); 1724 assert(VecReg == MI.getOperand(1).getReg()); 1725 1726 MachineInstrBuilder MIB = 1727 BuildMI(MBB, MI, DL, OpDesc) 1728 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1729 .add(MI.getOperand(2)) 1730 .addReg(VecReg, RegState::ImplicitDefine) 1731 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1732 1733 const int ImpDefIdx = 1734 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1735 const int ImpUseIdx = ImpDefIdx + 1; 1736 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1737 MI.eraseFromParent(); 1738 break; 1739 } 1740 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1741 MachineFunction &MF = *MBB.getParent(); 1742 Register Reg = MI.getOperand(0).getReg(); 1743 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1744 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1745 1746 // Create a bundle so these instructions won't be re-ordered by the 1747 // post-RA scheduler. 1748 MIBundleBuilder Bundler(MBB, MI); 1749 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1750 1751 // Add 32-bit offset from this instruction to the start of the 1752 // constant data. 1753 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1754 .addReg(RegLo) 1755 .add(MI.getOperand(1))); 1756 1757 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1758 .addReg(RegHi); 1759 MIB.add(MI.getOperand(2)); 1760 1761 Bundler.append(MIB); 1762 finalizeBundle(MBB, Bundler.begin()); 1763 1764 MI.eraseFromParent(); 1765 break; 1766 } 1767 case AMDGPU::ENTER_WWM: { 1768 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1769 // WWM is entered. 
1770 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1771 : AMDGPU::S_OR_SAVEEXEC_B64)); 1772 break; 1773 } 1774 case AMDGPU::EXIT_WWM: { 1775 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1776 // WWM is exited. 1777 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1778 break; 1779 } 1780 } 1781 return true; 1782 } 1783 1784 std::pair<MachineInstr*, MachineInstr*> 1785 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1786 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1787 1788 MachineBasicBlock &MBB = *MI.getParent(); 1789 DebugLoc DL = MBB.findDebugLoc(MI); 1790 MachineFunction *MF = MBB.getParent(); 1791 MachineRegisterInfo &MRI = MF->getRegInfo(); 1792 Register Dst = MI.getOperand(0).getReg(); 1793 unsigned Part = 0; 1794 MachineInstr *Split[2]; 1795 1796 1797 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1798 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1799 if (Dst.isPhysical()) { 1800 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1801 } else { 1802 assert(MRI.isSSA()); 1803 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1804 MovDPP.addDef(Tmp); 1805 } 1806 1807 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1808 const MachineOperand &SrcOp = MI.getOperand(I); 1809 assert(!SrcOp.isFPImm()); 1810 if (SrcOp.isImm()) { 1811 APInt Imm(64, SrcOp.getImm()); 1812 Imm.ashrInPlace(Part * 32); 1813 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1814 } else { 1815 assert(SrcOp.isReg()); 1816 Register Src = SrcOp.getReg(); 1817 if (Src.isPhysical()) 1818 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1819 else 1820 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub); 1821 } 1822 } 1823 1824 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1825 MovDPP.addImm(MI.getOperand(I).getImm()); 1826 1827 Split[Part] = MovDPP; 1828 ++Part; 1829 } 1830 1831 if (Dst.isVirtual()) 1832 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1833 .addReg(Split[0]->getOperand(0).getReg()) 1834 .addImm(AMDGPU::sub0) 1835 .addReg(Split[1]->getOperand(0).getReg()) 1836 .addImm(AMDGPU::sub1); 1837 1838 MI.eraseFromParent(); 1839 return std::make_pair(Split[0], Split[1]); 1840 } 1841 1842 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1843 MachineOperand &Src0, 1844 unsigned Src0OpName, 1845 MachineOperand &Src1, 1846 unsigned Src1OpName) const { 1847 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1848 if (!Src0Mods) 1849 return false; 1850 1851 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1852 assert(Src1Mods && 1853 "All commutable instructions have both src0 and src1 modifiers"); 1854 1855 int Src0ModsVal = Src0Mods->getImm(); 1856 int Src1ModsVal = Src1Mods->getImm(); 1857 1858 Src1Mods->setImm(Src0ModsVal); 1859 Src0Mods->setImm(Src1ModsVal); 1860 return true; 1861 } 1862 1863 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1864 MachineOperand &RegOp, 1865 MachineOperand &NonRegOp) { 1866 Register Reg = RegOp.getReg(); 1867 unsigned SubReg = RegOp.getSubReg(); 1868 bool IsKill = RegOp.isKill(); 1869 bool IsDead = RegOp.isDead(); 1870 bool IsUndef = RegOp.isUndef(); 1871 bool IsDebug = RegOp.isDebug(); 1872 1873 if (NonRegOp.isImm()) 1874 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1875 else if (NonRegOp.isFI()) 1876 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1877 else if (NonRegOp.isGlobal()) { 1878 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 1879 NonRegOp.getTargetFlags()); 1880 } else 1881 return nullptr; 
1882 1883 // Make sure we don't reinterpret a subreg index in the target flags. 1884 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 1885 1886 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1887 NonRegOp.setSubReg(SubReg); 1888 1889 return &MI; 1890 } 1891 1892 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1893 unsigned Src0Idx, 1894 unsigned Src1Idx) const { 1895 assert(!NewMI && "this should never be used"); 1896 1897 unsigned Opc = MI.getOpcode(); 1898 int CommutedOpcode = commuteOpcode(Opc); 1899 if (CommutedOpcode == -1) 1900 return nullptr; 1901 1902 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1903 static_cast<int>(Src0Idx) && 1904 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1905 static_cast<int>(Src1Idx) && 1906 "inconsistency with findCommutedOpIndices"); 1907 1908 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1909 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1910 1911 MachineInstr *CommutedMI = nullptr; 1912 if (Src0.isReg() && Src1.isReg()) { 1913 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1914 // Be sure to copy the source modifiers to the right place. 1915 CommutedMI 1916 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1917 } 1918 1919 } else if (Src0.isReg() && !Src1.isReg()) { 1920 // src0 should always be able to support any operand type, so no need to 1921 // check operand legality. 1922 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1923 } else if (!Src0.isReg() && Src1.isReg()) { 1924 if (isOperandLegal(MI, Src1Idx, &Src0)) 1925 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1926 } else { 1927 // FIXME: Found two non registers to commute. This does happen. 1928 return nullptr; 1929 } 1930 1931 if (CommutedMI) { 1932 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1933 Src1, AMDGPU::OpName::src1_modifiers); 1934 1935 CommutedMI->setDesc(get(CommutedOpcode)); 1936 } 1937 1938 return CommutedMI; 1939 } 1940 1941 // This needs to be implemented because the source modifiers may be inserted 1942 // between the true commutable operands, and the base 1943 // TargetInstrInfo::commuteInstruction uses it. 1944 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1945 unsigned &SrcOpIdx0, 1946 unsigned &SrcOpIdx1) const { 1947 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1948 } 1949 1950 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1951 unsigned &SrcOpIdx1) const { 1952 if (!Desc.isCommutable()) 1953 return false; 1954 1955 unsigned Opc = Desc.getOpcode(); 1956 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1957 if (Src0Idx == -1) 1958 return false; 1959 1960 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1961 if (Src1Idx == -1) 1962 return false; 1963 1964 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1965 } 1966 1967 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1968 int64_t BrOffset) const { 1969 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1970 // block is unanalyzable. 1971 assert(BranchOp != AMDGPU::S_SETPC_B64); 1972 1973 // Convert to dwords. 1974 BrOffset /= 4; 1975 1976 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1977 // from the next instruction. 
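// Worked example (illustrative numbers only): with the default 16-bit branch
// offset, a byte distance of 0x20000 becomes 0x20000 / 4 - 1 = 0x7fff dwords,
// which still fits in a signed 16-bit immediate, while 0x20004 no longer does.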
1978 BrOffset -= 1; 1979 1980 return isIntN(BranchOffsetBits, BrOffset); 1981 } 1982 1983 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1984 const MachineInstr &MI) const { 1985 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1986 // This would be a difficult analysis to perform, but can always be legal so 1987 // there's no need to analyze it. 1988 return nullptr; 1989 } 1990 1991 return MI.getOperand(0).getMBB(); 1992 } 1993 1994 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1995 MachineBasicBlock &DestBB, 1996 const DebugLoc &DL, 1997 int64_t BrOffset, 1998 RegScavenger *RS) const { 1999 assert(RS && "RegScavenger required for long branching"); 2000 assert(MBB.empty() && 2001 "new block should be inserted for expanding unconditional branch"); 2002 assert(MBB.pred_size() == 1); 2003 2004 MachineFunction *MF = MBB.getParent(); 2005 MachineRegisterInfo &MRI = MF->getRegInfo(); 2006 2007 // FIXME: Virtual register workaround for RegScavenger not working with empty 2008 // blocks. 2009 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2010 2011 auto I = MBB.end(); 2012 2013 // We need to compute the offset relative to the instruction immediately after 2014 // s_getpc_b64. Insert pc arithmetic code before last terminator. 2015 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2016 2017 // TODO: Handle > 32-bit block address. 2018 if (BrOffset >= 0) { 2019 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2020 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2021 .addReg(PCReg, 0, AMDGPU::sub0) 2022 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 2023 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2024 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2025 .addReg(PCReg, 0, AMDGPU::sub1) 2026 .addImm(0); 2027 } else { 2028 // Backwards branch. 2029 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) 2030 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2031 .addReg(PCReg, 0, AMDGPU::sub0) 2032 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); 2033 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) 2034 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2035 .addReg(PCReg, 0, AMDGPU::sub1) 2036 .addImm(0); 2037 } 2038 2039 // Insert the indirect branch after the other terminator. 2040 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2041 .addReg(PCReg); 2042 2043 // FIXME: If spilling is necessary, this will fail because this scavenger has 2044 // no emergency stack slots. It is non-trivial to spill in this situation, 2045 // because the restore code needs to be specially placed after the 2046 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2047 // block. 2048 // 2049 // If a spill is needed for the pc register pair, we need to insert a spill 2050 // restore block right before the destination block, and insert a short branch 2051 // into the old destination block's fallthrough predecessor. 2052 // e.g.: 2053 // 2054 // s_cbranch_scc0 skip_long_branch: 2055 // 2056 // long_branch_bb: 2057 // spill s[8:9] 2058 // s_getpc_b64 s[8:9] 2059 // s_add_u32 s8, s8, restore_bb 2060 // s_addc_u32 s9, s9, 0 2061 // s_setpc_b64 s[8:9] 2062 // 2063 // skip_long_branch: 2064 // foo; 2065 // 2066 // ..... 
2067 // 2068 // dest_bb_fallthrough_predecessor: 2069 // bar; 2070 // s_branch dest_bb 2071 // 2072 // restore_bb: 2073 // restore s[8:9] 2074 // fallthrough dest_bb 2075 /// 2076 // dest_bb: 2077 // buzz; 2078 2079 RS->enterBasicBlockEnd(MBB); 2080 Register Scav = RS->scavengeRegisterBackwards( 2081 AMDGPU::SReg_64RegClass, 2082 MachineBasicBlock::iterator(GetPC), false, 0); 2083 MRI.replaceRegWith(PCReg, Scav); 2084 MRI.clearVirtRegs(); 2085 RS->setRegUsed(Scav); 2086 2087 return 4 + 8 + 4 + 4; 2088 } 2089 2090 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2091 switch (Cond) { 2092 case SIInstrInfo::SCC_TRUE: 2093 return AMDGPU::S_CBRANCH_SCC1; 2094 case SIInstrInfo::SCC_FALSE: 2095 return AMDGPU::S_CBRANCH_SCC0; 2096 case SIInstrInfo::VCCNZ: 2097 return AMDGPU::S_CBRANCH_VCCNZ; 2098 case SIInstrInfo::VCCZ: 2099 return AMDGPU::S_CBRANCH_VCCZ; 2100 case SIInstrInfo::EXECNZ: 2101 return AMDGPU::S_CBRANCH_EXECNZ; 2102 case SIInstrInfo::EXECZ: 2103 return AMDGPU::S_CBRANCH_EXECZ; 2104 default: 2105 llvm_unreachable("invalid branch predicate"); 2106 } 2107 } 2108 2109 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2110 switch (Opcode) { 2111 case AMDGPU::S_CBRANCH_SCC0: 2112 return SCC_FALSE; 2113 case AMDGPU::S_CBRANCH_SCC1: 2114 return SCC_TRUE; 2115 case AMDGPU::S_CBRANCH_VCCNZ: 2116 return VCCNZ; 2117 case AMDGPU::S_CBRANCH_VCCZ: 2118 return VCCZ; 2119 case AMDGPU::S_CBRANCH_EXECNZ: 2120 return EXECNZ; 2121 case AMDGPU::S_CBRANCH_EXECZ: 2122 return EXECZ; 2123 default: 2124 return INVALID_BR; 2125 } 2126 } 2127 2128 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2129 MachineBasicBlock::iterator I, 2130 MachineBasicBlock *&TBB, 2131 MachineBasicBlock *&FBB, 2132 SmallVectorImpl<MachineOperand> &Cond, 2133 bool AllowModify) const { 2134 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2135 // Unconditional Branch 2136 TBB = I->getOperand(0).getMBB(); 2137 return false; 2138 } 2139 2140 MachineBasicBlock *CondBB = nullptr; 2141 2142 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2143 CondBB = I->getOperand(1).getMBB(); 2144 Cond.push_back(I->getOperand(0)); 2145 } else { 2146 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2147 if (Pred == INVALID_BR) 2148 return true; 2149 2150 CondBB = I->getOperand(0).getMBB(); 2151 Cond.push_back(MachineOperand::CreateImm(Pred)); 2152 Cond.push_back(I->getOperand(1)); // Save the branch register. 2153 } 2154 ++I; 2155 2156 if (I == MBB.end()) { 2157 // Conditional branch followed by fall-through. 2158 TBB = CondBB; 2159 return false; 2160 } 2161 2162 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2163 TBB = CondBB; 2164 FBB = I->getOperand(0).getMBB(); 2165 return false; 2166 } 2167 2168 return true; 2169 } 2170 2171 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2172 MachineBasicBlock *&FBB, 2173 SmallVectorImpl<MachineOperand> &Cond, 2174 bool AllowModify) const { 2175 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2176 auto E = MBB.end(); 2177 if (I == E) 2178 return false; 2179 2180 // Skip over the instructions that are artificially terminators for special 2181 // exec management. 
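// For example, control-flow lowering can leave exec-mask updates such as
// S_MOV_B64_term or S_OR_B64_term in front of the real branch; they are only
// terminators so spill code is placed correctly (see expandPostRAPseudo), and
// analyzeBranch must look past them to find the branch itself.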
2182 while (I != E && !I->isBranch() && !I->isReturn() && 2183 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2184 switch (I->getOpcode()) { 2185 case AMDGPU::SI_MASK_BRANCH: 2186 case AMDGPU::S_MOV_B64_term: 2187 case AMDGPU::S_XOR_B64_term: 2188 case AMDGPU::S_OR_B64_term: 2189 case AMDGPU::S_ANDN2_B64_term: 2190 case AMDGPU::S_MOV_B32_term: 2191 case AMDGPU::S_XOR_B32_term: 2192 case AMDGPU::S_OR_B32_term: 2193 case AMDGPU::S_ANDN2_B32_term: 2194 break; 2195 case AMDGPU::SI_IF: 2196 case AMDGPU::SI_ELSE: 2197 case AMDGPU::SI_KILL_I1_TERMINATOR: 2198 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2199 // FIXME: It's messy that these need to be considered here at all. 2200 return true; 2201 default: 2202 llvm_unreachable("unexpected non-branch terminator inst"); 2203 } 2204 2205 ++I; 2206 } 2207 2208 if (I == E) 2209 return false; 2210 2211 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2212 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2213 2214 ++I; 2215 2216 // TODO: Should be able to treat as fallthrough? 2217 if (I == MBB.end()) 2218 return true; 2219 2220 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2221 return true; 2222 2223 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2224 2225 // Specifically handle the case where the conditional branch is to the same 2226 // destination as the mask branch. e.g. 2227 // 2228 // si_mask_branch BB8 2229 // s_cbranch_execz BB8 2230 // s_cbranch BB9 2231 // 2232 // This is required to understand divergent loops which may need the branches 2233 // to be relaxed. 2234 if (TBB != MaskBrDest || Cond.empty()) 2235 return true; 2236 2237 auto Pred = Cond[0].getImm(); 2238 return (Pred != EXECZ && Pred != EXECNZ); 2239 } 2240 2241 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2242 int *BytesRemoved) const { 2243 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2244 2245 unsigned Count = 0; 2246 unsigned RemovedSize = 0; 2247 while (I != MBB.end()) { 2248 MachineBasicBlock::iterator Next = std::next(I); 2249 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2250 I = Next; 2251 continue; 2252 } 2253 2254 RemovedSize += getInstSizeInBytes(*I); 2255 I->eraseFromParent(); 2256 ++Count; 2257 I = Next; 2258 } 2259 2260 if (BytesRemoved) 2261 *BytesRemoved = RemovedSize; 2262 2263 return Count; 2264 } 2265 2266 // Copy the flags onto the implicit condition register operand. 2267 static void preserveCondRegFlags(MachineOperand &CondReg, 2268 const MachineOperand &OrigCond) { 2269 CondReg.setIsUndef(OrigCond.isUndef()); 2270 CondReg.setIsKill(OrigCond.isKill()); 2271 } 2272 2273 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2274 MachineBasicBlock *TBB, 2275 MachineBasicBlock *FBB, 2276 ArrayRef<MachineOperand> Cond, 2277 const DebugLoc &DL, 2278 int *BytesAdded) const { 2279 if (!FBB && Cond.empty()) { 2280 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2281 .addMBB(TBB); 2282 if (BytesAdded) 2283 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2284 return 1; 2285 } 2286 2287 if(Cond.size() == 1 && Cond[0].isReg()) { 2288 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2289 .add(Cond[0]) 2290 .addMBB(TBB); 2291 return 1; 2292 } 2293 2294 assert(TBB && Cond[0].isImm()); 2295 2296 unsigned Opcode 2297 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2298 2299 if (!FBB) { 2300 Cond[1].isUndef(); 2301 MachineInstr *CondBr = 2302 BuildMI(&MBB, DL, get(Opcode)) 2303 .addMBB(TBB); 2304 2305 // Copy the flags onto the implicit condition register operand. 
2306 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2307 fixImplicitOperands(*CondBr); 2308 2309 if (BytesAdded) 2310 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2311 return 1; 2312 } 2313 2314 assert(TBB && FBB); 2315 2316 MachineInstr *CondBr = 2317 BuildMI(&MBB, DL, get(Opcode)) 2318 .addMBB(TBB); 2319 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2320 .addMBB(FBB); 2321 2322 MachineOperand &CondReg = CondBr->getOperand(1); 2323 CondReg.setIsUndef(Cond[1].isUndef()); 2324 CondReg.setIsKill(Cond[1].isKill()); 2325 2326 if (BytesAdded) 2327 *BytesAdded = ST.hasOffset3fBug() ? 16 : 8; 2328 2329 return 2; 2330 } 2331 2332 bool SIInstrInfo::reverseBranchCondition( 2333 SmallVectorImpl<MachineOperand> &Cond) const { 2334 if (Cond.size() != 2) { 2335 return true; 2336 } 2337 2338 if (Cond[0].isImm()) { 2339 Cond[0].setImm(-Cond[0].getImm()); 2340 return false; 2341 } 2342 2343 return true; 2344 } 2345 2346 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2347 ArrayRef<MachineOperand> Cond, 2348 Register DstReg, Register TrueReg, 2349 Register FalseReg, int &CondCycles, 2350 int &TrueCycles, int &FalseCycles) const { 2351 switch (Cond[0].getImm()) { 2352 case VCCNZ: 2353 case VCCZ: { 2354 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2355 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2356 if (MRI.getRegClass(FalseReg) != RC) 2357 return false; 2358 2359 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2360 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2361 2362 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2363 return RI.hasVGPRs(RC) && NumInsts <= 6; 2364 } 2365 case SCC_TRUE: 2366 case SCC_FALSE: { 2367 // FIXME: We could insert for VGPRs if we could replace the original compare 2368 // with a vector one. 2369 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2370 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2371 if (MRI.getRegClass(FalseReg) != RC) 2372 return false; 2373 2374 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2375 2376 // Multiples of 8 can do s_cselect_b64 2377 if (NumInsts % 2 == 0) 2378 NumInsts /= 2; 2379 2380 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2381 return RI.isSGPRClass(RC); 2382 } 2383 default: 2384 return false; 2385 } 2386 } 2387 2388 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2389 MachineBasicBlock::iterator I, const DebugLoc &DL, 2390 Register DstReg, ArrayRef<MachineOperand> Cond, 2391 Register TrueReg, Register FalseReg) const { 2392 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2393 if (Pred == VCCZ || Pred == SCC_FALSE) { 2394 Pred = static_cast<BranchPredicate>(-Pred); 2395 std::swap(TrueReg, FalseReg); 2396 } 2397 2398 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2399 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2400 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2401 2402 if (DstSize == 32) { 2403 MachineInstr *Select; 2404 if (Pred == SCC_TRUE) { 2405 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2406 .addReg(TrueReg) 2407 .addReg(FalseReg); 2408 } else { 2409 // Instruction's operands are backwards from what is expected. 
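// v_cndmask_b32 dst, src0, src1, vcc selects src1 for lanes where the VCC bit
// is set, so the "false" value must be passed first here.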
2410 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2411 .addReg(FalseReg) 2412 .addReg(TrueReg); 2413 } 2414 2415 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2416 return; 2417 } 2418 2419 if (DstSize == 64 && Pred == SCC_TRUE) { 2420 MachineInstr *Select = 2421 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2422 .addReg(TrueReg) 2423 .addReg(FalseReg); 2424 2425 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2426 return; 2427 } 2428 2429 static const int16_t Sub0_15[] = { 2430 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2431 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2432 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2433 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2434 }; 2435 2436 static const int16_t Sub0_15_64[] = { 2437 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2438 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2439 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2440 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2441 }; 2442 2443 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2444 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2445 const int16_t *SubIndices = Sub0_15; 2446 int NElts = DstSize / 32; 2447 2448 // 64-bit select is only available for SALU. 2449 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2450 if (Pred == SCC_TRUE) { 2451 if (NElts % 2) { 2452 SelOp = AMDGPU::S_CSELECT_B32; 2453 EltRC = &AMDGPU::SGPR_32RegClass; 2454 } else { 2455 SelOp = AMDGPU::S_CSELECT_B64; 2456 EltRC = &AMDGPU::SGPR_64RegClass; 2457 SubIndices = Sub0_15_64; 2458 NElts /= 2; 2459 } 2460 } 2461 2462 MachineInstrBuilder MIB = BuildMI( 2463 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2464 2465 I = MIB->getIterator(); 2466 2467 SmallVector<Register, 8> Regs; 2468 for (int Idx = 0; Idx != NElts; ++Idx) { 2469 Register DstElt = MRI.createVirtualRegister(EltRC); 2470 Regs.push_back(DstElt); 2471 2472 unsigned SubIdx = SubIndices[Idx]; 2473 2474 MachineInstr *Select; 2475 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2476 Select = 2477 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2478 .addReg(FalseReg, 0, SubIdx) 2479 .addReg(TrueReg, 0, SubIdx); 2480 } else { 2481 Select = 2482 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2483 .addReg(TrueReg, 0, SubIdx) 2484 .addReg(FalseReg, 0, SubIdx); 2485 } 2486 2487 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2488 fixImplicitOperands(*Select); 2489 2490 MIB.addReg(DstElt) 2491 .addImm(SubIdx); 2492 } 2493 } 2494 2495 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2496 switch (MI.getOpcode()) { 2497 case AMDGPU::V_MOV_B32_e32: 2498 case AMDGPU::V_MOV_B32_e64: 2499 case AMDGPU::V_MOV_B64_PSEUDO: { 2500 // If there are additional implicit register operands, this may be used for 2501 // register indexing so the source register operand isn't simply copied. 
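// Sketch of what this guards against: a mov participating in indexed register
// access carries extra implicit operands (e.g. an implicit use/def of the
// containing vector register), so getNumOperands() exceeds the descriptor
// count computed below and the mov is not treated as a plain copy.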
2502 unsigned NumOps = MI.getDesc().getNumOperands() + 2503 MI.getDesc().getNumImplicitUses(); 2504 2505 return MI.getNumOperands() == NumOps; 2506 } 2507 case AMDGPU::S_MOV_B32: 2508 case AMDGPU::S_MOV_B64: 2509 case AMDGPU::COPY: 2510 case AMDGPU::V_ACCVGPR_WRITE_B32: 2511 case AMDGPU::V_ACCVGPR_READ_B32: 2512 return true; 2513 default: 2514 return false; 2515 } 2516 } 2517 2518 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2519 unsigned Kind) const { 2520 switch(Kind) { 2521 case PseudoSourceValue::Stack: 2522 case PseudoSourceValue::FixedStack: 2523 return AMDGPUAS::PRIVATE_ADDRESS; 2524 case PseudoSourceValue::ConstantPool: 2525 case PseudoSourceValue::GOT: 2526 case PseudoSourceValue::JumpTable: 2527 case PseudoSourceValue::GlobalValueCallEntry: 2528 case PseudoSourceValue::ExternalSymbolCallEntry: 2529 case PseudoSourceValue::TargetCustom: 2530 return AMDGPUAS::CONSTANT_ADDRESS; 2531 } 2532 return AMDGPUAS::FLAT_ADDRESS; 2533 } 2534 2535 static void removeModOperands(MachineInstr &MI) { 2536 unsigned Opc = MI.getOpcode(); 2537 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2538 AMDGPU::OpName::src0_modifiers); 2539 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2540 AMDGPU::OpName::src1_modifiers); 2541 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2542 AMDGPU::OpName::src2_modifiers); 2543 2544 MI.RemoveOperand(Src2ModIdx); 2545 MI.RemoveOperand(Src1ModIdx); 2546 MI.RemoveOperand(Src0ModIdx); 2547 } 2548 2549 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2550 Register Reg, MachineRegisterInfo *MRI) const { 2551 if (!MRI->hasOneNonDBGUse(Reg)) 2552 return false; 2553 2554 switch (DefMI.getOpcode()) { 2555 default: 2556 return false; 2557 case AMDGPU::S_MOV_B64: 2558 // TODO: We could fold 64-bit immediates, but this gets complicated 2559 // when there are sub-registers. 2560 return false; 2561 2562 case AMDGPU::V_MOV_B32_e32: 2563 case AMDGPU::S_MOV_B32: 2564 case AMDGPU::V_ACCVGPR_WRITE_B32: 2565 break; 2566 } 2567 2568 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2569 assert(ImmOp); 2570 // FIXME: We could handle FrameIndex values here. 2571 if (!ImmOp->isImm()) 2572 return false; 2573 2574 unsigned Opc = UseMI.getOpcode(); 2575 if (Opc == AMDGPU::COPY) { 2576 Register DstReg = UseMI.getOperand(0).getReg(); 2577 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2578 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2579 unsigned NewOpc = isVGPRCopy ?
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2580 APInt Imm(32, ImmOp->getImm()); 2581 2582 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2583 Imm = Imm.ashr(16); 2584 2585 if (RI.isAGPR(*MRI, DstReg)) { 2586 if (!isInlineConstant(Imm)) 2587 return false; 2588 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32; 2589 } 2590 2591 if (Is16Bit) { 2592 if (isVGPRCopy) 2593 return false; // Do not clobber vgpr_hi16 2594 2595 if (DstReg.isVirtual() && 2596 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2597 return false; 2598 2599 UseMI.getOperand(0).setSubReg(0); 2600 if (DstReg.isPhysical()) { 2601 DstReg = RI.get32BitRegister(DstReg); 2602 UseMI.getOperand(0).setReg(DstReg); 2603 } 2604 assert(UseMI.getOperand(1).getReg().isVirtual()); 2605 } 2606 2607 UseMI.setDesc(get(NewOpc)); 2608 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2609 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2610 return true; 2611 } 2612 2613 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2614 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 || 2615 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2616 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) { 2617 // Don't fold if we are using source or output modifiers. The new VOP2 2618 // instructions don't have them. 2619 if (hasAnyModifiersSet(UseMI)) 2620 return false; 2621 2622 // If this is a free constant, there's no reason to do this. 2623 // TODO: We could fold this here instead of letting SIFoldOperands do it 2624 // later. 2625 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2626 2627 // Any src operand can be used for the legality check. 2628 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2629 return false; 2630 2631 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2632 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; 2633 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2634 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; 2635 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2636 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2637 2638 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2639 // We should only expect these to be on src0 due to canonicalizations. 2640 if (Src0->isReg() && Src0->getReg() == Reg) { 2641 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2642 return false; 2643 2644 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2645 return false; 2646 2647 unsigned NewOpc = 2648 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2649 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2650 if (pseudoToMCOpcode(NewOpc) == -1) 2651 return false; 2652 2653 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2654 2655 const int64_t Imm = ImmOp->getImm(); 2656 2657 // FIXME: This would be a lot easier if we could return a new instruction 2658 // instead of having to modify in place. 2659 2660 // Remove these first since they are at the end. 
2661 UseMI.RemoveOperand( 2662 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2663 UseMI.RemoveOperand( 2664 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2665 2666 Register Src1Reg = Src1->getReg(); 2667 unsigned Src1SubReg = Src1->getSubReg(); 2668 Src0->setReg(Src1Reg); 2669 Src0->setSubReg(Src1SubReg); 2670 Src0->setIsKill(Src1->isKill()); 2671 2672 if (Opc == AMDGPU::V_MAC_F32_e64 || 2673 Opc == AMDGPU::V_MAC_F16_e64 || 2674 Opc == AMDGPU::V_FMAC_F32_e64 || 2675 Opc == AMDGPU::V_FMAC_F16_e64) 2676 UseMI.untieRegOperand( 2677 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2678 2679 Src1->ChangeToImmediate(Imm); 2680 2681 removeModOperands(UseMI); 2682 UseMI.setDesc(get(NewOpc)); 2683 2684 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2685 if (DeleteDef) 2686 DefMI.eraseFromParent(); 2687 2688 return true; 2689 } 2690 2691 // Added part is the constant: Use v_madak_{f16, f32}. 2692 if (Src2->isReg() && Src2->getReg() == Reg) { 2693 // Not allowed to use constant bus for another operand. 2694 // We can however allow an inline immediate as src0. 2695 bool Src0Inlined = false; 2696 if (Src0->isReg()) { 2697 // Try to inline constant if possible. 2698 // If the Def moves immediate and the use is single 2699 // We are saving VGPR here. 2700 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2701 if (Def && Def->isMoveImmediate() && 2702 isInlineConstant(Def->getOperand(1)) && 2703 MRI->hasOneUse(Src0->getReg())) { 2704 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2705 Src0Inlined = true; 2706 } else if ((Src0->getReg().isPhysical() && 2707 (ST.getConstantBusLimit(Opc) <= 1 && 2708 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2709 (Src0->getReg().isVirtual() && 2710 (ST.getConstantBusLimit(Opc) <= 1 && 2711 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2712 return false; 2713 // VGPR is okay as Src0 - fallthrough 2714 } 2715 2716 if (Src1->isReg() && !Src0Inlined ) { 2717 // We have one slot for inlinable constant so far - try to fill it 2718 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2719 if (Def && Def->isMoveImmediate() && 2720 isInlineConstant(Def->getOperand(1)) && 2721 MRI->hasOneUse(Src1->getReg()) && 2722 commuteInstruction(UseMI)) { 2723 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2724 } else if ((Src1->getReg().isPhysical() && 2725 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2726 (Src1->getReg().isVirtual() && 2727 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2728 return false; 2729 // VGPR is okay as Src1 - fallthrough 2730 } 2731 2732 unsigned NewOpc = 2733 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2734 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2735 if (pseudoToMCOpcode(NewOpc) == -1) 2736 return false; 2737 2738 const int64_t Imm = ImmOp->getImm(); 2739 2740 // FIXME: This would be a lot easier if we could return a new instruction 2741 // instead of having to modify in place. 2742 2743 // Remove these first since they are at the end. 2744 UseMI.RemoveOperand( 2745 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2746 UseMI.RemoveOperand( 2747 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2748 2749 if (Opc == AMDGPU::V_MAC_F32_e64 || 2750 Opc == AMDGPU::V_MAC_F16_e64 || 2751 Opc == AMDGPU::V_FMAC_F32_e64 || 2752 Opc == AMDGPU::V_FMAC_F16_e64) 2753 UseMI.untieRegOperand( 2754 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2755 2756 // ChangingToImmediate adds Src2 back to the instruction. 
2757 Src2->ChangeToImmediate(Imm); 2758 2759 // These come before src2. 2760 removeModOperands(UseMI); 2761 UseMI.setDesc(get(NewOpc)); 2762 // It might happen that UseMI was commuted 2763 // and we now have SGPR as SRC1. If so 2 inlined 2764 // constant and SGPR are illegal. 2765 legalizeOperands(UseMI); 2766 2767 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2768 if (DeleteDef) 2769 DefMI.eraseFromParent(); 2770 2771 return true; 2772 } 2773 } 2774 2775 return false; 2776 } 2777 2778 static bool 2779 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 2780 ArrayRef<const MachineOperand *> BaseOps2) { 2781 if (BaseOps1.size() != BaseOps2.size()) 2782 return false; 2783 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 2784 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 2785 return false; 2786 } 2787 return true; 2788 } 2789 2790 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2791 int WidthB, int OffsetB) { 2792 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2793 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2794 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 2795 return LowOffset + LowWidth <= HighOffset; 2796 } 2797 2798 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 2799 const MachineInstr &MIb) const { 2800 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 2801 int64_t Offset0, Offset1; 2802 unsigned Dummy0, Dummy1; 2803 bool Offset0IsScalable, Offset1IsScalable; 2804 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 2805 Dummy0, &RI) || 2806 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 2807 Dummy1, &RI)) 2808 return false; 2809 2810 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 2811 return false; 2812 2813 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2814 // FIXME: Handle ds_read2 / ds_write2. 2815 return false; 2816 } 2817 unsigned Width0 = MIa.memoperands().front()->getSize(); 2818 unsigned Width1 = MIb.memoperands().front()->getSize(); 2819 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 2820 } 2821 2822 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 2823 const MachineInstr &MIb) const { 2824 assert(MIa.mayLoadOrStore() && 2825 "MIa must load from or modify a memory location"); 2826 assert(MIb.mayLoadOrStore() && 2827 "MIb must load from or modify a memory location"); 2828 2829 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2830 return false; 2831 2832 // XXX - Can we relax this between address spaces? 2833 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2834 return false; 2835 2836 // TODO: Should we check the address space from the MachineMemOperand? That 2837 // would allow us to distinguish objects we know don't alias based on the 2838 // underlying address space, even if it was lowered to a different one, 2839 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2840 // buffer. 
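// For example (illustrative): two ds_read_b32 from the same base pointer are
// treated as disjoint only when their offset/width ranges do not overlap,
// while a ds_read paired with a plain flat load is conservatively assumed to
// alias unless the flat access is known to be segment-specific.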
2841 if (isDS(MIa)) { 2842 if (isDS(MIb)) 2843 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2844 2845 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 2846 } 2847 2848 if (isMUBUF(MIa) || isMTBUF(MIa)) { 2849 if (isMUBUF(MIb) || isMTBUF(MIb)) 2850 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2851 2852 return !isFLAT(MIb) && !isSMRD(MIb); 2853 } 2854 2855 if (isSMRD(MIa)) { 2856 if (isSMRD(MIb)) 2857 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2858 2859 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 2860 } 2861 2862 if (isFLAT(MIa)) { 2863 if (isFLAT(MIb)) 2864 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2865 2866 return false; 2867 } 2868 2869 return false; 2870 } 2871 2872 static int64_t getFoldableImm(const MachineOperand* MO) { 2873 if (!MO->isReg()) 2874 return false; 2875 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 2876 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2877 auto Def = MRI.getUniqueVRegDef(MO->getReg()); 2878 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && 2879 Def->getOperand(1).isImm()) 2880 return Def->getOperand(1).getImm(); 2881 return AMDGPU::NoRegister; 2882 } 2883 2884 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 2885 MachineInstr &NewMI) { 2886 if (LV) { 2887 unsigned NumOps = MI.getNumOperands(); 2888 for (unsigned I = 1; I < NumOps; ++I) { 2889 MachineOperand &Op = MI.getOperand(I); 2890 if (Op.isReg() && Op.isKill()) 2891 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 2892 } 2893 } 2894 } 2895 2896 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2897 MachineInstr &MI, 2898 LiveVariables *LV) const { 2899 unsigned Opc = MI.getOpcode(); 2900 bool IsF16 = false; 2901 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2902 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 2903 2904 switch (Opc) { 2905 default: 2906 return nullptr; 2907 case AMDGPU::V_MAC_F16_e64: 2908 case AMDGPU::V_FMAC_F16_e64: 2909 IsF16 = true; 2910 LLVM_FALLTHROUGH; 2911 case AMDGPU::V_MAC_F32_e64: 2912 case AMDGPU::V_FMAC_F32_e64: 2913 break; 2914 case AMDGPU::V_MAC_F16_e32: 2915 case AMDGPU::V_FMAC_F16_e32: 2916 IsF16 = true; 2917 LLVM_FALLTHROUGH; 2918 case AMDGPU::V_MAC_F32_e32: 2919 case AMDGPU::V_FMAC_F32_e32: { 2920 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2921 AMDGPU::OpName::src0); 2922 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2923 if (!Src0->isReg() && !Src0->isImm()) 2924 return nullptr; 2925 2926 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2927 return nullptr; 2928 2929 break; 2930 } 2931 } 2932 2933 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2934 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2935 const MachineOperand *Src0Mods = 2936 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2937 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2938 const MachineOperand *Src1Mods = 2939 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2940 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2941 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2942 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2943 MachineInstrBuilder MIB; 2944 2945 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && 2946 // If we have an SGPR input, we will violate the constant bus restriction. 
2947 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 2948 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2949 if (auto Imm = getFoldableImm(Src2)) { 2950 unsigned NewOpc = 2951 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 2952 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2953 if (pseudoToMCOpcode(NewOpc) != -1) { 2954 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2955 .add(*Dst) 2956 .add(*Src0) 2957 .add(*Src1) 2958 .addImm(Imm); 2959 updateLiveVariables(LV, MI, *MIB); 2960 return MIB; 2961 } 2962 } 2963 unsigned NewOpc = IsFMA 2964 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2965 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2966 if (auto Imm = getFoldableImm(Src1)) { 2967 if (pseudoToMCOpcode(NewOpc) != -1) { 2968 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2969 .add(*Dst) 2970 .add(*Src0) 2971 .addImm(Imm) 2972 .add(*Src2); 2973 updateLiveVariables(LV, MI, *MIB); 2974 return MIB; 2975 } 2976 } 2977 if (auto Imm = getFoldableImm(Src0)) { 2978 if (pseudoToMCOpcode(NewOpc) != -1 && 2979 isOperandLegal( 2980 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 2981 Src1)) { 2982 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2983 .add(*Dst) 2984 .add(*Src1) 2985 .addImm(Imm) 2986 .add(*Src2); 2987 updateLiveVariables(LV, MI, *MIB); 2988 return MIB; 2989 } 2990 } 2991 } 2992 2993 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2994 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2995 if (pseudoToMCOpcode(NewOpc) == -1) 2996 return nullptr; 2997 2998 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2999 .add(*Dst) 3000 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3001 .add(*Src0) 3002 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3003 .add(*Src1) 3004 .addImm(0) // Src mods 3005 .add(*Src2) 3006 .addImm(Clamp ? Clamp->getImm() : 0) 3007 .addImm(Omod ? Omod->getImm() : 0); 3008 updateLiveVariables(LV, MI, *MIB); 3009 return MIB; 3010 } 3011 3012 // It's not generally safe to move VALU instructions across these since it will 3013 // start using the register as a base index rather than directly. 3014 // XXX - Why isn't hasSideEffects sufficient for these? 3015 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3016 switch (MI.getOpcode()) { 3017 case AMDGPU::S_SET_GPR_IDX_ON: 3018 case AMDGPU::S_SET_GPR_IDX_MODE: 3019 case AMDGPU::S_SET_GPR_IDX_OFF: 3020 return true; 3021 default: 3022 return false; 3023 } 3024 } 3025 3026 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3027 const MachineBasicBlock *MBB, 3028 const MachineFunction &MF) const { 3029 // Skipping the check for SP writes in the base implementation. The reason it 3030 // was added was apparently due to compile time concerns. 3031 // 3032 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3033 // but is probably avoidable. 3034 3035 // Copied from base implementation. 3036 // Terminators and labels can't be scheduled around. 3037 if (MI.isTerminator() || MI.isPosition()) 3038 return true; 3039 3040 // INLINEASM_BR can jump to another block 3041 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3042 return true; 3043 3044 // Target-independent instructions do not have an implicit-use of EXEC, even 3045 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3046 // boundaries prevents incorrect movements of such instructions. 
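// (For instance, a plain COPY into a VGPR carries no exec operand, so without
// this barrier it could be scheduled across an exec-mask update inserted by
// control flow lowering.)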
3047 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3048 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3049 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3050 changesVGPRIndexingMode(MI); 3051 } 3052 3053 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3054 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3055 Opcode == AMDGPU::DS_GWS_INIT || 3056 Opcode == AMDGPU::DS_GWS_SEMA_V || 3057 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3058 Opcode == AMDGPU::DS_GWS_SEMA_P || 3059 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3060 Opcode == AMDGPU::DS_GWS_BARRIER; 3061 } 3062 3063 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3064 // Skip the full operand and register alias search modifiesRegister 3065 // does. There's only a handful of instructions that touch this, it's only an 3066 // implicit def, and doesn't alias any other registers. 3067 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3068 for (; ImpDef && *ImpDef; ++ImpDef) { 3069 if (*ImpDef == AMDGPU::MODE) 3070 return true; 3071 } 3072 } 3073 3074 return false; 3075 } 3076 3077 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3078 unsigned Opcode = MI.getOpcode(); 3079 3080 if (MI.mayStore() && isSMRD(MI)) 3081 return true; // scalar store or atomic 3082 3083 // This will terminate the function when other lanes may need to continue. 3084 if (MI.isReturn()) 3085 return true; 3086 3087 // These instructions cause shader I/O that may cause hardware lockups 3088 // when executed with an empty EXEC mask. 3089 // 3090 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3091 // EXEC = 0, but checking for that case here seems not worth it 3092 // given the typical code patterns. 3093 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3094 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 3095 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3096 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3097 return true; 3098 3099 if (MI.isCall() || MI.isInlineAsm()) 3100 return true; // conservative assumption 3101 3102 // A mode change is a scalar operation that influences vector instructions. 3103 if (modifiesModeRegister(MI)) 3104 return true; 3105 3106 // These are like SALU instructions in terms of effects, so it's questionable 3107 // whether we should return true for those. 3108 // 3109 // However, executing them with EXEC = 0 causes them to operate on undefined 3110 // data, which we avoid by returning true here. 3111 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 3112 return true; 3113 3114 return false; 3115 } 3116 3117 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3118 const MachineInstr &MI) const { 3119 if (MI.isMetaInstruction()) 3120 return false; 3121 3122 // This won't read exec if this is an SGPR->SGPR copy. 3123 if (MI.isCopyLike()) { 3124 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3125 return true; 3126 3127 // Make sure this isn't copying exec as a normal operand 3128 return MI.readsRegister(AMDGPU::EXEC, &RI); 3129 } 3130 3131 // Make a conservative assumption about the callee. 3132 if (MI.isCall()) 3133 return true; 3134 3135 // Be conservative with any unhandled generic opcodes. 
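// (e.g. pre-selection G_* instructions, which may later select to VALU
// operations that implicitly read exec.)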
3136 if (!isTargetSpecificOpcode(MI.getOpcode())) 3137 return true; 3138 3139 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3140 } 3141 3142 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3143 switch (Imm.getBitWidth()) { 3144 case 1: // This likely will be a condition code mask. 3145 return true; 3146 3147 case 32: 3148 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3149 ST.hasInv2PiInlineImm()); 3150 case 64: 3151 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3152 ST.hasInv2PiInlineImm()); 3153 case 16: 3154 return ST.has16BitInsts() && 3155 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3156 ST.hasInv2PiInlineImm()); 3157 default: 3158 llvm_unreachable("invalid bitwidth"); 3159 } 3160 } 3161 3162 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3163 uint8_t OperandType) const { 3164 if (!MO.isImm() || 3165 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3166 OperandType > AMDGPU::OPERAND_SRC_LAST) 3167 return false; 3168 3169 // MachineOperand provides no way to tell the true operand size, since it only 3170 // records a 64-bit value. We need to know the size to determine if a 32-bit 3171 // floating point immediate bit pattern is legal for an integer immediate. It 3172 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3173 3174 int64_t Imm = MO.getImm(); 3175 switch (OperandType) { 3176 case AMDGPU::OPERAND_REG_IMM_INT32: 3177 case AMDGPU::OPERAND_REG_IMM_FP32: 3178 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3179 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3180 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3181 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3182 int32_t Trunc = static_cast<int32_t>(Imm); 3183 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3184 } 3185 case AMDGPU::OPERAND_REG_IMM_INT64: 3186 case AMDGPU::OPERAND_REG_IMM_FP64: 3187 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3188 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3189 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3190 ST.hasInv2PiInlineImm()); 3191 case AMDGPU::OPERAND_REG_IMM_INT16: 3192 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3193 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3194 // We would expect inline immediates to not be concerned with an integer/fp 3195 // distinction. However, in the case of 16-bit integer operations, the 3196 // "floating point" values appear to not work. It seems to read the low 16 bits 3197 // of 32-bit immediates, which happens to always work for the integer 3198 // values. 3199 // 3200 // See llvm bugzilla 46302. 3201 // 3202 // TODO: Theoretically we could use op-sel to use the high bits of the 3203 // 32-bit FP values. 3204 return AMDGPU::isInlinableIntLiteral(Imm); 3205 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3206 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3207 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3208 // This suffers the same problem as the scalar 16-bit cases. 3209 return AMDGPU::isInlinableIntLiteralV216(Imm); 3210 case AMDGPU::OPERAND_REG_IMM_FP16: 3211 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3212 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3213 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3214 // A few special case instructions have 16-bit operands on subtargets 3215 // where 16-bit instructions are not legal. 3216 // TODO: Do the 32-bit immediates work?
We shouldn't really need to handle 3217 // constants in these cases 3218 int16_t Trunc = static_cast<int16_t>(Imm); 3219 return ST.has16BitInsts() && 3220 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3221 } 3222 3223 return false; 3224 } 3225 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3226 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3227 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3228 uint32_t Trunc = static_cast<uint32_t>(Imm); 3229 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3230 } 3231 default: 3232 llvm_unreachable("invalid bitwidth"); 3233 } 3234 } 3235 3236 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3237 const MCOperandInfo &OpInfo) const { 3238 switch (MO.getType()) { 3239 case MachineOperand::MO_Register: 3240 return false; 3241 case MachineOperand::MO_Immediate: 3242 return !isInlineConstant(MO, OpInfo); 3243 case MachineOperand::MO_FrameIndex: 3244 case MachineOperand::MO_MachineBasicBlock: 3245 case MachineOperand::MO_ExternalSymbol: 3246 case MachineOperand::MO_GlobalAddress: 3247 case MachineOperand::MO_MCSymbol: 3248 return true; 3249 default: 3250 llvm_unreachable("unexpected operand type"); 3251 } 3252 } 3253 3254 static bool compareMachineOp(const MachineOperand &Op0, 3255 const MachineOperand &Op1) { 3256 if (Op0.getType() != Op1.getType()) 3257 return false; 3258 3259 switch (Op0.getType()) { 3260 case MachineOperand::MO_Register: 3261 return Op0.getReg() == Op1.getReg(); 3262 case MachineOperand::MO_Immediate: 3263 return Op0.getImm() == Op1.getImm(); 3264 default: 3265 llvm_unreachable("Didn't expect to be comparing these operand types"); 3266 } 3267 } 3268 3269 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3270 const MachineOperand &MO) const { 3271 const MCInstrDesc &InstDesc = MI.getDesc(); 3272 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3273 3274 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3275 3276 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3277 return true; 3278 3279 if (OpInfo.RegClass < 0) 3280 return false; 3281 3282 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3283 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3284 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3285 AMDGPU::OpName::src2)) 3286 return false; 3287 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3288 } 3289 3290 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3291 return false; 3292 3293 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3294 return true; 3295 3296 return ST.hasVOP3Literal(); 3297 } 3298 3299 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3300 int Op32 = AMDGPU::getVOPe32(Opcode); 3301 if (Op32 == -1) 3302 return false; 3303 3304 return pseudoToMCOpcode(Op32) != -1; 3305 } 3306 3307 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3308 // The src0_modifier operand is present on all instructions 3309 // that have modifiers. 
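// For example, the VOP3 form V_ADD_F32_e64 has a src0_modifiers operand, while
// the VOP2 form V_ADD_F32_e32 does not, so only the former can report
// modifiers here.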
3310 3311 return AMDGPU::getNamedOperandIdx(Opcode, 3312 AMDGPU::OpName::src0_modifiers) != -1; 3313 } 3314 3315 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3316 unsigned OpName) const { 3317 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3318 return Mods && Mods->getImm(); 3319 } 3320 3321 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3322 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3323 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3324 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3325 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3326 hasModifiersSet(MI, AMDGPU::OpName::omod); 3327 } 3328 3329 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3330 const MachineRegisterInfo &MRI) const { 3331 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3332 // Can't shrink instruction with three operands. 3333 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 3334 // a special case for it. It can only be shrunk if the third operand 3335 // is vcc, and src0_modifiers and src1_modifiers are not set. 3336 // We should handle this the same way we handle vopc, by adding 3337 // a register allocation hint pre-regalloc and then do the shrinking 3338 // post-regalloc. 3339 if (Src2) { 3340 switch (MI.getOpcode()) { 3341 default: return false; 3342 3343 case AMDGPU::V_ADDC_U32_e64: 3344 case AMDGPU::V_SUBB_U32_e64: 3345 case AMDGPU::V_SUBBREV_U32_e64: { 3346 const MachineOperand *Src1 3347 = getNamedOperand(MI, AMDGPU::OpName::src1); 3348 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3349 return false; 3350 // Additional verification is needed for sdst/src2. 3351 return true; 3352 } 3353 case AMDGPU::V_MAC_F32_e64: 3354 case AMDGPU::V_MAC_F16_e64: 3355 case AMDGPU::V_FMAC_F32_e64: 3356 case AMDGPU::V_FMAC_F16_e64: 3357 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3358 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3359 return false; 3360 break; 3361 3362 case AMDGPU::V_CNDMASK_B32_e64: 3363 break; 3364 } 3365 } 3366 3367 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3368 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3369 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3370 return false; 3371 3372 // We don't need to check src0, all input types are legal, so just make sure 3373 // src0 isn't using any modifiers. 3374 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3375 return false; 3376 3377 // Can it be shrunk to a valid 32 bit opcode? 3378 if (!hasVALU32BitEncoding(MI.getOpcode())) 3379 return false; 3380 3381 // Check output modifiers 3382 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3383 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3384 } 3385 3386 // Set VCC operand with all flags from \p Orig, except for setting it as 3387 // implicit.
3388 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3389 const MachineOperand &Orig) { 3390 3391 for (MachineOperand &Use : MI.implicit_operands()) { 3392 if (Use.isUse() && 3393 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3394 Use.setIsUndef(Orig.isUndef()); 3395 Use.setIsKill(Orig.isKill()); 3396 return; 3397 } 3398 } 3399 } 3400 3401 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3402 unsigned Op32) const { 3403 MachineBasicBlock *MBB = MI.getParent();; 3404 MachineInstrBuilder Inst32 = 3405 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3406 .setMIFlags(MI.getFlags()); 3407 3408 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3409 // For VOPC instructions, this is replaced by an implicit def of vcc. 3410 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3411 if (Op32DstIdx != -1) { 3412 // dst 3413 Inst32.add(MI.getOperand(0)); 3414 } else { 3415 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3416 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3417 "Unexpected case"); 3418 } 3419 3420 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3421 3422 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3423 if (Src1) 3424 Inst32.add(*Src1); 3425 3426 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3427 3428 if (Src2) { 3429 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3430 if (Op32Src2Idx != -1) { 3431 Inst32.add(*Src2); 3432 } else { 3433 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3434 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3435 // of vcc was already added during the initial BuildMI, but we 3436 // 1) may need to change vcc to vcc_lo to preserve the original register 3437 // 2) have to preserve the original flags. 3438 fixImplicitOperands(*Inst32); 3439 copyFlagsToImplicitVCC(*Inst32, *Src2); 3440 } 3441 } 3442 3443 return Inst32; 3444 } 3445 3446 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3447 const MachineOperand &MO, 3448 const MCOperandInfo &OpInfo) const { 3449 // Literal constants use the constant bus. 3450 //if (isLiteralConstantLike(MO, OpInfo)) 3451 // return true; 3452 if (MO.isImm()) 3453 return !isInlineConstant(MO, OpInfo); 3454 3455 if (!MO.isReg()) 3456 return true; // Misc other operands like FrameIndex 3457 3458 if (!MO.isUse()) 3459 return false; 3460 3461 if (MO.getReg().isVirtual()) 3462 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3463 3464 // Null is free 3465 if (MO.getReg() == AMDGPU::SGPR_NULL) 3466 return false; 3467 3468 // SGPRs use the constant bus 3469 if (MO.isImplicit()) { 3470 return MO.getReg() == AMDGPU::M0 || 3471 MO.getReg() == AMDGPU::VCC || 3472 MO.getReg() == AMDGPU::VCC_LO; 3473 } else { 3474 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3475 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3476 } 3477 } 3478 3479 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3480 for (const MachineOperand &MO : MI.implicit_operands()) { 3481 // We only care about reads. 
3482 if (MO.isDef()) 3483 continue; 3484 3485 switch (MO.getReg()) { 3486 case AMDGPU::VCC: 3487 case AMDGPU::VCC_LO: 3488 case AMDGPU::VCC_HI: 3489 case AMDGPU::M0: 3490 case AMDGPU::FLAT_SCR: 3491 return MO.getReg(); 3492 3493 default: 3494 break; 3495 } 3496 } 3497 3498 return AMDGPU::NoRegister; 3499 } 3500 3501 static bool shouldReadExec(const MachineInstr &MI) { 3502 if (SIInstrInfo::isVALU(MI)) { 3503 switch (MI.getOpcode()) { 3504 case AMDGPU::V_READLANE_B32: 3505 case AMDGPU::V_WRITELANE_B32: 3506 return false; 3507 } 3508 3509 return true; 3510 } 3511 3512 if (MI.isPreISelOpcode() || 3513 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3514 SIInstrInfo::isSALU(MI) || 3515 SIInstrInfo::isSMRD(MI)) 3516 return false; 3517 3518 return true; 3519 } 3520 3521 static bool isSubRegOf(const SIRegisterInfo &TRI, 3522 const MachineOperand &SuperVec, 3523 const MachineOperand &SubReg) { 3524 if (SubReg.getReg().isPhysical()) 3525 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3526 3527 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3528 SubReg.getReg() == SuperVec.getReg(); 3529 } 3530 3531 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3532 StringRef &ErrInfo) const { 3533 uint16_t Opcode = MI.getOpcode(); 3534 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3535 return true; 3536 3537 const MachineFunction *MF = MI.getParent()->getParent(); 3538 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3539 3540 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3541 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3542 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3543 3544 // Make sure the number of operands is correct. 3545 const MCInstrDesc &Desc = get(Opcode); 3546 if (!Desc.isVariadic() && 3547 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3548 ErrInfo = "Instruction has wrong number of operands."; 3549 return false; 3550 } 3551 3552 if (MI.isInlineAsm()) { 3553 // Verify register classes for inlineasm constraints. 3554 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3555 I != E; ++I) { 3556 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3557 if (!RC) 3558 continue; 3559 3560 const MachineOperand &Op = MI.getOperand(I); 3561 if (!Op.isReg()) 3562 continue; 3563 3564 Register Reg = Op.getReg(); 3565 if (!Reg.isVirtual() && !RC->contains(Reg)) { 3566 ErrInfo = "inlineasm operand has incorrect register class."; 3567 return false; 3568 } 3569 } 3570 3571 return true; 3572 } 3573 3574 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3575 ErrInfo = "missing memory operand from MIMG instruction."; 3576 return false; 3577 } 3578 3579 // Make sure the register classes are correct. 3580 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3581 if (MI.getOperand(i).isFPImm()) { 3582 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3583 "all fp values to integers."; 3584 return false; 3585 } 3586 3587 int RegClass = Desc.OpInfo[i].RegClass; 3588 3589 switch (Desc.OpInfo[i].OperandType) { 3590 case MCOI::OPERAND_REGISTER: 3591 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3592 ErrInfo = "Illegal immediate value for operand."; 3593 return false; 3594 } 3595 break; 3596 case AMDGPU::OPERAND_REG_IMM_INT32: 3597 case AMDGPU::OPERAND_REG_IMM_FP32: 3598 break; 3599 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3600 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3601 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3602 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3603 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3604 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3605 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3606 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3607 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3608 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3609 const MachineOperand &MO = MI.getOperand(i); 3610 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3611 ErrInfo = "Illegal immediate value for operand."; 3612 return false; 3613 } 3614 break; 3615 } 3616 case MCOI::OPERAND_IMMEDIATE: 3617 case AMDGPU::OPERAND_KIMM32: 3618 // Check if this operand is an immediate. 3619 // FrameIndex operands will be replaced by immediates, so they are 3620 // allowed. 3621 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3622 ErrInfo = "Expected immediate, but got non-immediate"; 3623 return false; 3624 } 3625 LLVM_FALLTHROUGH; 3626 default: 3627 continue; 3628 } 3629 3630 if (!MI.getOperand(i).isReg()) 3631 continue; 3632 3633 if (RegClass != -1) { 3634 Register Reg = MI.getOperand(i).getReg(); 3635 if (Reg == AMDGPU::NoRegister || Reg.isVirtual()) 3636 continue; 3637 3638 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3639 if (!RC->contains(Reg)) { 3640 ErrInfo = "Operand has incorrect register class."; 3641 return false; 3642 } 3643 } 3644 } 3645 3646 // Verify SDWA 3647 if (isSDWA(MI)) { 3648 if (!ST.hasSDWA()) { 3649 ErrInfo = "SDWA is not supported on this target"; 3650 return false; 3651 } 3652 3653 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3654 3655 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3656 3657 for (int OpIdx: OpIndicies) { 3658 if (OpIdx == -1) 3659 continue; 3660 const MachineOperand &MO = MI.getOperand(OpIdx); 3661 3662 if (!ST.hasSDWAScalar()) { 3663 // Only VGPRS on VI 3664 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3665 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3666 return false; 3667 } 3668 } else { 3669 // No immediates on GFX9 3670 if (!MO.isReg()) { 3671 ErrInfo = 3672 "Only reg allowed as operands in SDWA instructions on GFX9+"; 3673 return false; 3674 } 3675 } 3676 } 3677 3678 if (!ST.hasSDWAOmod()) { 3679 // No omod allowed on VI 3680 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3681 if (OMod != nullptr && 3682 (!OMod->isImm() || OMod->getImm() != 0)) { 3683 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3684 return false; 3685 } 3686 } 3687 3688 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3689 if (isVOPC(BasicOpcode)) { 3690 if (!ST.hasSDWASdst() && DstIdx != -1) { 3691 // Only vcc allowed as dst on VI for VOPC 3692 const MachineOperand &Dst = MI.getOperand(DstIdx); 3693 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3694 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3695 return false; 3696 } 
3697 } else if (!ST.hasSDWAOutModsVOPC()) { 3698 // No clamp allowed on GFX9 for VOPC 3699 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3700 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 3701 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 3702 return false; 3703 } 3704 3705 // No omod allowed on GFX9 for VOPC 3706 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3707 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 3708 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 3709 return false; 3710 } 3711 } 3712 } 3713 3714 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 3715 if (DstUnused && DstUnused->isImm() && 3716 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 3717 const MachineOperand &Dst = MI.getOperand(DstIdx); 3718 if (!Dst.isReg() || !Dst.isTied()) { 3719 ErrInfo = "Dst register should have tied register"; 3720 return false; 3721 } 3722 3723 const MachineOperand &TiedMO = 3724 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 3725 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 3726 ErrInfo = 3727 "Dst register should be tied to implicit use of preserved register"; 3728 return false; 3729 } else if (TiedMO.getReg().isPhysical() && 3730 Dst.getReg() != TiedMO.getReg()) { 3731 ErrInfo = "Dst register should use same physical register as preserved"; 3732 return false; 3733 } 3734 } 3735 } 3736 3737 // Verify MIMG 3738 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 3739 // Ensure that the return type used is large enough for all the options 3740 // being used TFE/LWE require an extra result register. 3741 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 3742 if (DMask) { 3743 uint64_t DMaskImm = DMask->getImm(); 3744 uint32_t RegCount = 3745 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 3746 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 3747 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 3748 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 3749 3750 // Adjust for packed 16 bit values 3751 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 3752 RegCount >>= 1; 3753 3754 // Adjust if using LWE or TFE 3755 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 3756 RegCount += 1; 3757 3758 const uint32_t DstIdx = 3759 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 3760 const MachineOperand &Dst = MI.getOperand(DstIdx); 3761 if (Dst.isReg()) { 3762 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 3763 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 3764 if (RegCount > DstSize) { 3765 ErrInfo = "MIMG instruction returns too many registers for dst " 3766 "register class"; 3767 return false; 3768 } 3769 } 3770 } 3771 } 3772 3773 // Verify VOP*. Ignore multiple sgpr operands on writelane. 3774 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 3775 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 3776 // Only look at the true operands. Only a real operand can use the constant 3777 // bus, and we don't want to check pseudo-operands like the source modifier 3778 // flags. 
3779 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3780 3781 unsigned ConstantBusCount = 0; 3782 unsigned LiteralCount = 0; 3783 3784 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3785 ++ConstantBusCount; 3786 3787 SmallVector<Register, 2> SGPRsUsed; 3788 Register SGPRUsed; 3789 3790 for (int OpIdx : OpIndices) { 3791 if (OpIdx == -1) 3792 break; 3793 const MachineOperand &MO = MI.getOperand(OpIdx); 3794 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3795 if (MO.isReg()) { 3796 SGPRUsed = MO.getReg(); 3797 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 3798 return SGPRUsed != SGPR; 3799 })) { 3800 ++ConstantBusCount; 3801 SGPRsUsed.push_back(SGPRUsed); 3802 } 3803 } else { 3804 ++ConstantBusCount; 3805 ++LiteralCount; 3806 } 3807 } 3808 } 3809 3810 SGPRUsed = findImplicitSGPRRead(MI); 3811 if (SGPRUsed != AMDGPU::NoRegister) { 3812 // Implicit uses may safely overlap true overands 3813 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 3814 return !RI.regsOverlap(SGPRUsed, SGPR); 3815 })) { 3816 ++ConstantBusCount; 3817 SGPRsUsed.push_back(SGPRUsed); 3818 } 3819 } 3820 3821 // v_writelane_b32 is an exception from constant bus restriction: 3822 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 3823 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 3824 Opcode != AMDGPU::V_WRITELANE_B32) { 3825 ErrInfo = "VOP* instruction violates constant bus restriction"; 3826 return false; 3827 } 3828 3829 if (isVOP3(MI) && LiteralCount) { 3830 if (!ST.hasVOP3Literal()) { 3831 ErrInfo = "VOP3 instruction uses literal"; 3832 return false; 3833 } 3834 if (LiteralCount > 1) { 3835 ErrInfo = "VOP3 instruction uses more than one literal"; 3836 return false; 3837 } 3838 } 3839 } 3840 3841 // Special case for writelane - this can break the multiple constant bus rule, 3842 // but still can't use more than one SGPR register 3843 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 3844 unsigned SGPRCount = 0; 3845 Register SGPRUsed = AMDGPU::NoRegister; 3846 3847 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 3848 if (OpIdx == -1) 3849 break; 3850 3851 const MachineOperand &MO = MI.getOperand(OpIdx); 3852 3853 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3854 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 3855 if (MO.getReg() != SGPRUsed) 3856 ++SGPRCount; 3857 SGPRUsed = MO.getReg(); 3858 } 3859 } 3860 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 3861 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 3862 return false; 3863 } 3864 } 3865 } 3866 3867 // Verify misc. restrictions on specific instructions. 
3868 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3869 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3870 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3871 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3872 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3873 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3874 if (!compareMachineOp(Src0, Src1) && 3875 !compareMachineOp(Src0, Src2)) { 3876 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3877 return false; 3878 } 3879 } 3880 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 3881 SISrcMods::ABS) || 3882 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 3883 SISrcMods::ABS) || 3884 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 3885 SISrcMods::ABS)) { 3886 ErrInfo = "ABS not allowed in VOP3B instructions"; 3887 return false; 3888 } 3889 } 3890 3891 if (isSOP2(MI) || isSOPC(MI)) { 3892 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3893 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3894 unsigned Immediates = 0; 3895 3896 if (!Src0.isReg() && 3897 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3898 Immediates++; 3899 if (!Src1.isReg() && 3900 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3901 Immediates++; 3902 3903 if (Immediates > 1) { 3904 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3905 return false; 3906 } 3907 } 3908 3909 if (isSOPK(MI)) { 3910 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3911 if (Desc.isBranch()) { 3912 if (!Op->isMBB()) { 3913 ErrInfo = "invalid branch target for SOPK instruction"; 3914 return false; 3915 } 3916 } else { 3917 uint64_t Imm = Op->getImm(); 3918 if (sopkIsZext(MI)) { 3919 if (!isUInt<16>(Imm)) { 3920 ErrInfo = "invalid immediate for SOPK instruction"; 3921 return false; 3922 } 3923 } else { 3924 if (!isInt<16>(Imm)) { 3925 ErrInfo = "invalid immediate for SOPK instruction"; 3926 return false; 3927 } 3928 } 3929 } 3930 } 3931 3932 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3933 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3934 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3935 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3936 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3937 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3938 3939 const unsigned StaticNumOps = Desc.getNumOperands() + 3940 Desc.getNumImplicitUses(); 3941 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3942 3943 // Allow additional implicit operands. This allows a fixup done by the post 3944 // RA scheduler where the main implicit operand is killed and implicit-defs 3945 // are added for sub-registers that remain live after this instruction. 
3946 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3947 ErrInfo = "missing implicit register operands"; 3948 return false; 3949 } 3950 3951 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3952 if (IsDst) { 3953 if (!Dst->isUse()) { 3954 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3955 return false; 3956 } 3957 3958 unsigned UseOpIdx; 3959 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3960 UseOpIdx != StaticNumOps + 1) { 3961 ErrInfo = "movrel implicit operands should be tied"; 3962 return false; 3963 } 3964 } 3965 3966 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3967 const MachineOperand &ImpUse 3968 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3969 if (!ImpUse.isReg() || !ImpUse.isUse() || 3970 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3971 ErrInfo = "src0 should be subreg of implicit vector use"; 3972 return false; 3973 } 3974 } 3975 3976 // Make sure we aren't losing exec uses in the td files. This mostly requires 3977 // being careful when using let Uses to try to add other use registers. 3978 if (shouldReadExec(MI)) { 3979 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3980 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3981 return false; 3982 } 3983 } 3984 3985 if (isSMRD(MI)) { 3986 if (MI.mayStore()) { 3987 // The register offset form of scalar stores may only use m0 as the 3988 // soffset register. 3989 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3990 if (Soff && Soff->getReg() != AMDGPU::M0) { 3991 ErrInfo = "scalar stores must use m0 as offset register"; 3992 return false; 3993 } 3994 } 3995 } 3996 3997 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 3998 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3999 if (Offset->getImm() != 0) { 4000 ErrInfo = "subtarget does not support offsets in flat instructions"; 4001 return false; 4002 } 4003 } 4004 4005 if (isMIMG(MI)) { 4006 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4007 if (DimOp) { 4008 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4009 AMDGPU::OpName::vaddr0); 4010 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4011 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4012 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4013 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4014 const AMDGPU::MIMGDimInfo *Dim = 4015 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4016 4017 if (!Dim) { 4018 ErrInfo = "dim is out of range"; 4019 return false; 4020 } 4021 4022 bool IsA16 = false; 4023 if (ST.hasR128A16()) { 4024 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4025 IsA16 = R128A16->getImm() != 0; 4026 } else if (ST.hasGFX10A16()) { 4027 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4028 IsA16 = A16->getImm() != 0; 4029 } 4030 4031 bool PackDerivatives = IsA16 || BaseOpcode->G16; 4032 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4033 4034 unsigned AddrWords = BaseOpcode->NumExtraArgs; 4035 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 4036 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 4037 if (IsA16) 4038 AddrWords += (AddrComponents + 1) / 2; 4039 else 4040 AddrWords += AddrComponents; 4041 4042 if (BaseOpcode->Gradients) { 4043 if (PackDerivatives) 4044 // There are two gradients per coordinate, we pack them separately. 
4045 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 4046 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2; 4047 else 4048 AddrWords += Dim->NumGradients; 4049 } 4050 4051 unsigned VAddrWords; 4052 if (IsNSA) { 4053 VAddrWords = SRsrcIdx - VAddr0Idx; 4054 } else { 4055 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4056 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4057 if (AddrWords > 8) 4058 AddrWords = 16; 4059 else if (AddrWords > 4) 4060 AddrWords = 8; 4061 else if (AddrWords == 4) 4062 AddrWords = 4; 4063 else if (AddrWords == 3) 4064 AddrWords = 3; 4065 } 4066 4067 if (VAddrWords != AddrWords) { 4068 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4069 << " but got " << VAddrWords << "\n"); 4070 ErrInfo = "bad vaddr size"; 4071 return false; 4072 } 4073 } 4074 } 4075 4076 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4077 if (DppCt) { 4078 using namespace AMDGPU::DPP; 4079 4080 unsigned DC = DppCt->getImm(); 4081 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4082 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4083 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4084 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4085 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4086 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4087 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4088 ErrInfo = "Invalid dpp_ctrl value"; 4089 return false; 4090 } 4091 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4092 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4093 ErrInfo = "Invalid dpp_ctrl value: " 4094 "wavefront shifts are not supported on GFX10+"; 4095 return false; 4096 } 4097 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4098 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4099 ErrInfo = "Invalid dpp_ctrl value: " 4100 "broadcasts are not supported on GFX10+"; 4101 return false; 4102 } 4103 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4104 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4105 ErrInfo = "Invalid dpp_ctrl value: " 4106 "row_share and row_xmask are not supported before GFX10"; 4107 return false; 4108 } 4109 } 4110 4111 return true; 4112 } 4113 4114 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4115 switch (MI.getOpcode()) { 4116 default: return AMDGPU::INSTRUCTION_LIST_END; 4117 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4118 case AMDGPU::COPY: return AMDGPU::COPY; 4119 case AMDGPU::PHI: return AMDGPU::PHI; 4120 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4121 case AMDGPU::WQM: return AMDGPU::WQM; 4122 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4123 case AMDGPU::WWM: return AMDGPU::WWM; 4124 case AMDGPU::S_MOV_B32: { 4125 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4126 return MI.getOperand(1).isReg() || 4127 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4128 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4129 } 4130 case AMDGPU::S_ADD_I32: 4131 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4132 case AMDGPU::S_ADDC_U32: 4133 return AMDGPU::V_ADDC_U32_e32; 4134 case AMDGPU::S_SUB_I32: 4135 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4136 // FIXME: These are not consistently handled, and selected when the carry is 4137 // used. 
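  // The _CO_ variants write their carry-out to VCC, which is presumably why
  // they are chosen when the carry result of the scalar add/sub is needed.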
4138 case AMDGPU::S_ADD_U32: 4139 return AMDGPU::V_ADD_CO_U32_e32; 4140 case AMDGPU::S_SUB_U32: 4141 return AMDGPU::V_SUB_CO_U32_e32; 4142 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4143 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 4144 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 4145 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 4146 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4147 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4148 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4149 case AMDGPU::S_XNOR_B32: 4150 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4151 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4152 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4153 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4154 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4155 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4156 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 4157 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4158 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 4159 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4160 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 4161 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 4162 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 4163 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 4164 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 4165 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4166 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4167 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4168 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4169 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 4170 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 4171 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 4172 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 4173 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 4174 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 4175 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 4176 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 4177 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 4178 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 4179 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 4180 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 4181 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 4182 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 4183 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4184 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4185 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4186 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4187 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4188 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4189 } 4190 llvm_unreachable( 4191 "Unexpected scalar opcode without corresponding vector one!"); 4192 } 4193 4194 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4195 unsigned OpNo) const { 4196 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4197 const MCInstrDesc &Desc = get(MI.getOpcode()); 4198 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4199 Desc.OpInfo[OpNo].RegClass == -1) { 4200 Register Reg = MI.getOperand(OpNo).getReg(); 4201 4202 if 
(Reg.isVirtual()) 4203 return MRI.getRegClass(Reg); 4204 return RI.getPhysRegClass(Reg); 4205 } 4206 4207 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4208 return RI.getRegClass(RCID); 4209 } 4210 4211 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4212 MachineBasicBlock::iterator I = MI; 4213 MachineBasicBlock *MBB = MI.getParent(); 4214 MachineOperand &MO = MI.getOperand(OpIdx); 4215 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4216 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4217 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4218 unsigned Size = RI.getRegSizeInBits(*RC); 4219 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4220 if (MO.isReg()) 4221 Opcode = AMDGPU::COPY; 4222 else if (RI.isSGPRClass(RC)) 4223 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4224 4225 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4226 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 4227 VRC = &AMDGPU::VReg_64RegClass; 4228 else 4229 VRC = &AMDGPU::VGPR_32RegClass; 4230 4231 Register Reg = MRI.createVirtualRegister(VRC); 4232 DebugLoc DL = MBB->findDebugLoc(I); 4233 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4234 MO.ChangeToRegister(Reg, false); 4235 } 4236 4237 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4238 MachineRegisterInfo &MRI, 4239 MachineOperand &SuperReg, 4240 const TargetRegisterClass *SuperRC, 4241 unsigned SubIdx, 4242 const TargetRegisterClass *SubRC) 4243 const { 4244 MachineBasicBlock *MBB = MI->getParent(); 4245 DebugLoc DL = MI->getDebugLoc(); 4246 Register SubReg = MRI.createVirtualRegister(SubRC); 4247 4248 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4249 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4250 .addReg(SuperReg.getReg(), 0, SubIdx); 4251 return SubReg; 4252 } 4253 4254 // Just in case the super register is itself a sub-register, copy it to a new 4255 // value so we don't need to worry about merging its subreg index with the 4256 // SubIdx passed to this function. The register coalescer should be able to 4257 // eliminate this extra copy. 
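  // Roughly, the two copies emitted below look like (names illustrative):
  //   %NewSuperReg:SuperRC = COPY %SuperReg.OrigSubIdx
  //   %SubReg:SubRC        = COPY %NewSuperReg.SubIdx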
4258 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4259 4260 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4261 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4262 4263 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4264 .addReg(NewSuperReg, 0, SubIdx); 4265 4266 return SubReg; 4267 } 4268 4269 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4270 MachineBasicBlock::iterator MII, 4271 MachineRegisterInfo &MRI, 4272 MachineOperand &Op, 4273 const TargetRegisterClass *SuperRC, 4274 unsigned SubIdx, 4275 const TargetRegisterClass *SubRC) const { 4276 if (Op.isImm()) { 4277 if (SubIdx == AMDGPU::sub0) 4278 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4279 if (SubIdx == AMDGPU::sub1) 4280 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4281 4282 llvm_unreachable("Unhandled register index for immediate"); 4283 } 4284 4285 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4286 SubIdx, SubRC); 4287 return MachineOperand::CreateReg(SubReg, false); 4288 } 4289 4290 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4291 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4292 assert(Inst.getNumExplicitOperands() == 3); 4293 MachineOperand Op1 = Inst.getOperand(1); 4294 Inst.RemoveOperand(1); 4295 Inst.addOperand(Op1); 4296 } 4297 4298 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4299 const MCOperandInfo &OpInfo, 4300 const MachineOperand &MO) const { 4301 if (!MO.isReg()) 4302 return false; 4303 4304 Register Reg = MO.getReg(); 4305 4306 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4307 if (Reg.isPhysical()) 4308 return DRC->contains(Reg); 4309 4310 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4311 4312 if (MO.getSubReg()) { 4313 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4314 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4315 if (!SuperRC) 4316 return false; 4317 4318 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4319 if (!DRC) 4320 return false; 4321 } 4322 return RC->hasSuperClassEq(DRC); 4323 } 4324 4325 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4326 const MCOperandInfo &OpInfo, 4327 const MachineOperand &MO) const { 4328 if (MO.isReg()) 4329 return isLegalRegOperand(MRI, OpInfo, MO); 4330 4331 // Handle non-register types that are treated like immediates. 4332 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4333 return true; 4334 } 4335 4336 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4337 const MachineOperand *MO) const { 4338 const MachineFunction &MF = *MI.getParent()->getParent(); 4339 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4340 const MCInstrDesc &InstDesc = MI.getDesc(); 4341 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4342 const TargetRegisterClass *DefinedRC = 4343 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4344 if (!MO) 4345 MO = &MI.getOperand(OpIdx); 4346 4347 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4348 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4349 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4350 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4351 return false; 4352 4353 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4354 if (MO->isReg()) 4355 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4356 4357 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4358 if (i == OpIdx) 4359 continue; 4360 const MachineOperand &Op = MI.getOperand(i); 4361 if (Op.isReg()) { 4362 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4363 if (!SGPRsUsed.count(SGPR) && 4364 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4365 if (--ConstantBusLimit <= 0) 4366 return false; 4367 SGPRsUsed.insert(SGPR); 4368 } 4369 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4370 if (--ConstantBusLimit <= 0) 4371 return false; 4372 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4373 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4374 if (!VOP3LiteralLimit--) 4375 return false; 4376 if (--ConstantBusLimit <= 0) 4377 return false; 4378 } 4379 } 4380 } 4381 4382 if (MO->isReg()) { 4383 assert(DefinedRC); 4384 return isLegalRegOperand(MRI, OpInfo, *MO); 4385 } 4386 4387 // Handle non-register types that are treated like immediates. 4388 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4389 4390 if (!DefinedRC) { 4391 // This operand expects an immediate. 4392 return true; 4393 } 4394 4395 return isImmOperandLegal(MI, OpIdx, *MO); 4396 } 4397 4398 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4399 MachineInstr &MI) const { 4400 unsigned Opc = MI.getOpcode(); 4401 const MCInstrDesc &InstrDesc = get(Opc); 4402 4403 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4404 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4405 4406 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4407 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4408 4409 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4410 // we need to only have one constant bus use before GFX10. 4411 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4412 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4413 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4414 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4415 legalizeOpWithMove(MI, Src0Idx); 4416 4417 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4418 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4419 // src0/src1 with V_READFIRSTLANE. 4420 if (Opc == AMDGPU::V_WRITELANE_B32) { 4421 const DebugLoc &DL = MI.getDebugLoc(); 4422 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4423 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4424 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4425 .add(Src0); 4426 Src0.ChangeToRegister(Reg, false); 4427 } 4428 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4429 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4430 const DebugLoc &DL = MI.getDebugLoc(); 4431 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4432 .add(Src1); 4433 Src1.ChangeToRegister(Reg, false); 4434 } 4435 return; 4436 } 4437 4438 // No VOP2 instructions support AGPRs. 
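  // An AGPR source is therefore first copied into a VGPR (via
  // legalizeOpWithMove) before the remaining legality checks run.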
4439   if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
4440     legalizeOpWithMove(MI, Src0Idx);
4441 
4442   if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
4443     legalizeOpWithMove(MI, Src1Idx);
4444 
4445   // VOP2 instructions accept any operand type for src0, so we don't need to
4446   // check its legality. If src1 is already legal, we don't need to do anything.
4447   if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
4448     return;
4449 
4450   // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
4451   // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
4452   // select is uniform.
4453   if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
4454       RI.isVGPR(MRI, Src1.getReg())) {
4455     Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
4456     const DebugLoc &DL = MI.getDebugLoc();
4457     BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
4458         .add(Src1);
4459     Src1.ChangeToRegister(Reg, false);
4460     return;
4461   }
4462 
4463   // We do not use commuteInstruction here because it is too aggressive and will
4464   // commute if it is possible. We only want to commute here if it improves
4465   // legality. This can be called a fairly large number of times so don't waste
4466   // compile time pointlessly swapping and checking legality again.
4467   if (HasImplicitSGPR || !MI.isCommutable()) {
4468     legalizeOpWithMove(MI, Src1Idx);
4469     return;
4470   }
4471 
4472   // If src0 can be used as src1, commuting will make the operands legal.
4473   // Otherwise we have to give up and insert a move.
4474   //
4475   // TODO: Other immediate-like operand kinds could be commuted if there was a
4476   // MachineOperand::ChangeTo* for them.
4477   if ((!Src1.isImm() && !Src1.isReg()) ||
4478       !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
4479     legalizeOpWithMove(MI, Src1Idx);
4480     return;
4481   }
4482 
4483   int CommutedOpc = commuteOpcode(MI);
4484   if (CommutedOpc == -1) {
4485     legalizeOpWithMove(MI, Src1Idx);
4486     return;
4487   }
4488 
4489   MI.setDesc(get(CommutedOpc));
4490 
4491   Register Src0Reg = Src0.getReg();
4492   unsigned Src0SubReg = Src0.getSubReg();
4493   bool Src0Kill = Src0.isKill();
4494 
4495   if (Src1.isImm())
4496     Src0.ChangeToImmediate(Src1.getImm());
4497   else if (Src1.isReg()) {
4498     Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
4499     Src0.setSubReg(Src1.getSubReg());
4500   } else
4501     llvm_unreachable("Should only have register or immediate operands");
4502 
4503   Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
4504   Src1.setSubReg(Src0SubReg);
4505   fixImplicitOperands(MI);
4506 }
4507 
4508 // Legalize VOP3 operands. Any operand type is allowed in any source position,
4509 // but at most one literal constant may be used, and only starting from GFX10.
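// A sketch of the budget enforced below (details come from the subtarget):
// before GFX10 at most one SGPR may be read through the constant bus and no
// literal is allowed in VOP3 encodings; on GFX10 ST.getConstantBusLimit()
// allows two SGPRs and ST.hasVOP3Literal() permits a single literal. Anything
// beyond that budget is folded into a VGPR with legalizeOpWithMove().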
4510 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4511 MachineInstr &MI) const { 4512 unsigned Opc = MI.getOpcode(); 4513 4514 int VOP3Idx[3] = { 4515 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4516 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4517 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4518 }; 4519 4520 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4521 Opc == AMDGPU::V_PERMLANEX16_B32) { 4522 // src1 and src2 must be scalar 4523 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4524 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4525 const DebugLoc &DL = MI.getDebugLoc(); 4526 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4527 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4528 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4529 .add(Src1); 4530 Src1.ChangeToRegister(Reg, false); 4531 } 4532 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4533 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4534 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4535 .add(Src2); 4536 Src2.ChangeToRegister(Reg, false); 4537 } 4538 } 4539 4540 // Find the one SGPR operand we are allowed to use. 4541 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4542 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4543 SmallDenseSet<unsigned> SGPRsUsed; 4544 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 4545 if (SGPRReg != AMDGPU::NoRegister) { 4546 SGPRsUsed.insert(SGPRReg); 4547 --ConstantBusLimit; 4548 } 4549 4550 for (unsigned i = 0; i < 3; ++i) { 4551 int Idx = VOP3Idx[i]; 4552 if (Idx == -1) 4553 break; 4554 MachineOperand &MO = MI.getOperand(Idx); 4555 4556 if (!MO.isReg()) { 4557 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4558 continue; 4559 4560 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4561 --LiteralLimit; 4562 --ConstantBusLimit; 4563 continue; 4564 } 4565 4566 --LiteralLimit; 4567 --ConstantBusLimit; 4568 legalizeOpWithMove(MI, Idx); 4569 continue; 4570 } 4571 4572 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4573 !isOperandLegal(MI, Idx, &MO)) { 4574 legalizeOpWithMove(MI, Idx); 4575 continue; 4576 } 4577 4578 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4579 continue; // VGPRs are legal 4580 4581 // We can use one SGPR in each VOP3 instruction prior to GFX10 4582 // and two starting from GFX10. 4583 if (SGPRsUsed.count(MO.getReg())) 4584 continue; 4585 if (ConstantBusLimit > 0) { 4586 SGPRsUsed.insert(MO.getReg()); 4587 --ConstantBusLimit; 4588 continue; 4589 } 4590 4591 // If we make it this far, then the operand is not legal and we must 4592 // legalize it. 
4593     legalizeOpWithMove(MI, Idx);
4594   }
4595 }
4596 
4597 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
4598                                          MachineRegisterInfo &MRI) const {
4599   const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
4600   const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
4601   Register DstReg = MRI.createVirtualRegister(SRC);
4602   unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
4603 
4604   if (RI.hasAGPRs(VRC)) {
4605     VRC = RI.getEquivalentVGPRClass(VRC);
4606     Register NewSrcReg = MRI.createVirtualRegister(VRC);
4607     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4608             get(TargetOpcode::COPY), NewSrcReg)
4609         .addReg(SrcReg);
4610     SrcReg = NewSrcReg;
4611   }
4612 
4613   if (SubRegs == 1) {
4614     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4615             get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
4616         .addReg(SrcReg);
4617     return DstReg;
4618   }
4619 
4620   SmallVector<unsigned, 8> SRegs;
4621   for (unsigned i = 0; i < SubRegs; ++i) {
4622     Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4623     BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4624             get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
4625         .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
4626     SRegs.push_back(SGPR);
4627   }
4628 
4629   MachineInstrBuilder MIB =
4630       BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4631               get(AMDGPU::REG_SEQUENCE), DstReg);
4632   for (unsigned i = 0; i < SubRegs; ++i) {
4633     MIB.addReg(SRegs[i]);
4634     MIB.addImm(RI.getSubRegFromChannel(i));
4635   }
4636   return DstReg;
4637 }
4638 
4639 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
4640                                        MachineInstr &MI) const {
4641 
4642   // If the pointer is stored in VGPRs, then we need to move it to
4643   // SGPRs using v_readfirstlane. This is safe because we only select
4644   // loads with uniform pointers for SMRD instructions, so we know the
4645   // pointer value is uniform.
4646   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
4647   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
4648     Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
4649     SBase->setReg(SGPR);
4650   }
4651   MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
4652   if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
4653     Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
4654     SOff->setReg(SGPR);
4655   }
4656 }
4657 
4658 // FIXME: Remove this when SelectionDAG is obsoleted.
4659 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI,
4660                                        MachineInstr &MI) const {
4661   if (!isSegmentSpecificFLAT(MI))
4662     return;
4663 
4664   // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence
4665   // analysis thinks they are uniform, so a readfirstlane should be valid.
4666   MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr);
4667   if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg())))
4668     return;
4669 
4670   Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI);
4671   SAddr->setReg(ToSGPR);
4672 }
4673 
4674 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
4675                                          MachineBasicBlock::iterator I,
4676                                          const TargetRegisterClass *DstRC,
4677                                          MachineOperand &Op,
4678                                          MachineRegisterInfo &MRI,
4679                                          const DebugLoc &DL) const {
4680   Register OpReg = Op.getReg();
4681   unsigned OpSubReg = Op.getSubReg();
4682 
4683   const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
4684       RI.getRegClassForReg(MRI, OpReg), OpSubReg);
4685 
4686   // Check if operand is already the correct register class.
4687 if (DstRC == OpRC) 4688 return; 4689 4690 Register DstReg = MRI.createVirtualRegister(DstRC); 4691 MachineInstr *Copy = 4692 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 4693 4694 Op.setReg(DstReg); 4695 Op.setSubReg(0); 4696 4697 MachineInstr *Def = MRI.getVRegDef(OpReg); 4698 if (!Def) 4699 return; 4700 4701 // Try to eliminate the copy if it is copying an immediate value. 4702 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 4703 FoldImmediate(*Copy, *Def, OpReg, &MRI); 4704 4705 bool ImpDef = Def->isImplicitDef(); 4706 while (!ImpDef && Def && Def->isCopy()) { 4707 if (Def->getOperand(1).getReg().isPhysical()) 4708 break; 4709 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4710 ImpDef = Def && Def->isImplicitDef(); 4711 } 4712 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 4713 !ImpDef) 4714 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 4715 } 4716 4717 // Emit the actual waterfall loop, executing the wrapped instruction for each 4718 // unique value of \p Rsrc across all lanes. In the best case we execute 1 4719 // iteration, in the worst case we execute 64 (once per lane). 4720 static void 4721 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 4722 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 4723 const DebugLoc &DL, MachineOperand &Rsrc) { 4724 MachineFunction &MF = *OrigBB.getParent(); 4725 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4726 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4727 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4728 unsigned SaveExecOpc = 4729 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 4730 unsigned XorTermOpc = 4731 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 4732 unsigned AndOpc = 4733 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 4734 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4735 4736 MachineBasicBlock::iterator I = LoopBB.begin(); 4737 4738 SmallVector<Register, 8> ReadlanePieces; 4739 Register CondReg = AMDGPU::NoRegister; 4740 4741 Register VRsrc = Rsrc.getReg(); 4742 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 4743 4744 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 4745 unsigned NumSubRegs = RegSize / 32; 4746 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 4747 4748 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 4749 4750 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4751 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4752 4753 // Read the next variant <- also loop target. 4754 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 4755 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 4756 4757 // Read the next variant <- also loop target. 4758 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 4759 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 4760 4761 ReadlanePieces.push_back(CurRegLo); 4762 ReadlanePieces.push_back(CurRegHi); 4763 4764 // Comparison is to be done as 64-bit. 
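    // The code below builds, per 64-bit slice (register names illustrative):
    //   CurReg     = REG_SEQUENCE(CurRegLo, sub0, CurRegHi, sub1)
    //   NewCondReg = V_CMP_EQ_U64(CurReg, corresponding VGPR pair)
    //   CondReg    = CondReg & NewCondReg   (after the first slice)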
4765 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 4766 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 4767 .addReg(CurRegLo) 4768 .addImm(AMDGPU::sub0) 4769 .addReg(CurRegHi) 4770 .addImm(AMDGPU::sub1); 4771 4772 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 4773 auto Cmp = 4774 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 4775 .addReg(CurReg); 4776 if (NumSubRegs <= 2) 4777 Cmp.addReg(VRsrc); 4778 else 4779 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 4780 4781 // Combine the comparision results with AND. 4782 if (CondReg == AMDGPU::NoRegister) // First. 4783 CondReg = NewCondReg; 4784 else { // If not the first, we create an AND. 4785 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 4786 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 4787 .addReg(CondReg) 4788 .addReg(NewCondReg); 4789 CondReg = AndReg; 4790 } 4791 } // End for loop. 4792 4793 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 4794 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 4795 4796 // Build scalar Rsrc. 4797 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 4798 unsigned Channel = 0; 4799 for (Register Piece : ReadlanePieces) { 4800 Merge.addReg(Piece) 4801 .addImm(TRI->getSubRegFromChannel(Channel++)); 4802 } 4803 4804 // Update Rsrc operand to use the SGPR Rsrc. 4805 Rsrc.setReg(SRsrc); 4806 Rsrc.setIsKill(true); 4807 4808 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4809 MRI.setSimpleHint(SaveExec, CondReg); 4810 4811 // Update EXEC to matching lanes, saving original to SaveExec. 4812 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 4813 .addReg(CondReg, RegState::Kill); 4814 4815 // The original instruction is here; we insert the terminators after it. 4816 I = LoopBB.end(); 4817 4818 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 4819 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 4820 .addReg(Exec) 4821 .addReg(SaveExec); 4822 4823 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 4824 } 4825 4826 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 4827 // with SGPRs by iterating over all unique values across all lanes. 4828 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 4829 MachineOperand &Rsrc, MachineDominatorTree *MDT, 4830 MachineBasicBlock::iterator Begin = nullptr, 4831 MachineBasicBlock::iterator End = nullptr) { 4832 MachineBasicBlock &MBB = *MI.getParent(); 4833 MachineFunction &MF = *MBB.getParent(); 4834 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4835 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4836 MachineRegisterInfo &MRI = MF.getRegInfo(); 4837 if (!Begin.isValid()) 4838 Begin = &MI; 4839 if (!End.isValid()) { 4840 End = &MI; 4841 ++End; 4842 } 4843 const DebugLoc &DL = MI.getDebugLoc(); 4844 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4845 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 4846 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4847 4848 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4849 4850 // Save the EXEC mask 4851 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 4852 4853 // Killed uses in the instruction we are waterfalling around will be 4854 // incorrect due to the added control-flow. 
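  // The instruction now executes once per unique resource value, so a
  // register that looked dead after a single execution may be read again on a
  // later iteration; the kill flags are cleared below to reflect that.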
4855 MachineBasicBlock::iterator AfterMI = MI; 4856 ++AfterMI; 4857 for (auto I = Begin; I != AfterMI; I++) { 4858 for (auto &MO : I->uses()) { 4859 if (MO.isReg() && MO.isUse()) { 4860 MRI.clearKillFlags(MO.getReg()); 4861 } 4862 } 4863 } 4864 4865 // To insert the loop we need to split the block. Move everything after this 4866 // point to a new block, and insert a new empty block between the two. 4867 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4868 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4869 MachineFunction::iterator MBBI(MBB); 4870 ++MBBI; 4871 4872 MF.insert(MBBI, LoopBB); 4873 MF.insert(MBBI, RemainderBB); 4874 4875 LoopBB->addSuccessor(LoopBB); 4876 LoopBB->addSuccessor(RemainderBB); 4877 4878 // Move Begin to MI to the LoopBB, and the remainder of the block to 4879 // RemainderBB. 4880 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4881 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 4882 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end()); 4883 4884 MBB.addSuccessor(LoopBB); 4885 4886 // Update dominators. We know that MBB immediately dominates LoopBB, that 4887 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4888 // dominates all of the successors transferred to it from MBB that MBB used 4889 // to properly dominate. 4890 if (MDT) { 4891 MDT->addNewBlock(LoopBB, &MBB); 4892 MDT->addNewBlock(RemainderBB, LoopBB); 4893 for (auto &Succ : RemainderBB->successors()) { 4894 if (MDT->properlyDominates(&MBB, Succ)) { 4895 MDT->changeImmediateDominator(Succ, RemainderBB); 4896 } 4897 } 4898 } 4899 4900 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 4901 4902 // Restore the EXEC mask 4903 MachineBasicBlock::iterator First = RemainderBB->begin(); 4904 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 4905 } 4906 4907 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 4908 static std::tuple<unsigned, unsigned> 4909 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 4910 MachineBasicBlock &MBB = *MI.getParent(); 4911 MachineFunction &MF = *MBB.getParent(); 4912 MachineRegisterInfo &MRI = MF.getRegInfo(); 4913 4914 // Extract the ptr from the resource descriptor. 
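  // Only the 64-bit base pointer held in sub0_sub1 of the 128-bit descriptor
  // is kept; a replacement descriptor with a zero base and the default data
  // format is rebuilt below and returned alongside the pointer.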
4915   unsigned RsrcPtr =
4916       TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
4917                              AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
4918 
4919   // Create an empty resource descriptor
4920   Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4921   Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4922   Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4923   Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4924   uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
4925 
4926   // Zero64 = 0
4927   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
4928       .addImm(0);
4929 
4930   // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
4931   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
4932       .addImm(RsrcDataFormat & 0xFFFFFFFF);
4933 
4934   // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
4935   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
4936       .addImm(RsrcDataFormat >> 32);
4937 
4938   // NewSRsrc = {Zero64, SRsrcFormat}
4939   BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
4940       .addReg(Zero64)
4941       .addImm(AMDGPU::sub0_sub1)
4942       .addReg(SRsrcFormatLo)
4943       .addImm(AMDGPU::sub2)
4944       .addReg(SRsrcFormatHi)
4945       .addImm(AMDGPU::sub3);
4946 
4947   return std::make_tuple(RsrcPtr, NewSRsrc);
4948 }
4949 
4950 void SIInstrInfo::legalizeOperands(MachineInstr &MI,
4951                                    MachineDominatorTree *MDT) const {
4952   MachineFunction &MF = *MI.getParent()->getParent();
4953   MachineRegisterInfo &MRI = MF.getRegInfo();
4954 
4955   // Legalize VOP2
4956   if (isVOP2(MI) || isVOPC(MI)) {
4957     legalizeOperandsVOP2(MRI, MI);
4958     return;
4959   }
4960 
4961   // Legalize VOP3
4962   if (isVOP3(MI)) {
4963     legalizeOperandsVOP3(MRI, MI);
4964     return;
4965   }
4966 
4967   // Legalize SMRD
4968   if (isSMRD(MI)) {
4969     legalizeOperandsSMRD(MRI, MI);
4970     return;
4971   }
4972 
4973   // Legalize FLAT
4974   if (isFLAT(MI)) {
4975     legalizeOperandsFLAT(MRI, MI);
4976     return;
4977   }
4978 
4979   // Legalize REG_SEQUENCE and PHI
4980   // The register class of the operands must be the same type as the register
4981   // class of the output.
4982   if (MI.getOpcode() == AMDGPU::PHI) {
4983     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
4984     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
4985       if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
4986         continue;
4987       const TargetRegisterClass *OpRC =
4988           MRI.getRegClass(MI.getOperand(i).getReg());
4989       if (RI.hasVectorRegisters(OpRC)) {
4990         VRC = OpRC;
4991       } else {
4992         SRC = OpRC;
4993       }
4994     }
4995 
4996     // If any of the operands are VGPR registers, then they all must be,
4997     // otherwise we will create illegal VGPR->SGPR copies when legalizing
4998     // them.
4999     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5000       if (!VRC) {
5001         assert(SRC);
5002         if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5003           VRC = &AMDGPU::VReg_1RegClass;
5004         } else
5005           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5006                     ? RI.getEquivalentAGPRClass(SRC)
5007                     : RI.getEquivalentVGPRClass(SRC);
5008       } else {
5009         VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5010                   ? RI.getEquivalentAGPRClass(VRC)
5011                   : RI.getEquivalentVGPRClass(VRC);
5012       }
5013       RC = VRC;
5014     } else {
5015       RC = SRC;
5016     }
5017 
5018     // Update all the operands so they have the same type.
5019 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5020 MachineOperand &Op = MI.getOperand(I); 5021 if (!Op.isReg() || !Op.getReg().isVirtual()) 5022 continue; 5023 5024 // MI is a PHI instruction. 5025 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 5026 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 5027 5028 // Avoid creating no-op copies with the same src and dst reg class. These 5029 // confuse some of the machine passes. 5030 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 5031 } 5032 } 5033 5034 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 5035 // VGPR dest type and SGPR sources, insert copies so all operands are 5036 // VGPRs. This seems to help operand folding / the register coalescer. 5037 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 5038 MachineBasicBlock *MBB = MI.getParent(); 5039 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 5040 if (RI.hasVGPRs(DstRC)) { 5041 // Update all the operands so they are VGPR register classes. These may 5042 // not be the same register class because REG_SEQUENCE supports mixing 5043 // subregister index types e.g. sub0_sub1 + sub2 + sub3 5044 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5045 MachineOperand &Op = MI.getOperand(I); 5046 if (!Op.isReg() || !Op.getReg().isVirtual()) 5047 continue; 5048 5049 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5050 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5051 if (VRC == OpRC) 5052 continue; 5053 5054 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5055 Op.setIsKill(); 5056 } 5057 } 5058 5059 return; 5060 } 5061 5062 // Legalize INSERT_SUBREG 5063 // src0 must have the same register class as dst 5064 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5065 Register Dst = MI.getOperand(0).getReg(); 5066 Register Src0 = MI.getOperand(1).getReg(); 5067 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5068 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5069 if (DstRC != Src0RC) { 5070 MachineBasicBlock *MBB = MI.getParent(); 5071 MachineOperand &Op = MI.getOperand(1); 5072 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5073 } 5074 return; 5075 } 5076 5077 // Legalize SI_INIT_M0 5078 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5079 MachineOperand &Src = MI.getOperand(0); 5080 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5081 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5082 return; 5083 } 5084 5085 // Legalize MIMG and MUBUF/MTBUF for shaders. 5086 // 5087 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5088 // scratch memory access. In both cases, the legalization never involves 5089 // conversion to the addr64 form. 
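  // A non-SGPR rsrc or sampler here is handled with the waterfall loop in
  // loadSRsrcFromVGPR; the ADDR64 rewrite further below is never used for
  // these instructions.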
  if (isMIMG(MI) ||
      (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
       (isMUBUF(MI) || isMTBUF(MI)))) {
    MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg())))
      loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT);

    MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg())))
      loadSRsrcFromVGPR(*this, MI, *SSamp, MDT);

    return;
  }

  // Legalize SI_CALL
  if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) {
    MachineOperand *Dest = &MI.getOperand(0);
    if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) {
      // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN, plus the
      // copies that follow, into the loop block; the copies from and to
      // physical registers need to move there as well.
      unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
      unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

      // Also move the copies to physical registers into the loop block
      MachineBasicBlock &MBB = *MI.getParent();
      MachineBasicBlock::iterator Start(&MI);
      while (Start->getOpcode() != FrameSetupOpcode)
        --Start;
      MachineBasicBlock::iterator End(&MI);
      while (End->getOpcode() != FrameDestroyOpcode)
        ++End;
      // Also include following copies of the return value
      ++End;
      while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() &&
             MI.definesRegister(End->getOperand(1).getReg()))
        ++End;
      loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End);
    }
  }

  // Legalize MUBUF* instructions.
  int RsrcIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
  if (RsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
                             RI.getRegClass(RsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    // Legalize a VGPR Rsrc.
    //
    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
    // a zero-value SRsrc.
    //
    // If the instruction is _OFFSET (both idxen and offen disabled), and we
    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
    // above.
    //
    // Otherwise we are on non-ADDR64 hardware, and/or we have
    // idxen/offen/bothen and we fall back to a waterfall loop.

    MachineBasicBlock &MBB = *MI.getParent();

    MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
      Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
      Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
      Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
          .addDef(CondReg0)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
          .addImm(0);

      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
          .addDef(CondReg1, RegState::Dead)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
          .addReg(CondReg0, RegState::Kill)
          .addImm(0);

      // NewVaddr = {NewVaddrHi, NewVaddrLo}
      BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);

      VAddr->setReg(NewVAddr);
      Rsrc->setReg(NewSRsrc);
    } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                .add(*VData)
                .addReg(NewVAddr)
                .addReg(NewSRsrc)
                .add(*SOffset)
                .add(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC =
                getNamedOperand(MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }
        if (const MachineOperand *DLC =
                getNamedOperand(MI, AMDGPU::OpName::dlc)) {
          MIB.addImm(DLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE =
                getNamedOperand(MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));

        MIB.cloneMemRefs(MI);
        Addr64 = MIB;
      } else {
        // Atomics with return.
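        // Only slc is carried over here: the atomics-with-return forms take
        // vdata_in as a tied operand and have no glc/dlc/tfe/swz operands to
        // copy.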
5253 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5254 .add(*VData) 5255 .add(*VDataIn) 5256 .addReg(NewVAddr) 5257 .addReg(NewSRsrc) 5258 .add(*SOffset) 5259 .add(*Offset) 5260 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 5261 .cloneMemRefs(MI); 5262 } 5263 5264 MI.removeFromParent(); 5265 5266 // NewVaddr = {NewVaddrHi, NewVaddrLo} 5267 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 5268 NewVAddr) 5269 .addReg(RsrcPtr, 0, AMDGPU::sub0) 5270 .addImm(AMDGPU::sub0) 5271 .addReg(RsrcPtr, 0, AMDGPU::sub1) 5272 .addImm(AMDGPU::sub1); 5273 } else { 5274 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 5275 // to SGPRs. 5276 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 5277 } 5278 } 5279 } 5280 5281 void SIInstrInfo::moveToVALU(MachineInstr &TopInst, 5282 MachineDominatorTree *MDT) const { 5283 SetVectorType Worklist; 5284 Worklist.insert(&TopInst); 5285 5286 while (!Worklist.empty()) { 5287 MachineInstr &Inst = *Worklist.pop_back_val(); 5288 MachineBasicBlock *MBB = Inst.getParent(); 5289 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 5290 5291 unsigned Opcode = Inst.getOpcode(); 5292 unsigned NewOpcode = getVALUOp(Inst); 5293 5294 // Handle some special cases 5295 switch (Opcode) { 5296 default: 5297 break; 5298 case AMDGPU::S_ADD_U64_PSEUDO: 5299 case AMDGPU::S_SUB_U64_PSEUDO: 5300 splitScalar64BitAddSub(Worklist, Inst, MDT); 5301 Inst.eraseFromParent(); 5302 continue; 5303 case AMDGPU::S_ADD_I32: 5304 case AMDGPU::S_SUB_I32: 5305 // FIXME: The u32 versions currently selected use the carry. 5306 if (moveScalarAddSub(Worklist, Inst, MDT)) 5307 continue; 5308 5309 // Default handling 5310 break; 5311 case AMDGPU::S_AND_B64: 5312 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5313 Inst.eraseFromParent(); 5314 continue; 5315 5316 case AMDGPU::S_OR_B64: 5317 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5318 Inst.eraseFromParent(); 5319 continue; 5320 5321 case AMDGPU::S_XOR_B64: 5322 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5323 Inst.eraseFromParent(); 5324 continue; 5325 5326 case AMDGPU::S_NAND_B64: 5327 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5328 Inst.eraseFromParent(); 5329 continue; 5330 5331 case AMDGPU::S_NOR_B64: 5332 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5333 Inst.eraseFromParent(); 5334 continue; 5335 5336 case AMDGPU::S_XNOR_B64: 5337 if (ST.hasDLInsts()) 5338 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5339 else 5340 splitScalar64BitXnor(Worklist, Inst, MDT); 5341 Inst.eraseFromParent(); 5342 continue; 5343 5344 case AMDGPU::S_ANDN2_B64: 5345 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5346 Inst.eraseFromParent(); 5347 continue; 5348 5349 case AMDGPU::S_ORN2_B64: 5350 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5351 Inst.eraseFromParent(); 5352 continue; 5353 5354 case AMDGPU::S_NOT_B64: 5355 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5356 Inst.eraseFromParent(); 5357 continue; 5358 5359 case AMDGPU::S_BCNT1_I32_B64: 5360 splitScalar64BitBCNT(Worklist, Inst); 5361 Inst.eraseFromParent(); 5362 continue; 5363 5364 case AMDGPU::S_BFE_I64: 5365 splitScalar64BitBFE(Worklist, Inst); 5366 Inst.eraseFromParent(); 5367 continue; 5368 5369 case AMDGPU::S_LSHL_B32: 5370 if (ST.hasOnlyRevVALUShifts()) { 5371 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5372 swapOperands(Inst); 5373 } 5374 break; 5375 case 
AMDGPU::S_ASHR_I32: 5376 if (ST.hasOnlyRevVALUShifts()) { 5377 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5378 swapOperands(Inst); 5379 } 5380 break; 5381 case AMDGPU::S_LSHR_B32: 5382 if (ST.hasOnlyRevVALUShifts()) { 5383 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5384 swapOperands(Inst); 5385 } 5386 break; 5387 case AMDGPU::S_LSHL_B64: 5388 if (ST.hasOnlyRevVALUShifts()) { 5389 NewOpcode = AMDGPU::V_LSHLREV_B64; 5390 swapOperands(Inst); 5391 } 5392 break; 5393 case AMDGPU::S_ASHR_I64: 5394 if (ST.hasOnlyRevVALUShifts()) { 5395 NewOpcode = AMDGPU::V_ASHRREV_I64; 5396 swapOperands(Inst); 5397 } 5398 break; 5399 case AMDGPU::S_LSHR_B64: 5400 if (ST.hasOnlyRevVALUShifts()) { 5401 NewOpcode = AMDGPU::V_LSHRREV_B64; 5402 swapOperands(Inst); 5403 } 5404 break; 5405 5406 case AMDGPU::S_ABS_I32: 5407 lowerScalarAbs(Worklist, Inst); 5408 Inst.eraseFromParent(); 5409 continue; 5410 5411 case AMDGPU::S_CBRANCH_SCC0: 5412 case AMDGPU::S_CBRANCH_SCC1: 5413 // Clear unused bits of vcc 5414 if (ST.isWave32()) 5415 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), 5416 AMDGPU::VCC_LO) 5417 .addReg(AMDGPU::EXEC_LO) 5418 .addReg(AMDGPU::VCC_LO); 5419 else 5420 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 5421 AMDGPU::VCC) 5422 .addReg(AMDGPU::EXEC) 5423 .addReg(AMDGPU::VCC); 5424 break; 5425 5426 case AMDGPU::S_BFE_U64: 5427 case AMDGPU::S_BFM_B64: 5428 llvm_unreachable("Moving this op to VALU not implemented"); 5429 5430 case AMDGPU::S_PACK_LL_B32_B16: 5431 case AMDGPU::S_PACK_LH_B32_B16: 5432 case AMDGPU::S_PACK_HH_B32_B16: 5433 movePackToVALU(Worklist, MRI, Inst); 5434 Inst.eraseFromParent(); 5435 continue; 5436 5437 case AMDGPU::S_XNOR_B32: 5438 lowerScalarXnor(Worklist, Inst); 5439 Inst.eraseFromParent(); 5440 continue; 5441 5442 case AMDGPU::S_NAND_B32: 5443 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5444 Inst.eraseFromParent(); 5445 continue; 5446 5447 case AMDGPU::S_NOR_B32: 5448 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5449 Inst.eraseFromParent(); 5450 continue; 5451 5452 case AMDGPU::S_ANDN2_B32: 5453 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5454 Inst.eraseFromParent(); 5455 continue; 5456 5457 case AMDGPU::S_ORN2_B32: 5458 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5459 Inst.eraseFromParent(); 5460 continue; 5461 5462 // TODO: remove as soon as everything is ready 5463 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5464 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5465 // can only be selected from the uniform SDNode. 5466 case AMDGPU::S_ADD_CO_PSEUDO: 5467 case AMDGPU::S_SUB_CO_PSEUDO: { 5468 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5469 ? 
AMDGPU::V_ADDC_U32_e64 5470 : AMDGPU::V_SUBB_U32_e64; 5471 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5472 5473 Register CarryInReg = Inst.getOperand(4).getReg(); 5474 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 5475 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 5476 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 5477 .addReg(CarryInReg); 5478 } 5479 5480 Register CarryOutReg = Inst.getOperand(1).getReg(); 5481 5482 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5483 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5484 MachineInstr *CarryOp = 5485 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5486 .addReg(CarryOutReg, RegState::Define) 5487 .add(Inst.getOperand(2)) 5488 .add(Inst.getOperand(3)) 5489 .addReg(CarryInReg) 5490 .addImm(0); 5491 legalizeOperands(*CarryOp); 5492 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 5493 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 5494 Inst.eraseFromParent(); 5495 } 5496 continue; 5497 case AMDGPU::S_UADDO_PSEUDO: 5498 case AMDGPU::S_USUBO_PSEUDO: { 5499 const DebugLoc &DL = Inst.getDebugLoc(); 5500 MachineOperand &Dest0 = Inst.getOperand(0); 5501 MachineOperand &Dest1 = Inst.getOperand(1); 5502 MachineOperand &Src0 = Inst.getOperand(2); 5503 MachineOperand &Src1 = Inst.getOperand(3); 5504 5505 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 5506 ? AMDGPU::V_ADD_CO_U32_e64 5507 : AMDGPU::V_SUB_CO_U32_e64; 5508 const TargetRegisterClass *NewRC = 5509 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 5510 Register DestReg = MRI.createVirtualRegister(NewRC); 5511 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 5512 .addReg(Dest1.getReg(), RegState::Define) 5513 .add(Src0) 5514 .add(Src1) 5515 .addImm(0); // clamp bit 5516 5517 legalizeOperands(*NewInstr, MDT); 5518 5519 MRI.replaceRegWith(Dest0.getReg(), DestReg); 5520 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 5521 Worklist); 5522 Inst.eraseFromParent(); 5523 } 5524 continue; 5525 5526 case AMDGPU::S_CSELECT_B32: 5527 case AMDGPU::S_CSELECT_B64: 5528 lowerSelect(Worklist, Inst, MDT); 5529 Inst.eraseFromParent(); 5530 continue; 5531 } 5532 5533 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 5534 // We cannot move this instruction to the VALU, so we should try to 5535 // legalize its operands instead. 5536 legalizeOperands(Inst, MDT); 5537 continue; 5538 } 5539 5540 // Use the new VALU Opcode. 5541 const MCInstrDesc &NewDesc = get(NewOpcode); 5542 Inst.setDesc(NewDesc); 5543 5544 // Remove any references to SCC. Vector instructions can't read from it, and 5545 // We're just about to add the implicit use / defs of VCC, and we don't want 5546 // both. 5547 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 5548 MachineOperand &Op = Inst.getOperand(i); 5549 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 5550 // Only propagate through live-def of SCC. 5551 if (Op.isDef() && !Op.isDead()) 5552 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 5553 Inst.RemoveOperand(i); 5554 } 5555 } 5556 5557 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 5558 // We are converting these to a BFE, so we need to add the missing 5559 // operands for the size and offset. 5560 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 
8 : 16; 5561 Inst.addOperand(MachineOperand::CreateImm(0)); 5562 Inst.addOperand(MachineOperand::CreateImm(Size)); 5563 5564 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 5565 // The VALU version adds the second operand to the result, so insert an 5566 // extra 0 operand. 5567 Inst.addOperand(MachineOperand::CreateImm(0)); 5568 } 5569 5570 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 5571 fixImplicitOperands(Inst); 5572 5573 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 5574 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 5575 // If we need to move this to VGPRs, we need to unpack the second operand 5576 // back into the 2 separate ones for bit offset and width. 5577 assert(OffsetWidthOp.isImm() && 5578 "Scalar BFE is only implemented for constant width and offset"); 5579 uint32_t Imm = OffsetWidthOp.getImm(); 5580 5581 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 5582 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 5583 Inst.RemoveOperand(2); // Remove old immediate. 5584 Inst.addOperand(MachineOperand::CreateImm(Offset)); 5585 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 5586 } 5587 5588 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 5589 unsigned NewDstReg = AMDGPU::NoRegister; 5590 if (HasDst) { 5591 Register DstReg = Inst.getOperand(0).getReg(); 5592 if (DstReg.isPhysical()) 5593 continue; 5594 5595 // Update the destination register class. 5596 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 5597 if (!NewDstRC) 5598 continue; 5599 5600 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() && 5601 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 5602 // Instead of creating a copy where src and dst are the same register 5603 // class, we just replace all uses of dst with src. These kinds of 5604 // copies interfere with the heuristics MachineSink uses to decide 5605 // whether or not to split a critical edge. Since the pass assumes 5606 // that copies will end up as machine instructions and not be 5607 // eliminated. 5608 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 5609 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 5610 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 5611 Inst.getOperand(0).setReg(DstReg); 5612 5613 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 5614 // these are deleted later, but at -O0 it would leave a suspicious 5615 // looking illegal copy of an undef register. 5616 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 5617 Inst.RemoveOperand(I); 5618 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 5619 continue; 5620 } 5621 5622 NewDstReg = MRI.createVirtualRegister(NewDstRC); 5623 MRI.replaceRegWith(DstReg, NewDstReg); 5624 } 5625 5626 // Legalize the operands 5627 legalizeOperands(Inst, MDT); 5628 5629 if (HasDst) 5630 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 5631 } 5632 } 5633 5634 // Add/sub require special handling to deal with carry outs. 5635 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 5636 MachineDominatorTree *MDT) const { 5637 if (ST.hasAddNoCarry()) { 5638 // Assume there is no user of scc since we don't select this in that case. 5639 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 5640 // is used. 
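    // Rewrite the instruction in place: drop the dead SCC def, switch to the
    // no-carry VALU opcode, and append the clamp modifier that the _e64
    // encoding expects.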
5641 5642 MachineBasicBlock &MBB = *Inst.getParent(); 5643 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5644 5645 Register OldDstReg = Inst.getOperand(0).getReg(); 5646 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5647 5648 unsigned Opc = Inst.getOpcode(); 5649 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 5650 5651 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 5652 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 5653 5654 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 5655 Inst.RemoveOperand(3); 5656 5657 Inst.setDesc(get(NewOpc)); 5658 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 5659 Inst.addImplicitDefUseOperands(*MBB.getParent()); 5660 MRI.replaceRegWith(OldDstReg, ResultReg); 5661 legalizeOperands(Inst, MDT); 5662 5663 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5664 return true; 5665 } 5666 5667 return false; 5668 } 5669 5670 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst, 5671 MachineDominatorTree *MDT) const { 5672 5673 MachineBasicBlock &MBB = *Inst.getParent(); 5674 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5675 MachineBasicBlock::iterator MII = Inst; 5676 DebugLoc DL = Inst.getDebugLoc(); 5677 5678 MachineOperand &Dest = Inst.getOperand(0); 5679 MachineOperand &Src0 = Inst.getOperand(1); 5680 MachineOperand &Src1 = Inst.getOperand(2); 5681 MachineOperand &Cond = Inst.getOperand(3); 5682 5683 Register SCCSource = Cond.getReg(); 5684 // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead. 5685 if (!Cond.isUndef()) { 5686 for (MachineInstr &CandI : 5687 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 5688 Inst.getParent()->rend())) { 5689 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 5690 -1) { 5691 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 5692 SCCSource = CandI.getOperand(1).getReg(); 5693 } 5694 break; 5695 } 5696 } 5697 } 5698 5699 // If this is a trivial select where the condition is effectively not SCC 5700 // (SCCSource is a source of copy to SCC), then the select is semantically 5701 // equivalent to copying SCCSource. Hence, there is no need to create 5702 // V_CNDMASK, we can just use that and bail out. 5703 if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) && 5704 Src1.isImm() && (Src1.getImm() == 0)) { 5705 MRI.replaceRegWith(Dest.getReg(), SCCSource); 5706 return; 5707 } 5708 5709 const TargetRegisterClass *TC = ST.getWavefrontSize() == 64 5710 ? &AMDGPU::SReg_64_XEXECRegClass 5711 : &AMDGPU::SReg_32_XM0_XEXECRegClass; 5712 Register CopySCC = MRI.createVirtualRegister(TC); 5713 5714 if (SCCSource == AMDGPU::SCC) { 5715 // Insert a trivial select instead of creating a copy, because a copy from 5716 // SCC would semantically mean just copying a single bit, but we may need 5717 // the result to be a vector condition mask that needs preserving. 5718 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64 5719 : AMDGPU::S_CSELECT_B32; 5720 auto NewSelect = 5721 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0); 5722 NewSelect->getOperand(3).setIsUndef(Cond.isUndef()); 5723 } else { 5724 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource); 5725 } 5726 5727 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5728 5729 auto UpdatedInst = 5730 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg) 5731 .addImm(0) 5732 .add(Src1) // False 5733 .addImm(0) 5734 .add(Src0) // True 5735 .addReg(CopySCC); 5736 5737 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5738 legalizeOperands(*UpdatedInst, MDT); 5739 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5740 } 5741 5742 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 5743 MachineInstr &Inst) const { 5744 MachineBasicBlock &MBB = *Inst.getParent(); 5745 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5746 MachineBasicBlock::iterator MII = Inst; 5747 DebugLoc DL = Inst.getDebugLoc(); 5748 5749 MachineOperand &Dest = Inst.getOperand(0); 5750 MachineOperand &Src = Inst.getOperand(1); 5751 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5752 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5753 5754 unsigned SubOp = ST.hasAddNoCarry() ? 5755 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32; 5756 5757 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 5758 .addImm(0) 5759 .addReg(Src.getReg()); 5760 5761 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 5762 .addReg(Src.getReg()) 5763 .addReg(TmpReg); 5764 5765 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5766 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5767 } 5768 5769 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 5770 MachineInstr &Inst) const { 5771 MachineBasicBlock &MBB = *Inst.getParent(); 5772 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5773 MachineBasicBlock::iterator MII = Inst; 5774 const DebugLoc &DL = Inst.getDebugLoc(); 5775 5776 MachineOperand &Dest = Inst.getOperand(0); 5777 MachineOperand &Src0 = Inst.getOperand(1); 5778 MachineOperand &Src1 = Inst.getOperand(2); 5779 5780 if (ST.hasDLInsts()) { 5781 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5782 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 5783 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 5784 5785 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 5786 .add(Src0) 5787 .add(Src1); 5788 5789 MRI.replaceRegWith(Dest.getReg(), NewDest); 5790 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5791 } else { 5792 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 5793 // invert either source and then perform the XOR. If either source is a 5794 // scalar register, then we can leave the inversion on the scalar unit to 5795 // acheive a better distrubution of scalar and vector instructions. 5796 bool Src0IsSGPR = Src0.isReg() && 5797 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 5798 bool Src1IsSGPR = Src1.isReg() && 5799 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 5800 MachineInstr *Xor; 5801 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5802 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5803 5804 // Build a pair of scalar instructions and add them to the work list. 5805 // The next iteration over the work list will lower these to the vector 5806 // unit as necessary. 
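    // Put the S_NOT on whichever source is already scalar so that at most the
    // XOR (plus the trailing NOT in the all-VGPR case) has to migrate to the
    // VALU.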
5807 if (Src0IsSGPR) { 5808 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5809 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5810 .addReg(Temp) 5811 .add(Src1); 5812 } else if (Src1IsSGPR) { 5813 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5814 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5815 .add(Src0) 5816 .addReg(Temp); 5817 } else { 5818 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5819 .add(Src0) 5820 .add(Src1); 5821 MachineInstr *Not = 5822 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5823 Worklist.insert(Not); 5824 } 5825 5826 MRI.replaceRegWith(Dest.getReg(), NewDest); 5827 5828 Worklist.insert(Xor); 5829 5830 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5831 } 5832 } 5833 5834 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5835 MachineInstr &Inst, 5836 unsigned Opcode) const { 5837 MachineBasicBlock &MBB = *Inst.getParent(); 5838 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5839 MachineBasicBlock::iterator MII = Inst; 5840 const DebugLoc &DL = Inst.getDebugLoc(); 5841 5842 MachineOperand &Dest = Inst.getOperand(0); 5843 MachineOperand &Src0 = Inst.getOperand(1); 5844 MachineOperand &Src1 = Inst.getOperand(2); 5845 5846 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5847 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5848 5849 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5850 .add(Src0) 5851 .add(Src1); 5852 5853 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5854 .addReg(Interm); 5855 5856 Worklist.insert(&Op); 5857 Worklist.insert(&Not); 5858 5859 MRI.replaceRegWith(Dest.getReg(), NewDest); 5860 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5861 } 5862 5863 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5864 MachineInstr &Inst, 5865 unsigned Opcode) const { 5866 MachineBasicBlock &MBB = *Inst.getParent(); 5867 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5868 MachineBasicBlock::iterator MII = Inst; 5869 const DebugLoc &DL = Inst.getDebugLoc(); 5870 5871 MachineOperand &Dest = Inst.getOperand(0); 5872 MachineOperand &Src0 = Inst.getOperand(1); 5873 MachineOperand &Src1 = Inst.getOperand(2); 5874 5875 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5876 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5877 5878 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5879 .add(Src1); 5880 5881 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5882 .add(Src0) 5883 .addReg(Interm); 5884 5885 Worklist.insert(&Not); 5886 Worklist.insert(&Op); 5887 5888 MRI.replaceRegWith(Dest.getReg(), NewDest); 5889 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5890 } 5891 5892 void SIInstrInfo::splitScalar64BitUnaryOp( 5893 SetVectorType &Worklist, MachineInstr &Inst, 5894 unsigned Opcode) const { 5895 MachineBasicBlock &MBB = *Inst.getParent(); 5896 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5897 5898 MachineOperand &Dest = Inst.getOperand(0); 5899 MachineOperand &Src0 = Inst.getOperand(1); 5900 DebugLoc DL = Inst.getDebugLoc(); 5901 5902 MachineBasicBlock::iterator MII = Inst; 5903 5904 const MCInstrDesc &InstDesc = get(Opcode); 5905 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
5906 MRI.getRegClass(Src0.getReg()) : 5907 &AMDGPU::SGPR_32RegClass; 5908 5909 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5910 5911 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5912 AMDGPU::sub0, Src0SubRC); 5913 5914 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5915 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5916 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5917 5918 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5919 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5920 5921 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5922 AMDGPU::sub1, Src0SubRC); 5923 5924 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5925 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5926 5927 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5928 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5929 .addReg(DestSub0) 5930 .addImm(AMDGPU::sub0) 5931 .addReg(DestSub1) 5932 .addImm(AMDGPU::sub1); 5933 5934 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5935 5936 Worklist.insert(&LoHalf); 5937 Worklist.insert(&HiHalf); 5938 5939 // We don't need to legalizeOperands here because for a single operand, src0 5940 // will support any kind of input. 5941 5942 // Move all users of this moved value. 5943 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5944 } 5945 5946 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 5947 MachineInstr &Inst, 5948 MachineDominatorTree *MDT) const { 5949 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 5950 5951 MachineBasicBlock &MBB = *Inst.getParent(); 5952 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5953 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5954 5955 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5956 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5957 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5958 5959 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5960 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 5961 5962 MachineOperand &Dest = Inst.getOperand(0); 5963 MachineOperand &Src0 = Inst.getOperand(1); 5964 MachineOperand &Src1 = Inst.getOperand(2); 5965 const DebugLoc &DL = Inst.getDebugLoc(); 5966 MachineBasicBlock::iterator MII = Inst; 5967 5968 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 5969 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 5970 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5971 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5972 5973 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5974 AMDGPU::sub0, Src0SubRC); 5975 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5976 AMDGPU::sub0, Src1SubRC); 5977 5978 5979 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5980 AMDGPU::sub1, Src0SubRC); 5981 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5982 AMDGPU::sub1, Src1SubRC); 5983 5984 unsigned LoOpc = IsAdd ? 
      AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
  MachineInstr *LoHalf =
      BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
          .addReg(CarryReg, RegState::Define)
          .add(SrcReg0Sub0)
          .add(SrcReg1Sub0)
          .addImm(0); // clamp bit

  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
      BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
          .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
          .add(SrcReg0Sub1)
          .add(SrcReg1Sub1)
          .addReg(CarryReg, RegState::Kill)
          .addImm(0); // clamp bit

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
      .addReg(DestSub0)
      .addImm(AMDGPU::sub0)
      .addReg(DestSub1)
      .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
6038 MRI.getRegClass(Src1.getReg()) : 6039 &AMDGPU::SGPR_32RegClass; 6040 6041 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6042 6043 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6044 AMDGPU::sub0, Src0SubRC); 6045 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6046 AMDGPU::sub0, Src1SubRC); 6047 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6048 AMDGPU::sub1, Src0SubRC); 6049 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6050 AMDGPU::sub1, Src1SubRC); 6051 6052 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6053 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6054 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6055 6056 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6057 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 6058 .add(SrcReg0Sub0) 6059 .add(SrcReg1Sub0); 6060 6061 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6062 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 6063 .add(SrcReg0Sub1) 6064 .add(SrcReg1Sub1); 6065 6066 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6067 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6068 .addReg(DestSub0) 6069 .addImm(AMDGPU::sub0) 6070 .addReg(DestSub1) 6071 .addImm(AMDGPU::sub1); 6072 6073 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6074 6075 Worklist.insert(&LoHalf); 6076 Worklist.insert(&HiHalf); 6077 6078 // Move all users of this moved vlaue. 6079 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6080 } 6081 6082 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 6083 MachineInstr &Inst, 6084 MachineDominatorTree *MDT) const { 6085 MachineBasicBlock &MBB = *Inst.getParent(); 6086 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6087 6088 MachineOperand &Dest = Inst.getOperand(0); 6089 MachineOperand &Src0 = Inst.getOperand(1); 6090 MachineOperand &Src1 = Inst.getOperand(2); 6091 const DebugLoc &DL = Inst.getDebugLoc(); 6092 6093 MachineBasicBlock::iterator MII = Inst; 6094 6095 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6096 6097 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 6098 6099 MachineOperand* Op0; 6100 MachineOperand* Op1; 6101 6102 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 6103 Op0 = &Src0; 6104 Op1 = &Src1; 6105 } else { 6106 Op0 = &Src1; 6107 Op1 = &Src0; 6108 } 6109 6110 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 6111 .add(*Op0); 6112 6113 Register NewDest = MRI.createVirtualRegister(DestRC); 6114 6115 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 6116 .addReg(Interm) 6117 .add(*Op1); 6118 6119 MRI.replaceRegWith(Dest.getReg(), NewDest); 6120 6121 Worklist.insert(&Xor); 6122 } 6123 6124 void SIInstrInfo::splitScalar64BitBCNT( 6125 SetVectorType &Worklist, MachineInstr &Inst) const { 6126 MachineBasicBlock &MBB = *Inst.getParent(); 6127 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6128 6129 MachineBasicBlock::iterator MII = Inst; 6130 const DebugLoc &DL = Inst.getDebugLoc(); 6131 6132 MachineOperand &Dest = Inst.getOperand(0); 6133 MachineOperand &Src = Inst.getOperand(1); 6134 6135 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 6136 const TargetRegisterClass *SrcRC = Src.isReg() ? 
6137 MRI.getRegClass(Src.getReg()) : 6138 &AMDGPU::SGPR_32RegClass; 6139 6140 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6141 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6142 6143 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 6144 6145 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6146 AMDGPU::sub0, SrcSubRC); 6147 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6148 AMDGPU::sub1, SrcSubRC); 6149 6150 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 6151 6152 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 6153 6154 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6155 6156 // We don't need to legalize operands here. src0 for etiher instruction can be 6157 // an SGPR, and the second input is unused or determined here. 6158 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6159 } 6160 6161 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 6162 MachineInstr &Inst) const { 6163 MachineBasicBlock &MBB = *Inst.getParent(); 6164 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6165 MachineBasicBlock::iterator MII = Inst; 6166 const DebugLoc &DL = Inst.getDebugLoc(); 6167 6168 MachineOperand &Dest = Inst.getOperand(0); 6169 uint32_t Imm = Inst.getOperand(2).getImm(); 6170 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6171 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6172 6173 (void) Offset; 6174 6175 // Only sext_inreg cases handled. 6176 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 6177 Offset == 0 && "Not implemented"); 6178 6179 if (BitWidth < 32) { 6180 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6181 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6182 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6183 6184 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 6185 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 6186 .addImm(0) 6187 .addImm(BitWidth); 6188 6189 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 6190 .addImm(31) 6191 .addReg(MidRegLo); 6192 6193 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6194 .addReg(MidRegLo) 6195 .addImm(AMDGPU::sub0) 6196 .addReg(MidRegHi) 6197 .addImm(AMDGPU::sub1); 6198 6199 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6200 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6201 return; 6202 } 6203 6204 MachineOperand &Src = Inst.getOperand(1); 6205 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6206 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6207 6208 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 6209 .addImm(31) 6210 .addReg(Src.getReg(), 0, AMDGPU::sub0); 6211 6212 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6213 .addReg(Src.getReg(), 0, AMDGPU::sub0) 6214 .addImm(AMDGPU::sub0) 6215 .addReg(TmpReg) 6216 .addImm(AMDGPU::sub1); 6217 6218 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6219 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6220 } 6221 6222 void SIInstrInfo::addUsersToMoveToVALUWorklist( 6223 Register DstReg, 6224 MachineRegisterInfo &MRI, 6225 SetVectorType &Worklist) const { 6226 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 6227 E = MRI.use_end(); I != E;) { 6228 MachineInstr &UseMI = *I->getParent(); 6229 6230 unsigned OpNo = 0; 
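    // For the copy-like opcodes below, the result's register class is what
    // decides whether the user must be moved, so OpNo stays at 0; for any
    // other user, check the class of the operand that actually reads DstReg.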
6231 6232 switch (UseMI.getOpcode()) { 6233 case AMDGPU::COPY: 6234 case AMDGPU::WQM: 6235 case AMDGPU::SOFT_WQM: 6236 case AMDGPU::WWM: 6237 case AMDGPU::REG_SEQUENCE: 6238 case AMDGPU::PHI: 6239 case AMDGPU::INSERT_SUBREG: 6240 break; 6241 default: 6242 OpNo = I.getOperandNo(); 6243 break; 6244 } 6245 6246 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 6247 Worklist.insert(&UseMI); 6248 6249 do { 6250 ++I; 6251 } while (I != E && I->getParent() == &UseMI); 6252 } else { 6253 ++I; 6254 } 6255 } 6256 } 6257 6258 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 6259 MachineRegisterInfo &MRI, 6260 MachineInstr &Inst) const { 6261 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6262 MachineBasicBlock *MBB = Inst.getParent(); 6263 MachineOperand &Src0 = Inst.getOperand(1); 6264 MachineOperand &Src1 = Inst.getOperand(2); 6265 const DebugLoc &DL = Inst.getDebugLoc(); 6266 6267 switch (Inst.getOpcode()) { 6268 case AMDGPU::S_PACK_LL_B32_B16: { 6269 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6270 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6271 6272 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 6273 // 0. 6274 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6275 .addImm(0xffff); 6276 6277 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 6278 .addReg(ImmReg, RegState::Kill) 6279 .add(Src0); 6280 6281 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 6282 .add(Src1) 6283 .addImm(16) 6284 .addReg(TmpReg, RegState::Kill); 6285 break; 6286 } 6287 case AMDGPU::S_PACK_LH_B32_B16: { 6288 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6289 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6290 .addImm(0xffff); 6291 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 6292 .addReg(ImmReg, RegState::Kill) 6293 .add(Src0) 6294 .add(Src1); 6295 break; 6296 } 6297 case AMDGPU::S_PACK_HH_B32_B16: { 6298 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6299 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6300 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 6301 .addImm(16) 6302 .add(Src0); 6303 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6304 .addImm(0xffff0000); 6305 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 6306 .add(Src1) 6307 .addReg(ImmReg, RegState::Kill) 6308 .addReg(TmpReg, RegState::Kill); 6309 break; 6310 } 6311 default: 6312 llvm_unreachable("unhandled s_pack_* instruction"); 6313 } 6314 6315 MachineOperand &Dest = Inst.getOperand(0); 6316 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6317 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6318 } 6319 6320 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 6321 MachineInstr &SCCDefInst, 6322 SetVectorType &Worklist) const { 6323 bool SCCUsedImplicitly = false; 6324 6325 // Ensure that def inst defines SCC, which is still live. 6326 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 6327 !Op.isDead() && Op.getParent() == &SCCDefInst); 6328 SmallVector<MachineInstr *, 4> CopyToDelete; 6329 // This assumes that all the users of SCC are in the same block 6330 // as the SCC def. 6331 for (MachineInstr &MI : // Skip the def inst itself. 6332 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 6333 SCCDefInst.getParent()->end())) { 6334 // Check if SCC is used first. 
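    // A plain COPY of SCC is rewritten instead of being moved: the carry and
    // select users it feeds are pointed at VCC directly, and the copy itself
    // is queued for deletion below.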
6335 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) { 6336 if (MI.isCopy()) { 6337 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6338 Register DestReg = MI.getOperand(0).getReg(); 6339 6340 for (auto &User : MRI.use_nodbg_instructions(DestReg)) { 6341 if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) || 6342 (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) { 6343 User.getOperand(4).setReg(RI.getVCC()); 6344 Worklist.insert(&User); 6345 } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) { 6346 User.getOperand(5).setReg(RI.getVCC()); 6347 // No need to add to Worklist. 6348 } 6349 } 6350 CopyToDelete.push_back(&MI); 6351 } else { 6352 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 || 6353 MI.getOpcode() == AMDGPU::S_CSELECT_B64) { 6354 // This is an implicit use of SCC and it is really expected by 6355 // the SCC users to handle. 6356 // We cannot preserve the edge to the user so add the explicit 6357 // copy: SCC = COPY VCC. 6358 // The copy will be cleaned up during the processing of the user 6359 // in lowerSelect. 6360 SCCUsedImplicitly = true; 6361 } 6362 6363 Worklist.insert(&MI); 6364 } 6365 } 6366 // Exit if we find another SCC def. 6367 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 6368 break; 6369 } 6370 for (auto &Copy : CopyToDelete) 6371 Copy->eraseFromParent(); 6372 6373 if (SCCUsedImplicitly) { 6374 BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()), 6375 SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC) 6376 .addReg(RI.getVCC()); 6377 } 6378 } 6379 6380 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 6381 const MachineInstr &Inst) const { 6382 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 6383 6384 switch (Inst.getOpcode()) { 6385 // For target instructions, getOpRegClass just returns the virtual register 6386 // class associated with the operand, so we need to find an equivalent VGPR 6387 // register class in order to move the instruction to the VALU. 6388 case AMDGPU::COPY: 6389 case AMDGPU::PHI: 6390 case AMDGPU::REG_SEQUENCE: 6391 case AMDGPU::INSERT_SUBREG: 6392 case AMDGPU::WQM: 6393 case AMDGPU::SOFT_WQM: 6394 case AMDGPU::WWM: { 6395 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 6396 if (RI.hasAGPRs(SrcRC)) { 6397 if (RI.hasAGPRs(NewDstRC)) 6398 return nullptr; 6399 6400 switch (Inst.getOpcode()) { 6401 case AMDGPU::PHI: 6402 case AMDGPU::REG_SEQUENCE: 6403 case AMDGPU::INSERT_SUBREG: 6404 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 6405 break; 6406 default: 6407 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6408 } 6409 6410 if (!NewDstRC) 6411 return nullptr; 6412 } else { 6413 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 6414 return nullptr; 6415 6416 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6417 if (!NewDstRC) 6418 return nullptr; 6419 } 6420 6421 return NewDstRC; 6422 } 6423 default: 6424 return NewDstRC; 6425 } 6426 } 6427 6428 // Find the one SGPR operand we are allowed to use. 6429 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 6430 int OpIndices[3]) const { 6431 const MCInstrDesc &Desc = MI.getDesc(); 6432 6433 // Find the one SGPR operand we are allowed to use. 6434 // 6435 // First we need to consider the instruction's operand requirements before 6436 // legalizing. Some operands are required to be SGPRs, such as implicit uses 6437 // of VCC, but we are still bound by the constant bus requirement to only use 6438 // one. 
6439 // 6440 // If the operand's class is an SGPR, we can never move it. 6441 6442 Register SGPRReg = findImplicitSGPRRead(MI); 6443 if (SGPRReg != AMDGPU::NoRegister) 6444 return SGPRReg; 6445 6446 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 6447 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6448 6449 for (unsigned i = 0; i < 3; ++i) { 6450 int Idx = OpIndices[i]; 6451 if (Idx == -1) 6452 break; 6453 6454 const MachineOperand &MO = MI.getOperand(Idx); 6455 if (!MO.isReg()) 6456 continue; 6457 6458 // Is this operand statically required to be an SGPR based on the operand 6459 // constraints? 6460 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 6461 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 6462 if (IsRequiredSGPR) 6463 return MO.getReg(); 6464 6465 // If this could be a VGPR or an SGPR, Check the dynamic register class. 6466 Register Reg = MO.getReg(); 6467 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 6468 if (RI.isSGPRClass(RegRC)) 6469 UsedSGPRs[i] = Reg; 6470 } 6471 6472 // We don't have a required SGPR operand, so we have a bit more freedom in 6473 // selecting operands to move. 6474 6475 // Try to select the most used SGPR. If an SGPR is equal to one of the 6476 // others, we choose that. 6477 // 6478 // e.g. 6479 // V_FMA_F32 v0, s0, s0, s0 -> No moves 6480 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 6481 6482 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 6483 // prefer those. 6484 6485 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 6486 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 6487 SGPRReg = UsedSGPRs[0]; 6488 } 6489 6490 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 6491 if (UsedSGPRs[1] == UsedSGPRs[2]) 6492 SGPRReg = UsedSGPRs[1]; 6493 } 6494 6495 return SGPRReg; 6496 } 6497 6498 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 6499 unsigned OperandName) const { 6500 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 6501 if (Idx == -1) 6502 return nullptr; 6503 6504 return &MI.getOperand(Idx); 6505 } 6506 6507 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 6508 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 6509 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT 6510 (1ULL << 56) | // RESOURCE_LEVEL = 1 6511 (3ULL << 60); // OOB_SELECT = 3 6512 } 6513 6514 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 6515 if (ST.isAmdHsaOS()) { 6516 // Set ATC = 1. GFX9 doesn't have this bit. 6517 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6518 RsrcDataFormat |= (1ULL << 56); 6519 6520 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 6521 // BTW, it disables TC L2 and therefore decreases performance. 6522 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 6523 RsrcDataFormat |= (2ULL << 59); 6524 } 6525 6526 return RsrcDataFormat; 6527 } 6528 6529 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 6530 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 6531 AMDGPU::RSRC_TID_ENABLE | 6532 0xffffffff; // Size; 6533 6534 // GFX9 doesn't have ELEMENT_SIZE. 6535 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 6536 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; 6537 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 6538 } 6539 6540 // IndexStride = 64 / 32. 6541 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 
3 : 2; 6542 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 6543 6544 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 6545 // Clear them unless we want a huge stride. 6546 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 6547 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 6548 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 6549 6550 return Rsrc23; 6551 } 6552 6553 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 6554 unsigned Opc = MI.getOpcode(); 6555 6556 return isSMRD(Opc); 6557 } 6558 6559 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 6560 return get(Opc).mayLoad() && 6561 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 6562 } 6563 6564 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 6565 int &FrameIndex) const { 6566 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 6567 if (!Addr || !Addr->isFI()) 6568 return AMDGPU::NoRegister; 6569 6570 assert(!MI.memoperands_empty() && 6571 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 6572 6573 FrameIndex = Addr->getIndex(); 6574 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 6575 } 6576 6577 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 6578 int &FrameIndex) const { 6579 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 6580 assert(Addr && Addr->isFI()); 6581 FrameIndex = Addr->getIndex(); 6582 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 6583 } 6584 6585 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 6586 int &FrameIndex) const { 6587 if (!MI.mayLoad()) 6588 return AMDGPU::NoRegister; 6589 6590 if (isMUBUF(MI) || isVGPRSpill(MI)) 6591 return isStackAccess(MI, FrameIndex); 6592 6593 if (isSGPRSpill(MI)) 6594 return isSGPRStackAccess(MI, FrameIndex); 6595 6596 return AMDGPU::NoRegister; 6597 } 6598 6599 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 6600 int &FrameIndex) const { 6601 if (!MI.mayStore()) 6602 return AMDGPU::NoRegister; 6603 6604 if (isMUBUF(MI) || isVGPRSpill(MI)) 6605 return isStackAccess(MI, FrameIndex); 6606 6607 if (isSGPRSpill(MI)) 6608 return isSGPRStackAccess(MI, FrameIndex); 6609 6610 return AMDGPU::NoRegister; 6611 } 6612 6613 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 6614 unsigned Size = 0; 6615 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 6616 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 6617 while (++I != E && I->isInsideBundle()) { 6618 assert(!I->isBundle() && "No nested bundle!"); 6619 Size += getInstSizeInBytes(*I); 6620 } 6621 6622 return Size; 6623 } 6624 6625 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 6626 unsigned Opc = MI.getOpcode(); 6627 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 6628 unsigned DescSize = Desc.getSize(); 6629 6630 // If we have a definitive size, we can use it. Otherwise we need to inspect 6631 // the operands to know the size. 6632 if (isFixedSize(MI)) { 6633 unsigned Size = DescSize; 6634 6635 // If we hit the buggy offset, an extra nop will be inserted in MC so 6636 // estimate the worst case. 6637 if (MI.isBranch() && ST.hasOffset3fBug()) 6638 Size += 4; 6639 6640 return Size; 6641 } 6642 6643 // 4-byte instructions may have a 32-bit literal encoded after them. Check 6644 // operands that coud ever be literals. 
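  // VOP3 is a 64-bit encoding, so with a trailing literal it totals 12 bytes;
  // for the other encodings the extra literal dword is added to DescSize.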

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}
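
// Illustrative MIR sketch of the rewrite above (block names and the mask
// register class are placeholders; the class depends on the wave size):
//
//   bb.if:
//     SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %bb.endif
// becomes
//   bb.if:
//     %mask:sreg_64 = SI_IF %cond, %bb.endif
//   bb.endif:
//     SI_END_CF %mask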

void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {

    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}
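
// Illustrative MIR sketch of the rewrite above (block and register names are
// placeholders): the non-uniform back-edge branch
//
//   bb.latch:
//     SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %bb.header
// becomes
//   bb.header:
//     %mask = PHI %zero, %bb.preheader, %backedge, %bb.latch
//   bb.latch:
//     %backedge = SI_IF_BREAK %mask, %cond
//     SI_LOOP %backedge, %bb.header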

ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" },
    { MO_ABS32_LO, "amdgpu-abs32-lo" },
    { MO_ABS32_HI, "amdgpu-abs32-hi" },
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           Register DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
         .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               const DebugLoc &DL,
                                               Register DestReg,
                                               RegScavenger &RS) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);

  // If available, prefer to use vcc.
  Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
                             ? Register(RI.getVCC())
                             : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);

  // TODO: Users need to deal with this.
  if (!UnusedCarry.isValid())
    return MachineInstrBuilder();

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
         .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}
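
// Hypothetical caller sketch (BaseReg/Offset are placeholder names): callers
// of either overload append the two source operands themselves, e.g.
//
//   TII->getAddNoCarry(MBB, I, DL, DestReg)
//       .addImm(Offset)
//       .addReg(BaseReg);
//
// plus a trailing clamp-bit immediate for the _e64 forms selected above. When
// a carry-out def was prepended it is marked dead, so the explicit source
// operands still land in src0/src1.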

bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
  if (!ST.isWave32())
    return;

  for (auto &Op : MI.implicit_operands()) {
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
      Op.setReg(AMDGPU::VCC_LO);
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}

unsigned SIInstrInfo::getNumFlatOffsetBits(bool Signed) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
                                    bool Signed) const {
  // TODO: Should 0 be special cased?
  if (!ST.hasFlatInstOffsets())
    return false;

  if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
    return Signed ? isInt<12>(Offset) : isUInt<11>(Offset);

  return Signed ? isInt<13>(Offset) : isUInt<12>(Offset);
}
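
// Worked examples of the ranges checked above: with signed offsets, the
// pre-GFX10 path accepts [-4096, 4095] (isInt<13>) while GFX10 narrows to
// [-2048, 2047] (isInt<12>); unsigned offsets are capped at 4095 and 2047
// respectively.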

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5,
  GFX10 = 6,
  SDWA10 = 7
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  case AMDGPUSubtarget::GFX10:
    return SIEncodingFamily::GFX10;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
  switch (MCOp) {
  // These opcodes use indirect register addressing, so they need special
  // handling by codegen (currently missing). It is therefore too risky to
  // allow them to be selected by the DPP combiner or the SDWA peephole pass.
  case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
    return true;
  default:
    return false;
  }
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() == AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
    switch (ST.getGeneration()) {
    default:
      Gen = SIEncodingFamily::SDWA;
      break;
    case AMDGPUSubtarget::GFX9:
      Gen = SIEncodingFamily::SDWA9;
      break;
    case AMDGPUSubtarget::GFX10:
      Gen = SIEncodingFamily::SDWA10;
      break;
    }
  }

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  if (isAsmOnlyOpcode(MCOp))
    return -1;

  return MCOp;
}

static
TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
                             getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // inserted the subreg we're looking for
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // the subreg in the rest of the reg
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // subreg of subreg isn't supported
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!P.Reg.isVirtual())
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Op1.getReg().isVirtual()) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}
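
// Illustrative use (hypothetical virtual registers), assuming SSA MIR:
//
//   %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
//   %2:vreg_64 = REG_SEQUENCE %1, %subreg.sub0, %0, %subreg.sub1
//
// getVRegSubRegDef({%2, AMDGPU::sub0}, MRI) follows the REG_SEQUENCE to %1 and
// returns the V_MOV_B32_e32 that defines it.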

bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan at the use.
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseScan = 10;
  int NumUse = 0;

  for (auto &Use : MRI.use_nodbg_operands(VReg)) {
    auto &UseInst = *Use.getParent();
    // Don't bother searching between blocks, although it is possible this
    // block doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUse > MaxUseScan)
      return true;
  }

  if (NumUse == 0)
    return false;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    assert(I != DefBB->end());

    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    for (const MachineOperand &Op : I->operands()) {
      // We don't check reg masks here as they're used only on calls:
      // 1. EXEC is only considered const within one BB
      // 2. Call should be a terminator instruction if present in a BB

      if (!Op.isReg())
        continue;

      Register Reg = Op.getReg();
      if (Op.isUse()) {
        if (Reg == VReg && --NumUse == 0)
          return false;
      } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC))
        return true;
    }
  }
}
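
// Typical (hypothetical) caller pattern: a fold only fires when the value is
// known to be read under the same exec mask it was written under, e.g.
//
//   if (!execMayBeModifiedBeforeUse(MRI, MovDstReg, DefMI, UseMI))
//     ... safe to fold DefMI into UseMI ...
//
// Both helpers err on the side of returning true (exec may change) whenever
// the use is in another block or the scan limits are exceeded.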

MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying
  // to a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  // A similar issue also exists with spilling and reloading $exec registers.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
        (DstReg.isVirtual() != SrcReg.isVirtual())) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
      const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
      if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
        return nullptr;
      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
        return nullptr;
      }
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}

unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions.
    return 0;
  }
}