1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// SI Implementation of TargetInstrInfo. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIInstrInfo.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUSubtarget.h" 17 #include "GCNHazardRecognizer.h" 18 #include "SIDefines.h" 19 #include "SIMachineFunctionInfo.h" 20 #include "SIRegisterInfo.h" 21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 22 #include "Utils/AMDGPUBaseInfo.h" 23 #include "llvm/ADT/APInt.h" 24 #include "llvm/ADT/ArrayRef.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/AliasAnalysis.h" 29 #include "llvm/Analysis/MemoryLocation.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/CodeGen/MachineBasicBlock.h" 32 #include "llvm/CodeGen/MachineDominators.h" 33 #include "llvm/CodeGen/MachineFrameInfo.h" 34 #include "llvm/CodeGen/MachineFunction.h" 35 #include "llvm/CodeGen/MachineInstr.h" 36 #include "llvm/CodeGen/MachineInstrBuilder.h" 37 #include "llvm/CodeGen/MachineInstrBundle.h" 38 #include "llvm/CodeGen/MachineMemOperand.h" 39 #include "llvm/CodeGen/MachineOperand.h" 40 #include "llvm/CodeGen/MachineRegisterInfo.h" 41 #include "llvm/CodeGen/RegisterScavenging.h" 42 #include "llvm/CodeGen/ScheduleDAG.h" 43 #include "llvm/CodeGen/SelectionDAGNodes.h" 44 #include "llvm/CodeGen/TargetOpcodes.h" 45 #include "llvm/CodeGen/TargetRegisterInfo.h" 46 #include "llvm/IR/DebugLoc.h" 47 #include "llvm/IR/DiagnosticInfo.h" 48 #include "llvm/IR/Function.h" 49 #include "llvm/IR/InlineAsm.h" 50 #include "llvm/IR/LLVMContext.h" 51 #include "llvm/MC/MCInstrDesc.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/CommandLine.h" 54 #include "llvm/Support/Compiler.h" 55 #include "llvm/Support/ErrorHandling.h" 56 #include "llvm/Support/MachineValueType.h" 57 #include "llvm/Support/MathExtras.h" 58 #include "llvm/Target/TargetMachine.h" 59 #include <cassert> 60 #include <cstdint> 61 #include <iterator> 62 #include <utility> 63 64 using namespace llvm; 65 66 #define DEBUG_TYPE "si-instr-info" 67 68 #define GET_INSTRINFO_CTOR_DTOR 69 #include "AMDGPUGenInstrInfo.inc" 70 71 namespace llvm { 72 namespace AMDGPU { 73 #define GET_D16ImageDimIntrinsics_IMPL 74 #define GET_ImageDimIntrinsicTable_IMPL 75 #define GET_RsrcIntrinsics_IMPL 76 #include "AMDGPUGenSearchableTables.inc" 77 } 78 } 79 80 81 // Must be at least 4 to be able to branch over minimum unconditional branch 82 // code. This is only for making it possible to write reasonably small tests for 83 // long branches. 
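// The default of 16 matches the signed 16-bit offset field of the SOPP branch
// encodings; tests lower it to force the long-branch expansion path.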
84 static cl::opt<unsigned> 85 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), 86 cl::desc("Restrict range of branch instructions (DEBUG)")); 87 88 static cl::opt<bool> Fix16BitCopies( 89 "amdgpu-fix-16-bit-physreg-copies", 90 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), 91 cl::init(true), 92 cl::ReallyHidden); 93 94 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST) 95 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), 96 RI(ST), ST(ST) { 97 SchedModel.init(&ST); 98 } 99 100 //===----------------------------------------------------------------------===// 101 // TargetInstrInfo callbacks 102 //===----------------------------------------------------------------------===// 103 104 static unsigned getNumOperandsNoGlue(SDNode *Node) { 105 unsigned N = Node->getNumOperands(); 106 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 107 --N; 108 return N; 109 } 110 111 /// Returns true if both nodes have the same value for the given 112 /// operand \p Op, or if both nodes do not have this operand. 113 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { 114 unsigned Opc0 = N0->getMachineOpcode(); 115 unsigned Opc1 = N1->getMachineOpcode(); 116 117 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); 118 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); 119 120 if (Op0Idx == -1 && Op1Idx == -1) 121 return true; 122 123 124 if ((Op0Idx == -1 && Op1Idx != -1) || 125 (Op1Idx == -1 && Op0Idx != -1)) 126 return false; 127 128 // getNamedOperandIdx returns the index for the MachineInstr's operands, 129 // which includes the result as the first operand. We are indexing into the 130 // MachineSDNode's operands, so we need to skip the result operand to get 131 // the real index. 132 --Op0Idx; 133 --Op1Idx; 134 135 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx); 136 } 137 138 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, 139 AliasAnalysis *AA) const { 140 // TODO: The generic check fails for VALU instructions that should be 141 // rematerializable due to implicit reads of exec. We really want all of the 142 // generic logic for this except for this. 143 switch (MI.getOpcode()) { 144 case AMDGPU::V_MOV_B32_e32: 145 case AMDGPU::V_MOV_B32_e64: 146 case AMDGPU::V_MOV_B64_PSEUDO: 147 case AMDGPU::V_ACCVGPR_READ_B32: 148 case AMDGPU::V_ACCVGPR_WRITE_B32: 149 // No implicit operands. 150 return MI.getNumOperands() == MI.getDesc().getNumOperands(); 151 default: 152 return false; 153 } 154 } 155 156 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, 157 int64_t &Offset0, 158 int64_t &Offset1) const { 159 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode()) 160 return false; 161 162 unsigned Opc0 = Load0->getMachineOpcode(); 163 unsigned Opc1 = Load1->getMachineOpcode(); 164 165 // Make sure both are actually loads. 166 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad()) 167 return false; 168 169 if (isDS(Opc0) && isDS(Opc1)) { 170 171 // FIXME: Handle this case: 172 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1)) 173 return false; 174 175 // Check base reg. 176 if (Load0->getOperand(0) != Load1->getOperand(0)) 177 return false; 178 179 // Skip read2 / write2 variants for simplicity. 180 // TODO: We should report true if the used offsets are adjacent (excluded 181 // st64 versions). 
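    // The read2/write2 forms carry offset0/offset1 rather than a single
    // offset operand, so the lookup below returns -1 for them and we
    // conservatively bail out.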
182 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 183 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 184 if (Offset0Idx == -1 || Offset1Idx == -1) 185 return false; 186 187 // XXX - be careful of datalesss loads 188 // getNamedOperandIdx returns the index for MachineInstrs. Since they 189 // include the output in the operand list, but SDNodes don't, we need to 190 // subtract the index by one. 191 Offset0Idx -= get(Opc0).NumDefs; 192 Offset1Idx -= get(Opc1).NumDefs; 193 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue(); 194 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue(); 195 return true; 196 } 197 198 if (isSMRD(Opc0) && isSMRD(Opc1)) { 199 // Skip time and cache invalidation instructions. 200 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 || 201 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1) 202 return false; 203 204 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1)); 205 206 // Check base reg. 207 if (Load0->getOperand(0) != Load1->getOperand(0)) 208 return false; 209 210 const ConstantSDNode *Load0Offset = 211 dyn_cast<ConstantSDNode>(Load0->getOperand(1)); 212 const ConstantSDNode *Load1Offset = 213 dyn_cast<ConstantSDNode>(Load1->getOperand(1)); 214 215 if (!Load0Offset || !Load1Offset) 216 return false; 217 218 Offset0 = Load0Offset->getZExtValue(); 219 Offset1 = Load1Offset->getZExtValue(); 220 return true; 221 } 222 223 // MUBUF and MTBUF can access the same addresses. 224 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) { 225 226 // MUBUF and MTBUF have vaddr at different indices. 227 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || 228 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || 229 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) 230 return false; 231 232 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 233 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 234 235 if (OffIdx0 == -1 || OffIdx1 == -1) 236 return false; 237 238 // getNamedOperandIdx returns the index for MachineInstrs. Since they 239 // include the output in the operand list, but SDNodes don't, we need to 240 // subtract the index by one. 241 OffIdx0 -= get(Opc0).NumDefs; 242 OffIdx1 -= get(Opc1).NumDefs; 243 244 SDValue Off0 = Load0->getOperand(OffIdx0); 245 SDValue Off1 = Load1->getOperand(OffIdx1); 246 247 // The offset might be a FrameIndexSDNode. 
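    // Stack accesses in particular use frame indices here; those are not
    // resolved to constants until frame lowering, so give up on them.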
248 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 249 return false; 250 251 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 252 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 253 return true; 254 } 255 256 return false; 257 } 258 259 static bool isStride64(unsigned Opc) { 260 switch (Opc) { 261 case AMDGPU::DS_READ2ST64_B32: 262 case AMDGPU::DS_READ2ST64_B64: 263 case AMDGPU::DS_WRITE2ST64_B32: 264 case AMDGPU::DS_WRITE2ST64_B64: 265 return true; 266 default: 267 return false; 268 } 269 } 270 271 bool SIInstrInfo::getMemOperandsWithOffsetWidth( 272 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 273 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, 274 const TargetRegisterInfo *TRI) const { 275 if (!LdSt.mayLoadOrStore()) 276 return false; 277 278 unsigned Opc = LdSt.getOpcode(); 279 OffsetIsScalable = false; 280 const MachineOperand *BaseOp, *OffsetOp; 281 int DataOpIdx; 282 283 if (isDS(LdSt)) { 284 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 285 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 286 if (OffsetOp) { 287 // Normal, single offset LDS instruction. 288 if (!BaseOp) { 289 // DS_CONSUME/DS_APPEND use M0 for the base address. 290 // TODO: find the implicit use operand for M0 and use that as BaseOp? 291 return false; 292 } 293 BaseOps.push_back(BaseOp); 294 Offset = OffsetOp->getImm(); 295 // Get appropriate operand, and compute width accordingly. 296 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 297 if (DataOpIdx == -1) 298 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 299 Width = getOpSize(LdSt, DataOpIdx); 300 } else { 301 // The 2 offset instructions use offset0 and offset1 instead. We can treat 302 // these as a load with a single offset if the 2 offsets are consecutive. 303 // We will use this for some partially aligned loads. 304 const MachineOperand *Offset0Op = 305 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 306 const MachineOperand *Offset1Op = 307 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 308 309 unsigned Offset0 = Offset0Op->getImm(); 310 unsigned Offset1 = Offset1Op->getImm(); 311 if (Offset0 + 1 != Offset1) 312 return false; 313 314 // Each of these offsets is in element sized units, so we need to convert 315 // to bytes of the individual reads. 316 317 unsigned EltSize; 318 if (LdSt.mayLoad()) 319 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 320 else { 321 assert(LdSt.mayStore()); 322 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 323 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 324 } 325 326 if (isStride64(Opc)) 327 EltSize *= 64; 328 329 BaseOps.push_back(BaseOp); 330 Offset = EltSize * Offset0; 331 // Get appropriate operand(s), and compute width accordingly. 332 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 333 if (DataOpIdx == -1) { 334 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 335 Width = getOpSize(LdSt, DataOpIdx); 336 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1); 337 Width += getOpSize(LdSt, DataOpIdx); 338 } else { 339 Width = getOpSize(LdSt, DataOpIdx); 340 } 341 } 342 return true; 343 } 344 345 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 346 const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset); 347 if (SOffset && SOffset->isReg()) { 348 // We can only handle this if it's a stack access, as any other resource 349 // would require reporting multiple base registers. 
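      // For a stack access the base is the scratch descriptor plus the
      // SP-relative soffset; vaddr, if present at all, must be a frame index.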
350 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 351 if (AddrReg && !AddrReg->isFI()) 352 return false; 353 354 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 355 const SIMachineFunctionInfo *MFI 356 = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>(); 357 if (RSrc->getReg() != MFI->getScratchRSrcReg()) 358 return false; 359 360 const MachineOperand *OffsetImm = 361 getNamedOperand(LdSt, AMDGPU::OpName::offset); 362 BaseOps.push_back(RSrc); 363 BaseOps.push_back(SOffset); 364 Offset = OffsetImm->getImm(); 365 } else { 366 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 367 if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL 368 return false; 369 BaseOps.push_back(BaseOp); 370 371 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 372 if (BaseOp) 373 BaseOps.push_back(BaseOp); 374 375 const MachineOperand *OffsetImm = 376 getNamedOperand(LdSt, AMDGPU::OpName::offset); 377 Offset = OffsetImm->getImm(); 378 if (SOffset) // soffset can be an inline immediate. 379 Offset += SOffset->getImm(); 380 } 381 // Get appropriate operand, and compute width accordingly. 382 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 383 if (DataOpIdx == -1) 384 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 385 Width = getOpSize(LdSt, DataOpIdx); 386 return true; 387 } 388 389 if (isMIMG(LdSt)) { 390 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 391 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx)); 392 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 393 if (VAddr0Idx >= 0) { 394 // GFX10 possible NSA encoding. 395 for (int I = VAddr0Idx; I < SRsrcIdx; ++I) 396 BaseOps.push_back(&LdSt.getOperand(I)); 397 } else { 398 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr)); 399 } 400 Offset = 0; 401 // Get appropriate operand, and compute width accordingly. 402 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 403 Width = getOpSize(LdSt, DataOpIdx); 404 return true; 405 } 406 407 if (isSMRD(LdSt)) { 408 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 409 if (!BaseOp) // e.g. S_MEMTIME 410 return false; 411 BaseOps.push_back(BaseOp); 412 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 413 Offset = OffsetOp ? OffsetOp->getImm() : 0; 414 // Get appropriate operand, and compute width accordingly. 415 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst); 416 Width = getOpSize(LdSt, DataOpIdx); 417 return true; 418 } 419 420 if (isFLAT(LdSt)) { 421 // Instructions have either vaddr or saddr or both. 422 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 423 if (BaseOp) 424 BaseOps.push_back(BaseOp); 425 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr); 426 if (BaseOp) 427 BaseOps.push_back(BaseOp); 428 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm(); 429 // Get appropriate operand, and compute width accordingly. 
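    // FLAT loads define vdst; stores carry the value to write in vdata.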
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  assert(!BaseOps1.empty() && !BaseOps2.empty());
  const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
  const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
    return false;

  // In order to avoid register pressure, on average, the number of DWORDS
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is a
  // brief summary of how the heuristic behaves for various `LoadSize`.
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
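// SDNode-based clustering hook used by the SelectionDAG scheduler; Offset0 and
// Offset1 are the values produced by areLoadsFromSameBasePtr above.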
510 bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, 511 int64_t Offset0, int64_t Offset1, 512 unsigned NumLoads) const { 513 assert(Offset1 > Offset0 && 514 "Second offset should be larger than first offset!"); 515 // If we have less than 16 loads in a row, and the offsets are within 64 516 // bytes, then schedule together. 517 518 // A cacheline is 64 bytes (for global memory). 519 return (NumLoads <= 16 && (Offset1 - Offset0) < 64); 520 } 521 522 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, 523 MachineBasicBlock::iterator MI, 524 const DebugLoc &DL, MCRegister DestReg, 525 MCRegister SrcReg, bool KillSrc, 526 const char *Msg = "illegal SGPR to VGPR copy") { 527 MachineFunction *MF = MBB.getParent(); 528 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error); 529 LLVMContext &C = MF->getFunction().getContext(); 530 C.diagnose(IllegalCopy); 531 532 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg) 533 .addReg(SrcReg, getKillRegState(KillSrc)); 534 } 535 536 /// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible 537 /// to directly copy, so an intermediate VGPR needs to be used. 538 static void indirectCopyToAGPR(const SIInstrInfo &TII, 539 MachineBasicBlock &MBB, 540 MachineBasicBlock::iterator MI, 541 const DebugLoc &DL, MCRegister DestReg, 542 MCRegister SrcReg, bool KillSrc, 543 RegScavenger &RS, 544 Register ImpDefSuperReg = Register(), 545 Register ImpUseSuperReg = Register()) { 546 const SIRegisterInfo &RI = TII.getRegisterInfo(); 547 548 assert(AMDGPU::SReg_32RegClass.contains(SrcReg) || 549 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 550 551 // First try to find defining accvgpr_write to avoid temporary registers. 552 for (auto Def = MI, E = MBB.begin(); Def != E; ) { 553 --Def; 554 if (!Def->definesRegister(SrcReg, &RI)) 555 continue; 556 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32) 557 break; 558 559 MachineOperand &DefOp = Def->getOperand(1); 560 assert(DefOp.isReg() || DefOp.isImm()); 561 562 if (DefOp.isReg()) { 563 // Check that register source operand if not clobbered before MI. 564 // Immediate operands are always safe to propagate. 565 bool SafeToPropagate = true; 566 for (auto I = Def; I != MI && SafeToPropagate; ++I) 567 if (I->modifiesRegister(DefOp.getReg(), &RI)) 568 SafeToPropagate = false; 569 570 if (!SafeToPropagate) 571 break; 572 573 DefOp.setIsKill(false); 574 } 575 576 MachineInstrBuilder Builder = 577 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 578 .add(DefOp); 579 if (ImpDefSuperReg) 580 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 581 582 if (ImpUseSuperReg) { 583 Builder.addReg(ImpUseSuperReg, 584 getKillRegState(KillSrc) | RegState::Implicit); 585 } 586 587 return; 588 } 589 590 RS.enterBasicBlock(MBB); 591 RS.forward(MI); 592 593 // Ideally we want to have three registers for a long reg_sequence copy 594 // to hide 2 waitstates between v_mov_b32 and accvgpr_write. 595 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, 596 *MBB.getParent()); 597 598 // Registers in the sequence are allocated contiguously so we can just 599 // use register number to pick one of three round-robin temps. 
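  // E.g. the sub0/sub1/sub2 pieces of a wide copy land on three different
  // temporaries, which is enough to hide the two wait states noted above.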
600 unsigned RegNo = DestReg % 3; 601 Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 602 if (!Tmp) 603 report_fatal_error("Cannot scavenge VGPR to copy to AGPR"); 604 RS.setRegUsed(Tmp); 605 // Only loop through if there are any free registers left, otherwise 606 // scavenger may report a fatal error without emergency spill slot 607 // or spill with the slot. 608 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) { 609 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 610 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) 611 break; 612 Tmp = Tmp2; 613 RS.setRegUsed(Tmp); 614 } 615 616 // Insert copy to temporary VGPR. 617 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32; 618 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) { 619 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32; 620 } else { 621 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 622 } 623 624 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp) 625 .addReg(SrcReg, getKillRegState(KillSrc)); 626 if (ImpUseSuperReg) { 627 UseBuilder.addReg(ImpUseSuperReg, 628 getKillRegState(KillSrc) | RegState::Implicit); 629 } 630 631 MachineInstrBuilder DefBuilder 632 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 633 .addReg(Tmp, RegState::Kill); 634 635 if (ImpDefSuperReg) 636 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 637 } 638 639 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 640 MachineBasicBlock::iterator MI, 641 const DebugLoc &DL, MCRegister DestReg, 642 MCRegister SrcReg, bool KillSrc) const { 643 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg); 644 645 // FIXME: This is hack to resolve copies between 16 bit and 32 bit 646 // registers until all patterns are fixed. 647 if (Fix16BitCopies && 648 ((RI.getRegSizeInBits(*RC) == 16) ^ 649 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) { 650 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg; 651 MCRegister Super = RI.get32BitRegister(RegToFix); 652 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix); 653 RegToFix = Super; 654 655 if (DestReg == SrcReg) { 656 // Insert empty bundle since ExpandPostRA expects an instruction here. 657 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE)); 658 return; 659 } 660 661 RC = RI.getPhysRegClass(DestReg); 662 } 663 664 if (RC == &AMDGPU::VGPR_32RegClass) { 665 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 666 AMDGPU::SReg_32RegClass.contains(SrcReg) || 667 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 668 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ? 669 AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32; 670 BuildMI(MBB, MI, DL, get(Opc), DestReg) 671 .addReg(SrcReg, getKillRegState(KillSrc)); 672 return; 673 } 674 675 if (RC == &AMDGPU::SReg_32_XM0RegClass || 676 RC == &AMDGPU::SReg_32RegClass) { 677 if (SrcReg == AMDGPU::SCC) { 678 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg) 679 .addImm(1) 680 .addImm(0); 681 return; 682 } 683 684 if (DestReg == AMDGPU::VCC_LO) { 685 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { 686 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) 687 .addReg(SrcReg, getKillRegState(KillSrc)); 688 } else { 689 // FIXME: Hack until VReg_1 removed. 
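        // An i1 kept in a VGPR is materialized by comparing it against zero,
        // with the result landing in VCC_LO.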
690 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 691 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 692 .addImm(0) 693 .addReg(SrcReg, getKillRegState(KillSrc)); 694 } 695 696 return; 697 } 698 699 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 700 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 701 return; 702 } 703 704 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 705 .addReg(SrcReg, getKillRegState(KillSrc)); 706 return; 707 } 708 709 if (RC == &AMDGPU::SReg_64RegClass) { 710 if (SrcReg == AMDGPU::SCC) { 711 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg) 712 .addImm(1) 713 .addImm(0); 714 return; 715 } 716 717 if (DestReg == AMDGPU::VCC) { 718 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 719 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 720 .addReg(SrcReg, getKillRegState(KillSrc)); 721 } else { 722 // FIXME: Hack until VReg_1 removed. 723 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 724 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 725 .addImm(0) 726 .addReg(SrcReg, getKillRegState(KillSrc)); 727 } 728 729 return; 730 } 731 732 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 733 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 734 return; 735 } 736 737 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 738 .addReg(SrcReg, getKillRegState(KillSrc)); 739 return; 740 } 741 742 if (DestReg == AMDGPU::SCC) { 743 // Copying 64-bit or 32-bit sources to SCC barely makes sense, 744 // but SelectionDAG emits such copies for i1 sources. 745 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 746 // This copy can only be produced by patterns 747 // with explicit SCC, which are known to be enabled 748 // only for subtargets with S_CMP_LG_U64 present. 749 assert(ST.hasScalarCompareEq64()); 750 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64)) 751 .addReg(SrcReg, getKillRegState(KillSrc)) 752 .addImm(0); 753 } else { 754 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 755 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 756 .addReg(SrcReg, getKillRegState(KillSrc)) 757 .addImm(0); 758 } 759 760 return; 761 } 762 763 764 if (RC == &AMDGPU::AGPR_32RegClass) { 765 if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) { 766 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 767 .addReg(SrcReg, getKillRegState(KillSrc)); 768 return; 769 } 770 771 // FIXME: Pass should maintain scavenger to avoid scan through the block on 772 // every AGPR spill. 
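    // A fresh scavenger is built for each such copy for now;
    // indirectCopyToAGPR uses it to find a free VGPR for the intermediate
    // value when no defining accvgpr_write can be reused.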
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
      .addImm(0) // src0_modifiers
      .addReg(NewSrcReg)
      .addImm(0) // clamp
      .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                     : AMDGPU::SDWA::SdwaSel::WORD_1)
      .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
      .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                     : AMDGPU::SDWA::SdwaSel::WORD_1)
      .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32.
    if (!(RI.getRegSizeInBits(*RC) % 64)) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  } else if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32;
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (the destination is an AGPR and the source is an SGPR or another AGPR), we
  // need a scavenger.
870 // 871 // FIXME: The pass should maintain this for us so we don't have to re-scan the 872 // whole block for every handled copy. 873 std::unique_ptr<RegScavenger> RS; 874 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) 875 RS.reset(new RegScavenger()); 876 877 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 878 bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 879 880 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 881 unsigned SubIdx; 882 if (Forward) 883 SubIdx = SubIndices[Idx]; 884 else 885 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 886 887 888 bool UseKill = KillSrc && Idx == SubIndices.size() - 1; 889 890 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) { 891 Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register(); 892 Register ImpUseSuper = SrcReg; 893 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 894 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS, 895 ImpDefSuper, ImpUseSuper); 896 } else { 897 MachineInstrBuilder Builder = 898 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx)) 899 .addReg(RI.getSubReg(SrcReg, SubIdx)); 900 if (Idx == 0) 901 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 902 903 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 904 } 905 } 906 } 907 908 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 909 int NewOpc; 910 911 // Try to map original to commuted opcode 912 NewOpc = AMDGPU::getCommuteRev(Opcode); 913 if (NewOpc != -1) 914 // Check if the commuted (REV) opcode exists on the target. 915 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 916 917 // Try to map commuted to original opcode 918 NewOpc = AMDGPU::getCommuteOrig(Opcode); 919 if (NewOpc != -1) 920 // Check if the original (non-REV) opcode exists on the target. 921 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 922 923 return Opcode; 924 } 925 926 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 927 MachineBasicBlock::iterator MI, 928 const DebugLoc &DL, unsigned DestReg, 929 int64_t Value) const { 930 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 931 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 932 if (RegClass == &AMDGPU::SReg_32RegClass || 933 RegClass == &AMDGPU::SGPR_32RegClass || 934 RegClass == &AMDGPU::SReg_32_XM0RegClass || 935 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 936 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 937 .addImm(Value); 938 return; 939 } 940 941 if (RegClass == &AMDGPU::SReg_64RegClass || 942 RegClass == &AMDGPU::SGPR_64RegClass || 943 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 944 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 945 .addImm(Value); 946 return; 947 } 948 949 if (RegClass == &AMDGPU::VGPR_32RegClass) { 950 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 951 .addImm(Value); 952 return; 953 } 954 if (RegClass == &AMDGPU::VReg_64RegClass) { 955 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 956 .addImm(Value); 957 return; 958 } 959 960 unsigned EltSize = 4; 961 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 962 if (RI.isSGPRClass(RegClass)) { 963 if (RI.getRegSizeInBits(*RegClass) > 32) { 964 Opcode = AMDGPU::S_MOV_B64; 965 EltSize = 8; 966 } else { 967 Opcode = AMDGPU::S_MOV_B32; 968 EltSize = 4; 969 } 970 } 971 972 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 973 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 974 int64_t IdxValue = Idx == 0 ? 
Value : 0; 975 976 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 977 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 978 Builder.addImm(IdxValue); 979 } 980 } 981 982 const TargetRegisterClass * 983 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 984 return &AMDGPU::VGPR_32RegClass; 985 } 986 987 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 988 MachineBasicBlock::iterator I, 989 const DebugLoc &DL, Register DstReg, 990 ArrayRef<MachineOperand> Cond, 991 Register TrueReg, 992 Register FalseReg) const { 993 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 994 const TargetRegisterClass *BoolXExecRC = 995 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 996 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 997 "Not a VGPR32 reg"); 998 999 if (Cond.size() == 1) { 1000 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1001 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1002 .add(Cond[0]); 1003 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1004 .addImm(0) 1005 .addReg(FalseReg) 1006 .addImm(0) 1007 .addReg(TrueReg) 1008 .addReg(SReg); 1009 } else if (Cond.size() == 2) { 1010 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 1011 switch (Cond[0].getImm()) { 1012 case SIInstrInfo::SCC_TRUE: { 1013 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1014 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1015 : AMDGPU::S_CSELECT_B64), SReg) 1016 .addImm(1) 1017 .addImm(0); 1018 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1019 .addImm(0) 1020 .addReg(FalseReg) 1021 .addImm(0) 1022 .addReg(TrueReg) 1023 .addReg(SReg); 1024 break; 1025 } 1026 case SIInstrInfo::SCC_FALSE: { 1027 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1028 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1029 : AMDGPU::S_CSELECT_B64), SReg) 1030 .addImm(0) 1031 .addImm(1); 1032 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1033 .addImm(0) 1034 .addReg(FalseReg) 1035 .addImm(0) 1036 .addReg(TrueReg) 1037 .addReg(SReg); 1038 break; 1039 } 1040 case SIInstrInfo::VCCNZ: { 1041 MachineOperand RegOp = Cond[1]; 1042 RegOp.setImplicit(false); 1043 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1044 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1045 .add(RegOp); 1046 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1047 .addImm(0) 1048 .addReg(FalseReg) 1049 .addImm(0) 1050 .addReg(TrueReg) 1051 .addReg(SReg); 1052 break; 1053 } 1054 case SIInstrInfo::VCCZ: { 1055 MachineOperand RegOp = Cond[1]; 1056 RegOp.setImplicit(false); 1057 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1058 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1059 .add(RegOp); 1060 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1061 .addImm(0) 1062 .addReg(TrueReg) 1063 .addImm(0) 1064 .addReg(FalseReg) 1065 .addReg(SReg); 1066 break; 1067 } 1068 case SIInstrInfo::EXECNZ: { 1069 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1070 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1071 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1072 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1073 .addImm(0); 1074 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 1075 : AMDGPU::S_CSELECT_B64), SReg) 1076 .addImm(1) 1077 .addImm(0); 1078 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1079 .addImm(0) 1080 .addReg(FalseReg) 1081 .addImm(0) 1082 .addReg(TrueReg) 1083 .addReg(SReg); 1084 break; 1085 } 1086 case SIInstrInfo::EXECZ: { 1087 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1088 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1089 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1090 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1091 .addImm(0); 1092 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1093 : AMDGPU::S_CSELECT_B64), SReg) 1094 .addImm(0) 1095 .addImm(1); 1096 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1097 .addImm(0) 1098 .addReg(FalseReg) 1099 .addImm(0) 1100 .addReg(TrueReg) 1101 .addReg(SReg); 1102 llvm_unreachable("Unhandled branch predicate EXECZ"); 1103 break; 1104 } 1105 default: 1106 llvm_unreachable("invalid branch predicate"); 1107 } 1108 } else { 1109 llvm_unreachable("Can only handle Cond size 1 or 2"); 1110 } 1111 } 1112 1113 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 1114 MachineBasicBlock::iterator I, 1115 const DebugLoc &DL, 1116 Register SrcReg, int Value) const { 1117 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1118 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1119 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 1120 .addImm(Value) 1121 .addReg(SrcReg); 1122 1123 return Reg; 1124 } 1125 1126 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB, 1127 MachineBasicBlock::iterator I, 1128 const DebugLoc &DL, 1129 Register SrcReg, int Value) const { 1130 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1131 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1132 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 1133 .addImm(Value) 1134 .addReg(SrcReg); 1135 1136 return Reg; 1137 } 1138 1139 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 1140 1141 if (RI.hasAGPRs(DstRC)) 1142 return AMDGPU::COPY; 1143 if (RI.getRegSizeInBits(*DstRC) == 32) { 1144 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1145 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 1146 return AMDGPU::S_MOV_B64; 1147 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 1148 return AMDGPU::V_MOV_B64_PSEUDO; 1149 } 1150 return AMDGPU::COPY; 1151 } 1152 1153 static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) { 1154 if (VecSize <= 32) // 4 bytes 1155 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1; 1156 if (VecSize <= 64) // 8 bytes 1157 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2; 1158 if (VecSize <= 96) // 12 bytes 1159 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3; 1160 if (VecSize <= 128) // 16 bytes 1161 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4; 1162 if (VecSize <= 160) // 20 bytes 1163 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5; 1164 if (VecSize <= 256) // 32 bytes 1165 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8; 1166 if (VecSize <= 512) // 64 bytes 1167 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16; 1168 if (VecSize <= 1024) // 128 bytes 1169 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32; 1170 1171 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1172 } 1173 1174 static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) { 1175 if (VecSize <= 32) // 4 bytes 1176 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1; 1177 if (VecSize <= 64) // 8 bytes 1178 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2; 1179 if (VecSize <= 96) // 12 bytes 1180 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3; 1181 if (VecSize <= 128) // 16 bytes 1182 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4; 1183 if (VecSize <= 160) // 20 bytes 1184 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5; 1185 if (VecSize <= 256) // 32 bytes 1186 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8; 1187 if (VecSize <= 512) // 64 bytes 1188 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16; 1189 if (VecSize <= 1024) // 128 bytes 1190 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32; 1191 1192 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1193 } 1194 1195 static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) { 1196 if (VecSize <= 64) // 8 bytes 1197 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1; 1198 if (VecSize <= 128) // 16 bytes 1199 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2; 1200 if (VecSize <= 256) // 32 bytes 1201 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4; 1202 if (VecSize <= 512) // 64 bytes 1203 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8; 1204 if (VecSize <= 1024) // 128 bytes 1205 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16; 1206 1207 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1208 } 1209 1210 const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo( 1211 unsigned VecSize, unsigned EltSize, bool IsSGPR) const { 1212 if (IsSGPR) { 1213 switch (EltSize) { 1214 case 32: 1215 return get(getIndirectSGPRWritePseudo32(VecSize)); 1216 case 64: 1217 return get(getIndirectSGPRWritePseudo64(VecSize)); 1218 default: 1219 llvm_unreachable("invalid reg indexing elt size"); 1220 } 1221 } 1222 1223 assert(EltSize == 32 && "invalid reg indexing elt size"); 1224 return get(getIndirectVGPRWritePseudoOpc(VecSize)); 1225 } 1226 1227 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1228 switch (Size) { 1229 case 4: 1230 return AMDGPU::SI_SPILL_S32_SAVE; 1231 case 8: 1232 return AMDGPU::SI_SPILL_S64_SAVE; 1233 case 12: 1234 return AMDGPU::SI_SPILL_S96_SAVE; 1235 case 16: 1236 return AMDGPU::SI_SPILL_S128_SAVE; 1237 case 20: 1238 return AMDGPU::SI_SPILL_S160_SAVE; 1239 case 24: 1240 return AMDGPU::SI_SPILL_S192_SAVE; 1241 case 32: 1242 return 
AMDGPU::SI_SPILL_S256_SAVE; 1243 case 64: 1244 return AMDGPU::SI_SPILL_S512_SAVE; 1245 case 128: 1246 return AMDGPU::SI_SPILL_S1024_SAVE; 1247 default: 1248 llvm_unreachable("unknown register size"); 1249 } 1250 } 1251 1252 static unsigned getVGPRSpillSaveOpcode(unsigned Size) { 1253 switch (Size) { 1254 case 4: 1255 return AMDGPU::SI_SPILL_V32_SAVE; 1256 case 8: 1257 return AMDGPU::SI_SPILL_V64_SAVE; 1258 case 12: 1259 return AMDGPU::SI_SPILL_V96_SAVE; 1260 case 16: 1261 return AMDGPU::SI_SPILL_V128_SAVE; 1262 case 20: 1263 return AMDGPU::SI_SPILL_V160_SAVE; 1264 case 24: 1265 return AMDGPU::SI_SPILL_V192_SAVE; 1266 case 32: 1267 return AMDGPU::SI_SPILL_V256_SAVE; 1268 case 64: 1269 return AMDGPU::SI_SPILL_V512_SAVE; 1270 case 128: 1271 return AMDGPU::SI_SPILL_V1024_SAVE; 1272 default: 1273 llvm_unreachable("unknown register size"); 1274 } 1275 } 1276 1277 static unsigned getAGPRSpillSaveOpcode(unsigned Size) { 1278 switch (Size) { 1279 case 4: 1280 return AMDGPU::SI_SPILL_A32_SAVE; 1281 case 8: 1282 return AMDGPU::SI_SPILL_A64_SAVE; 1283 case 12: 1284 return AMDGPU::SI_SPILL_A96_SAVE; 1285 case 16: 1286 return AMDGPU::SI_SPILL_A128_SAVE; 1287 case 20: 1288 return AMDGPU::SI_SPILL_A160_SAVE; 1289 case 24: 1290 return AMDGPU::SI_SPILL_A192_SAVE; 1291 case 32: 1292 return AMDGPU::SI_SPILL_A256_SAVE; 1293 case 64: 1294 return AMDGPU::SI_SPILL_A512_SAVE; 1295 case 128: 1296 return AMDGPU::SI_SPILL_A1024_SAVE; 1297 default: 1298 llvm_unreachable("unknown register size"); 1299 } 1300 } 1301 1302 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 1303 MachineBasicBlock::iterator MI, 1304 Register SrcReg, bool isKill, 1305 int FrameIndex, 1306 const TargetRegisterClass *RC, 1307 const TargetRegisterInfo *TRI) const { 1308 MachineFunction *MF = MBB.getParent(); 1309 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1310 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1311 const DebugLoc &DL = MBB.findDebugLoc(MI); 1312 1313 MachinePointerInfo PtrInfo 1314 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1315 MachineMemOperand *MMO = MF->getMachineMemOperand( 1316 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex), 1317 FrameInfo.getObjectAlign(FrameIndex)); 1318 unsigned SpillSize = TRI->getSpillSize(*RC); 1319 1320 if (RI.isSGPRClass(RC)) { 1321 MFI->setHasSpilledSGPRs(); 1322 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled"); 1323 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI && 1324 SrcReg != AMDGPU::EXEC && "exec should not be spilled"); 1325 1326 // We are only allowed to create one new instruction when spilling 1327 // registers, so we need to use pseudo instruction for spilling SGPRs. 1328 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize)); 1329 1330 // The SGPR spill/restore instructions only work on number sgprs, so we need 1331 // to make sure we are using the correct register class. 
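    // Constraining to SReg_32_XM0_XEXEC keeps m0 and exec out of the data
    // operand of the spill pseudo.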
1332 if (SrcReg.isVirtual() && SpillSize == 4) { 1333 MachineRegisterInfo &MRI = MF->getRegInfo(); 1334 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1335 } 1336 1337 BuildMI(MBB, MI, DL, OpDesc) 1338 .addReg(SrcReg, getKillRegState(isKill)) // data 1339 .addFrameIndex(FrameIndex) // addr 1340 .addMemOperand(MMO) 1341 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) 1342 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1343 // Add the scratch resource registers as implicit uses because we may end up 1344 // needing them, and need to ensure that the reserved registers are 1345 // correctly handled. 1346 if (RI.spillSGPRToVGPR()) 1347 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1348 return; 1349 } 1350 1351 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize) 1352 : getVGPRSpillSaveOpcode(SpillSize); 1353 MFI->setHasSpilledVGPRs(); 1354 1355 BuildMI(MBB, MI, DL, get(Opcode)) 1356 .addReg(SrcReg, getKillRegState(isKill)) // data 1357 .addFrameIndex(FrameIndex) // addr 1358 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1359 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1360 .addImm(0) // offset 1361 .addMemOperand(MMO); 1362 } 1363 1364 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1365 switch (Size) { 1366 case 4: 1367 return AMDGPU::SI_SPILL_S32_RESTORE; 1368 case 8: 1369 return AMDGPU::SI_SPILL_S64_RESTORE; 1370 case 12: 1371 return AMDGPU::SI_SPILL_S96_RESTORE; 1372 case 16: 1373 return AMDGPU::SI_SPILL_S128_RESTORE; 1374 case 20: 1375 return AMDGPU::SI_SPILL_S160_RESTORE; 1376 case 24: 1377 return AMDGPU::SI_SPILL_S192_RESTORE; 1378 case 32: 1379 return AMDGPU::SI_SPILL_S256_RESTORE; 1380 case 64: 1381 return AMDGPU::SI_SPILL_S512_RESTORE; 1382 case 128: 1383 return AMDGPU::SI_SPILL_S1024_RESTORE; 1384 default: 1385 llvm_unreachable("unknown register size"); 1386 } 1387 } 1388 1389 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1390 switch (Size) { 1391 case 4: 1392 return AMDGPU::SI_SPILL_V32_RESTORE; 1393 case 8: 1394 return AMDGPU::SI_SPILL_V64_RESTORE; 1395 case 12: 1396 return AMDGPU::SI_SPILL_V96_RESTORE; 1397 case 16: 1398 return AMDGPU::SI_SPILL_V128_RESTORE; 1399 case 20: 1400 return AMDGPU::SI_SPILL_V160_RESTORE; 1401 case 24: 1402 return AMDGPU::SI_SPILL_V192_RESTORE; 1403 case 32: 1404 return AMDGPU::SI_SPILL_V256_RESTORE; 1405 case 64: 1406 return AMDGPU::SI_SPILL_V512_RESTORE; 1407 case 128: 1408 return AMDGPU::SI_SPILL_V1024_RESTORE; 1409 default: 1410 llvm_unreachable("unknown register size"); 1411 } 1412 } 1413 1414 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1415 switch (Size) { 1416 case 4: 1417 return AMDGPU::SI_SPILL_A32_RESTORE; 1418 case 8: 1419 return AMDGPU::SI_SPILL_A64_RESTORE; 1420 case 12: 1421 return AMDGPU::SI_SPILL_A96_RESTORE; 1422 case 16: 1423 return AMDGPU::SI_SPILL_A128_RESTORE; 1424 case 20: 1425 return AMDGPU::SI_SPILL_A160_RESTORE; 1426 case 24: 1427 return AMDGPU::SI_SPILL_A192_RESTORE; 1428 case 32: 1429 return AMDGPU::SI_SPILL_A256_RESTORE; 1430 case 64: 1431 return AMDGPU::SI_SPILL_A512_RESTORE; 1432 case 128: 1433 return AMDGPU::SI_SPILL_A1024_RESTORE; 1434 default: 1435 llvm_unreachable("unknown register size"); 1436 } 1437 } 1438 1439 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1440 MachineBasicBlock::iterator MI, 1441 Register DestReg, int FrameIndex, 1442 const TargetRegisterClass *RC, 1443 const TargetRegisterInfo *TRI) const { 1444 MachineFunction *MF = MBB.getParent(); 1445 SIMachineFunctionInfo *MFI 
= MF->getInfo<SIMachineFunctionInfo>(); 1446 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1447 const DebugLoc &DL = MBB.findDebugLoc(MI); 1448 unsigned SpillSize = TRI->getSpillSize(*RC); 1449 1450 MachinePointerInfo PtrInfo 1451 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1452 1453 MachineMemOperand *MMO = MF->getMachineMemOperand( 1454 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1455 FrameInfo.getObjectAlign(FrameIndex)); 1456 1457 if (RI.isSGPRClass(RC)) { 1458 MFI->setHasSpilledSGPRs(); 1459 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1460 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI && 1461 DestReg != AMDGPU::EXEC && "exec should not be spilled"); 1462 1463 // FIXME: Maybe this should not include a memoperand because it will be 1464 // lowered to non-memory instructions. 1465 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); 1466 if (DestReg.isVirtual() && SpillSize == 4) { 1467 MachineRegisterInfo &MRI = MF->getRegInfo(); 1468 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1469 } 1470 1471 if (RI.spillSGPRToVGPR()) 1472 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1473 BuildMI(MBB, MI, DL, OpDesc, DestReg) 1474 .addFrameIndex(FrameIndex) // addr 1475 .addMemOperand(MMO) 1476 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) 1477 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1478 return; 1479 } 1480 1481 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize) 1482 : getVGPRSpillRestoreOpcode(SpillSize); 1483 BuildMI(MBB, MI, DL, get(Opcode), DestReg) 1484 .addFrameIndex(FrameIndex) // vaddr 1485 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1486 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1487 .addImm(0) // offset 1488 .addMemOperand(MMO); 1489 } 1490 1491 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB, 1492 MachineBasicBlock::iterator MI, 1493 int Count) const { 1494 DebugLoc DL = MBB.findDebugLoc(MI); 1495 while (Count > 0) { 1496 int Arg; 1497 if (Count >= 8) 1498 Arg = 7; 1499 else 1500 Arg = Count - 1; 1501 Count -= 8; 1502 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)) 1503 .addImm(Arg); 1504 } 1505 } 1506 1507 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB, 1508 MachineBasicBlock::iterator MI) const { 1509 insertWaitStates(MBB, MI, 1); 1510 } 1511 1512 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const { 1513 auto MF = MBB.getParent(); 1514 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1515 1516 assert(Info->isEntryFunction()); 1517 1518 if (MBB.succ_empty()) { 1519 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end(); 1520 if (HasNoTerminator) { 1521 if (Info->returnsVoid()) { 1522 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0); 1523 } else { 1524 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG)); 1525 } 1526 } 1527 } 1528 } 1529 1530 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) { 1531 switch (MI.getOpcode()) { 1532 default: return 1; // FIXME: Do wait states equal cycles? 
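  // S_NOP waits for (imm + 1) states, matching how insertWaitStates above
  // encodes the count.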
1533 1534 case AMDGPU::S_NOP: 1535 return MI.getOperand(0).getImm() + 1; 1536 } 1537 } 1538 1539 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1540 MachineBasicBlock &MBB = *MI.getParent(); 1541 DebugLoc DL = MBB.findDebugLoc(MI); 1542 switch (MI.getOpcode()) { 1543 default: return TargetInstrInfo::expandPostRAPseudo(MI); 1544 case AMDGPU::S_MOV_B64_term: 1545 // This is only a terminator to get the correct spill code placement during 1546 // register allocation. 1547 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1548 break; 1549 1550 case AMDGPU::S_MOV_B32_term: 1551 // This is only a terminator to get the correct spill code placement during 1552 // register allocation. 1553 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1554 break; 1555 1556 case AMDGPU::S_XOR_B64_term: 1557 // This is only a terminator to get the correct spill code placement during 1558 // register allocation. 1559 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1560 break; 1561 1562 case AMDGPU::S_XOR_B32_term: 1563 // This is only a terminator to get the correct spill code placement during 1564 // register allocation. 1565 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1566 break; 1567 case AMDGPU::S_OR_B64_term: 1568 // This is only a terminator to get the correct spill code placement during 1569 // register allocation. 1570 MI.setDesc(get(AMDGPU::S_OR_B64)); 1571 break; 1572 case AMDGPU::S_OR_B32_term: 1573 // This is only a terminator to get the correct spill code placement during 1574 // register allocation. 1575 MI.setDesc(get(AMDGPU::S_OR_B32)); 1576 break; 1577 1578 case AMDGPU::S_ANDN2_B64_term: 1579 // This is only a terminator to get the correct spill code placement during 1580 // register allocation. 1581 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1582 break; 1583 1584 case AMDGPU::S_ANDN2_B32_term: 1585 // This is only a terminator to get the correct spill code placement during 1586 // register allocation. 1587 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1588 break; 1589 1590 case AMDGPU::V_MOV_B64_PSEUDO: { 1591 Register Dst = MI.getOperand(0).getReg(); 1592 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1593 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1594 1595 const MachineOperand &SrcOp = MI.getOperand(1); 1596 // FIXME: Will this work for 64-bit floating point immediates? 1597 assert(!SrcOp.isFPImm()); 1598 if (SrcOp.isImm()) { 1599 APInt Imm(64, SrcOp.getImm()); 1600 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1601 .addImm(Imm.getLoBits(32).getZExtValue()) 1602 .addReg(Dst, RegState::Implicit | RegState::Define); 1603 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1604 .addImm(Imm.getHiBits(32).getZExtValue()) 1605 .addReg(Dst, RegState::Implicit | RegState::Define); 1606 } else { 1607 assert(SrcOp.isReg()); 1608 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1609 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1610 .addReg(Dst, RegState::Implicit | RegState::Define); 1611 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1612 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1613 .addReg(Dst, RegState::Implicit | RegState::Define); 1614 } 1615 MI.eraseFromParent(); 1616 break; 1617 } 1618 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1619 expandMovDPP64(MI); 1620 break; 1621 } 1622 case AMDGPU::V_SET_INACTIVE_B32: { 1623 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1624 unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1625 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1626 .addReg(Exec); 1627 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1628 .add(MI.getOperand(2)); 1629 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1630 .addReg(Exec); 1631 MI.eraseFromParent(); 1632 break; 1633 } 1634 case AMDGPU::V_SET_INACTIVE_B64: { 1635 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1636 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1637 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1638 .addReg(Exec); 1639 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1640 MI.getOperand(0).getReg()) 1641 .add(MI.getOperand(2)); 1642 expandPostRAPseudo(*Copy); 1643 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1644 .addReg(Exec); 1645 MI.eraseFromParent(); 1646 break; 1647 } 1648 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1649 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1650 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1651 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1652 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1653 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1654 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1655 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1656 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1657 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1658 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1659 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1660 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1661 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1662 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1663 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1664 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1665 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1666 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1667 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1668 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1669 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1670 1671 unsigned Opc; 1672 if (RI.hasVGPRs(EltRC)) { 1673 Opc = ST.useVGPRIndexMode() ? 1674 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1675 } else { 1676 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1677 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1678 } 1679 1680 const MCInstrDesc &OpDesc = get(Opc); 1681 Register VecReg = MI.getOperand(0).getReg(); 1682 bool IsUndef = MI.getOperand(1).isUndef(); 1683 unsigned SubReg = MI.getOperand(3).getImm(); 1684 assert(VecReg == MI.getOperand(1).getReg()); 1685 1686 MachineInstrBuilder MIB = 1687 BuildMI(MBB, MI, DL, OpDesc) 1688 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1689 .add(MI.getOperand(2)) 1690 .addReg(VecReg, RegState::ImplicitDefine) 1691 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1692 1693 const int ImpDefIdx = 1694 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1695 const int ImpUseIdx = ImpDefIdx + 1; 1696 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1697 MI.eraseFromParent(); 1698 break; 1699 } 1700 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1701 MachineFunction &MF = *MBB.getParent(); 1702 Register Reg = MI.getOperand(0).getReg(); 1703 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1704 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1705 1706 // Create a bundle so these instructions won't be re-ordered by the 1707 // post-RA scheduler. 1708 MIBundleBuilder Bundler(MBB, MI); 1709 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1710 1711 // Add 32-bit offset from this instruction to the start of the 1712 // constant data. 
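    // The low half takes the relocation directly; the carry out is folded into
    // the high half via S_ADDC_U32 below.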
1713 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1714 .addReg(RegLo) 1715 .add(MI.getOperand(1))); 1716 1717 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1718 .addReg(RegHi); 1719 MIB.add(MI.getOperand(2)); 1720 1721 Bundler.append(MIB); 1722 finalizeBundle(MBB, Bundler.begin()); 1723 1724 MI.eraseFromParent(); 1725 break; 1726 } 1727 case AMDGPU::ENTER_WWM: { 1728 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1729 // WWM is entered. 1730 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1731 : AMDGPU::S_OR_SAVEEXEC_B64)); 1732 break; 1733 } 1734 case AMDGPU::EXIT_WWM: { 1735 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1736 // WWM is exited. 1737 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1738 break; 1739 } 1740 } 1741 return true; 1742 } 1743 1744 std::pair<MachineInstr*, MachineInstr*> 1745 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1746 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1747 1748 MachineBasicBlock &MBB = *MI.getParent(); 1749 DebugLoc DL = MBB.findDebugLoc(MI); 1750 MachineFunction *MF = MBB.getParent(); 1751 MachineRegisterInfo &MRI = MF->getRegInfo(); 1752 Register Dst = MI.getOperand(0).getReg(); 1753 unsigned Part = 0; 1754 MachineInstr *Split[2]; 1755 1756 1757 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1758 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1759 if (Dst.isPhysical()) { 1760 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1761 } else { 1762 assert(MRI.isSSA()); 1763 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1764 MovDPP.addDef(Tmp); 1765 } 1766 1767 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1768 const MachineOperand &SrcOp = MI.getOperand(I); 1769 assert(!SrcOp.isFPImm()); 1770 if (SrcOp.isImm()) { 1771 APInt Imm(64, SrcOp.getImm()); 1772 Imm.ashrInPlace(Part * 32); 1773 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1774 } else { 1775 assert(SrcOp.isReg()); 1776 Register Src = SrcOp.getReg(); 1777 if (Src.isPhysical()) 1778 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1779 else 1780 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 1781 } 1782 } 1783 1784 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1785 MovDPP.addImm(MI.getOperand(I).getImm()); 1786 1787 Split[Part] = MovDPP; 1788 ++Part; 1789 } 1790 1791 if (Dst.isVirtual()) 1792 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1793 .addReg(Split[0]->getOperand(0).getReg()) 1794 .addImm(AMDGPU::sub0) 1795 .addReg(Split[1]->getOperand(0).getReg()) 1796 .addImm(AMDGPU::sub1); 1797 1798 MI.eraseFromParent(); 1799 return std::make_pair(Split[0], Split[1]); 1800 } 1801 1802 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1803 MachineOperand &Src0, 1804 unsigned Src0OpName, 1805 MachineOperand &Src1, 1806 unsigned Src1OpName) const { 1807 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1808 if (!Src0Mods) 1809 return false; 1810 1811 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1812 assert(Src1Mods && 1813 "All commutable instructions have both src0 and src1 modifiers"); 1814 1815 int Src0ModsVal = Src0Mods->getImm(); 1816 int Src1ModsVal = Src1Mods->getImm(); 1817 1818 Src1Mods->setImm(Src0ModsVal); 1819 Src0Mods->setImm(Src1ModsVal); 1820 return true; 1821 } 1822 1823 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1824 MachineOperand &RegOp, 1825 MachineOperand &NonRegOp) { 1826 Register Reg = RegOp.getReg(); 1827 unsigned SubReg = RegOp.getSubReg(); 1828 bool IsKill = RegOp.isKill(); 1829 bool IsDead = RegOp.isDead(); 1830 bool IsUndef = RegOp.isUndef(); 1831 bool IsDebug = RegOp.isDebug(); 1832 1833 if (NonRegOp.isImm()) 1834 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1835 else if (NonRegOp.isFI()) 1836 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1837 else if (NonRegOp.isGlobal()) { 1838 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 1839 NonRegOp.getTargetFlags()); 1840 } else 1841 return nullptr; 1842 1843 // Make sure we don't reinterpret a subreg index in the target flags. 1844 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 1845 1846 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1847 NonRegOp.setSubReg(SubReg); 1848 1849 return &MI; 1850 } 1851 1852 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1853 unsigned Src0Idx, 1854 unsigned Src1Idx) const { 1855 assert(!NewMI && "this should never be used"); 1856 1857 unsigned Opc = MI.getOpcode(); 1858 int CommutedOpcode = commuteOpcode(Opc); 1859 if (CommutedOpcode == -1) 1860 return nullptr; 1861 1862 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1863 static_cast<int>(Src0Idx) && 1864 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1865 static_cast<int>(Src1Idx) && 1866 "inconsistency with findCommutedOpIndices"); 1867 1868 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1869 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1870 1871 MachineInstr *CommutedMI = nullptr; 1872 if (Src0.isReg() && Src1.isReg()) { 1873 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1874 // Be sure to copy the source modifiers to the right place. 1875 CommutedMI 1876 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1877 } 1878 1879 } else if (Src0.isReg() && !Src1.isReg()) { 1880 // src0 should always be able to support any operand type, so no need to 1881 // check operand legality. 
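// For instance (illustrative operands), commuting something like
// v_add_f32 %dst, %a, <literal> moves the literal into src0 and the
// register into src1; src0 accepts any operand kind, so this direction
// needs no legality check, whereas the opposite direction handled next
// must first verify that src1 can take the non-register operand.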
1882 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1883 } else if (!Src0.isReg() && Src1.isReg()) { 1884 if (isOperandLegal(MI, Src1Idx, &Src0)) 1885 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1886 } else { 1887 // FIXME: Found two non registers to commute. This does happen. 1888 return nullptr; 1889 } 1890 1891 if (CommutedMI) { 1892 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1893 Src1, AMDGPU::OpName::src1_modifiers); 1894 1895 CommutedMI->setDesc(get(CommutedOpcode)); 1896 } 1897 1898 return CommutedMI; 1899 } 1900 1901 // This needs to be implemented because the source modifiers may be inserted 1902 // between the true commutable operands, and the base 1903 // TargetInstrInfo::commuteInstruction uses it. 1904 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1905 unsigned &SrcOpIdx0, 1906 unsigned &SrcOpIdx1) const { 1907 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1908 } 1909 1910 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1911 unsigned &SrcOpIdx1) const { 1912 if (!Desc.isCommutable()) 1913 return false; 1914 1915 unsigned Opc = Desc.getOpcode(); 1916 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1917 if (Src0Idx == -1) 1918 return false; 1919 1920 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1921 if (Src1Idx == -1) 1922 return false; 1923 1924 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1925 } 1926 1927 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1928 int64_t BrOffset) const { 1929 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1930 // block is unanalyzable. 1931 assert(BranchOp != AMDGPU::S_SETPC_B64); 1932 1933 // Convert to dwords. 1934 BrOffset /= 4; 1935 1936 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1937 // from the next instruction. 1938 BrOffset -= 1; 1939 1940 return isIntN(BranchOffsetBits, BrOffset); 1941 } 1942 1943 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1944 const MachineInstr &MI) const { 1945 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1946 // This would be a difficult analysis to perform, but can always be legal so 1947 // there's no need to analyze it. 1948 return nullptr; 1949 } 1950 1951 return MI.getOperand(0).getMBB(); 1952 } 1953 1954 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1955 MachineBasicBlock &DestBB, 1956 const DebugLoc &DL, 1957 int64_t BrOffset, 1958 RegScavenger *RS) const { 1959 assert(RS && "RegScavenger required for long branching"); 1960 assert(MBB.empty() && 1961 "new block should be inserted for expanding unconditional branch"); 1962 assert(MBB.pred_size() == 1); 1963 1964 MachineFunction *MF = MBB.getParent(); 1965 MachineRegisterInfo &MRI = MF->getRegInfo(); 1966 1967 // FIXME: Virtual register workaround for RegScavenger not working with empty 1968 // blocks. 1969 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1970 1971 auto I = MBB.end(); 1972 1973 // We need to compute the offset relative to the instruction immediately after 1974 // s_getpc_b64. Insert pc arithmetic code before last terminator. 1975 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 1976 1977 // TODO: Handle > 32-bit block address. 
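// Illustrative shape of the code emitted below for a forward long branch
// (register numbers are arbitrary; the backward case uses s_sub_u32 /
// s_subb_u32 instead of the adds):
//
//   s_getpc_b64 s[0:1]
//   s_add_u32  s0, s0, dest_bb
//   s_addc_u32 s1, s1, 0
//   s_setpc_b64 s[0:1]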
1978 if (BrOffset >= 0) { 1979 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 1980 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1981 .addReg(PCReg, 0, AMDGPU::sub0) 1982 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 1983 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 1984 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1985 .addReg(PCReg, 0, AMDGPU::sub1) 1986 .addImm(0); 1987 } else { 1988 // Backwards branch. 1989 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) 1990 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1991 .addReg(PCReg, 0, AMDGPU::sub0) 1992 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); 1993 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) 1994 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1995 .addReg(PCReg, 0, AMDGPU::sub1) 1996 .addImm(0); 1997 } 1998 1999 // Insert the indirect branch after the other terminator. 2000 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2001 .addReg(PCReg); 2002 2003 // FIXME: If spilling is necessary, this will fail because this scavenger has 2004 // no emergency stack slots. It is non-trivial to spill in this situation, 2005 // because the restore code needs to be specially placed after the 2006 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2007 // block. 2008 // 2009 // If a spill is needed for the pc register pair, we need to insert a spill 2010 // restore block right before the destination block, and insert a short branch 2011 // into the old destination block's fallthrough predecessor. 2012 // e.g.: 2013 // 2014 // s_cbranch_scc0 skip_long_branch: 2015 // 2016 // long_branch_bb: 2017 // spill s[8:9] 2018 // s_getpc_b64 s[8:9] 2019 // s_add_u32 s8, s8, restore_bb 2020 // s_addc_u32 s9, s9, 0 2021 // s_setpc_b64 s[8:9] 2022 // 2023 // skip_long_branch: 2024 // foo; 2025 // 2026 // ..... 2027 // 2028 // dest_bb_fallthrough_predecessor: 2029 // bar; 2030 // s_branch dest_bb 2031 // 2032 // restore_bb: 2033 // restore s[8:9] 2034 // fallthrough dest_bb 2035 /// 2036 // dest_bb: 2037 // buzz; 2038 2039 RS->enterBasicBlockEnd(MBB); 2040 Register Scav = RS->scavengeRegisterBackwards( 2041 AMDGPU::SReg_64RegClass, 2042 MachineBasicBlock::iterator(GetPC), false, 0); 2043 MRI.replaceRegWith(PCReg, Scav); 2044 MRI.clearVirtRegs(); 2045 RS->setRegUsed(Scav); 2046 2047 return 4 + 8 + 4 + 4; 2048 } 2049 2050 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2051 switch (Cond) { 2052 case SIInstrInfo::SCC_TRUE: 2053 return AMDGPU::S_CBRANCH_SCC1; 2054 case SIInstrInfo::SCC_FALSE: 2055 return AMDGPU::S_CBRANCH_SCC0; 2056 case SIInstrInfo::VCCNZ: 2057 return AMDGPU::S_CBRANCH_VCCNZ; 2058 case SIInstrInfo::VCCZ: 2059 return AMDGPU::S_CBRANCH_VCCZ; 2060 case SIInstrInfo::EXECNZ: 2061 return AMDGPU::S_CBRANCH_EXECNZ; 2062 case SIInstrInfo::EXECZ: 2063 return AMDGPU::S_CBRANCH_EXECZ; 2064 default: 2065 llvm_unreachable("invalid branch predicate"); 2066 } 2067 } 2068 2069 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2070 switch (Opcode) { 2071 case AMDGPU::S_CBRANCH_SCC0: 2072 return SCC_FALSE; 2073 case AMDGPU::S_CBRANCH_SCC1: 2074 return SCC_TRUE; 2075 case AMDGPU::S_CBRANCH_VCCNZ: 2076 return VCCNZ; 2077 case AMDGPU::S_CBRANCH_VCCZ: 2078 return VCCZ; 2079 case AMDGPU::S_CBRANCH_EXECNZ: 2080 return EXECNZ; 2081 case AMDGPU::S_CBRANCH_EXECZ: 2082 return EXECZ; 2083 default: 2084 return INVALID_BR; 2085 } 2086 } 2087 2088 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2089 MachineBasicBlock::iterator I, 2090 MachineBasicBlock *&TBB, 2091 MachineBasicBlock *&FBB, 2092 
SmallVectorImpl<MachineOperand> &Cond, 2093 bool AllowModify) const { 2094 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2095 // Unconditional Branch 2096 TBB = I->getOperand(0).getMBB(); 2097 return false; 2098 } 2099 2100 MachineBasicBlock *CondBB = nullptr; 2101 2102 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2103 CondBB = I->getOperand(1).getMBB(); 2104 Cond.push_back(I->getOperand(0)); 2105 } else { 2106 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2107 if (Pred == INVALID_BR) 2108 return true; 2109 2110 CondBB = I->getOperand(0).getMBB(); 2111 Cond.push_back(MachineOperand::CreateImm(Pred)); 2112 Cond.push_back(I->getOperand(1)); // Save the branch register. 2113 } 2114 ++I; 2115 2116 if (I == MBB.end()) { 2117 // Conditional branch followed by fall-through. 2118 TBB = CondBB; 2119 return false; 2120 } 2121 2122 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2123 TBB = CondBB; 2124 FBB = I->getOperand(0).getMBB(); 2125 return false; 2126 } 2127 2128 return true; 2129 } 2130 2131 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2132 MachineBasicBlock *&FBB, 2133 SmallVectorImpl<MachineOperand> &Cond, 2134 bool AllowModify) const { 2135 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2136 auto E = MBB.end(); 2137 if (I == E) 2138 return false; 2139 2140 // Skip over the instructions that are artificially terminators for special 2141 // exec management. 2142 while (I != E && !I->isBranch() && !I->isReturn() && 2143 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2144 switch (I->getOpcode()) { 2145 case AMDGPU::SI_MASK_BRANCH: 2146 case AMDGPU::S_MOV_B64_term: 2147 case AMDGPU::S_XOR_B64_term: 2148 case AMDGPU::S_OR_B64_term: 2149 case AMDGPU::S_ANDN2_B64_term: 2150 case AMDGPU::S_MOV_B32_term: 2151 case AMDGPU::S_XOR_B32_term: 2152 case AMDGPU::S_OR_B32_term: 2153 case AMDGPU::S_ANDN2_B32_term: 2154 break; 2155 case AMDGPU::SI_IF: 2156 case AMDGPU::SI_ELSE: 2157 case AMDGPU::SI_KILL_I1_TERMINATOR: 2158 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2159 // FIXME: It's messy that these need to be considered here at all. 2160 return true; 2161 default: 2162 llvm_unreachable("unexpected non-branch terminator inst"); 2163 } 2164 2165 ++I; 2166 } 2167 2168 if (I == E) 2169 return false; 2170 2171 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2172 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2173 2174 ++I; 2175 2176 // TODO: Should be able to treat as fallthrough? 2177 if (I == MBB.end()) 2178 return true; 2179 2180 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2181 return true; 2182 2183 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2184 2185 // Specifically handle the case where the conditional branch is to the same 2186 // destination as the mask branch. e.g. 2187 // 2188 // si_mask_branch BB8 2189 // s_cbranch_execz BB8 2190 // s_cbranch BB9 2191 // 2192 // This is required to understand divergent loops which may need the branches 2193 // to be relaxed. 
2194 if (TBB != MaskBrDest || Cond.empty()) 2195 return true; 2196 2197 auto Pred = Cond[0].getImm(); 2198 return (Pred != EXECZ && Pred != EXECNZ); 2199 } 2200 2201 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2202 int *BytesRemoved) const { 2203 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2204 2205 unsigned Count = 0; 2206 unsigned RemovedSize = 0; 2207 while (I != MBB.end()) { 2208 MachineBasicBlock::iterator Next = std::next(I); 2209 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2210 I = Next; 2211 continue; 2212 } 2213 2214 RemovedSize += getInstSizeInBytes(*I); 2215 I->eraseFromParent(); 2216 ++Count; 2217 I = Next; 2218 } 2219 2220 if (BytesRemoved) 2221 *BytesRemoved = RemovedSize; 2222 2223 return Count; 2224 } 2225 2226 // Copy the flags onto the implicit condition register operand. 2227 static void preserveCondRegFlags(MachineOperand &CondReg, 2228 const MachineOperand &OrigCond) { 2229 CondReg.setIsUndef(OrigCond.isUndef()); 2230 CondReg.setIsKill(OrigCond.isKill()); 2231 } 2232 2233 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2234 MachineBasicBlock *TBB, 2235 MachineBasicBlock *FBB, 2236 ArrayRef<MachineOperand> Cond, 2237 const DebugLoc &DL, 2238 int *BytesAdded) const { 2239 if (!FBB && Cond.empty()) { 2240 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2241 .addMBB(TBB); 2242 if (BytesAdded) 2243 *BytesAdded = 4; 2244 return 1; 2245 } 2246 2247 if(Cond.size() == 1 && Cond[0].isReg()) { 2248 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2249 .add(Cond[0]) 2250 .addMBB(TBB); 2251 return 1; 2252 } 2253 2254 assert(TBB && Cond[0].isImm()); 2255 2256 unsigned Opcode 2257 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2258 2259 if (!FBB) { 2260 Cond[1].isUndef(); 2261 MachineInstr *CondBr = 2262 BuildMI(&MBB, DL, get(Opcode)) 2263 .addMBB(TBB); 2264 2265 // Copy the flags onto the implicit condition register operand. 2266 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2267 fixImplicitOperands(*CondBr); 2268 2269 if (BytesAdded) 2270 *BytesAdded = 4; 2271 return 1; 2272 } 2273 2274 assert(TBB && FBB); 2275 2276 MachineInstr *CondBr = 2277 BuildMI(&MBB, DL, get(Opcode)) 2278 .addMBB(TBB); 2279 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2280 .addMBB(FBB); 2281 2282 MachineOperand &CondReg = CondBr->getOperand(1); 2283 CondReg.setIsUndef(Cond[1].isUndef()); 2284 CondReg.setIsKill(Cond[1].isKill()); 2285 2286 if (BytesAdded) 2287 *BytesAdded = 8; 2288 2289 return 2; 2290 } 2291 2292 bool SIInstrInfo::reverseBranchCondition( 2293 SmallVectorImpl<MachineOperand> &Cond) const { 2294 if (Cond.size() != 2) { 2295 return true; 2296 } 2297 2298 if (Cond[0].isImm()) { 2299 Cond[0].setImm(-Cond[0].getImm()); 2300 return false; 2301 } 2302 2303 return true; 2304 } 2305 2306 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2307 ArrayRef<MachineOperand> Cond, 2308 Register DstReg, Register TrueReg, 2309 Register FalseReg, int &CondCycles, 2310 int &TrueCycles, int &FalseCycles) const { 2311 switch (Cond[0].getImm()) { 2312 case VCCNZ: 2313 case VCCZ: { 2314 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2315 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2316 if (MRI.getRegClass(FalseReg) != RC) 2317 return false; 2318 2319 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2320 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2321 2322 // Limit to equal cost for branch vs. N v_cndmask_b32s. 
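// For example, a 64-bit VGPR select costs 2 v_cndmask_b32 and a 128-bit one
// costs 4; the NumInsts <= 6 check below rejects anything wider than 192
// bits.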
2323 return RI.hasVGPRs(RC) && NumInsts <= 6; 2324 } 2325 case SCC_TRUE: 2326 case SCC_FALSE: { 2327 // FIXME: We could insert for VGPRs if we could replace the original compare 2328 // with a vector one. 2329 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2330 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2331 if (MRI.getRegClass(FalseReg) != RC) 2332 return false; 2333 2334 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2335 2336 // Multiples of 8 can do s_cselect_b64 2337 if (NumInsts % 2 == 0) 2338 NumInsts /= 2; 2339 2340 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2341 return RI.isSGPRClass(RC); 2342 } 2343 default: 2344 return false; 2345 } 2346 } 2347 2348 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2349 MachineBasicBlock::iterator I, const DebugLoc &DL, 2350 Register DstReg, ArrayRef<MachineOperand> Cond, 2351 Register TrueReg, Register FalseReg) const { 2352 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2353 if (Pred == VCCZ || Pred == SCC_FALSE) { 2354 Pred = static_cast<BranchPredicate>(-Pred); 2355 std::swap(TrueReg, FalseReg); 2356 } 2357 2358 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2359 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2360 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2361 2362 if (DstSize == 32) { 2363 MachineInstr *Select; 2364 if (Pred == SCC_TRUE) { 2365 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2366 .addReg(TrueReg) 2367 .addReg(FalseReg); 2368 } else { 2369 // Instruction's operands are backwards from what is expected. 2370 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2371 .addReg(FalseReg) 2372 .addReg(TrueReg); 2373 } 2374 2375 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2376 return; 2377 } 2378 2379 if (DstSize == 64 && Pred == SCC_TRUE) { 2380 MachineInstr *Select = 2381 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2382 .addReg(TrueReg) 2383 .addReg(FalseReg); 2384 2385 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2386 return; 2387 } 2388 2389 static const int16_t Sub0_15[] = { 2390 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2391 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2392 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2393 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2394 }; 2395 2396 static const int16_t Sub0_15_64[] = { 2397 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2398 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2399 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2400 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2401 }; 2402 2403 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2404 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2405 const int16_t *SubIndices = Sub0_15; 2406 int NElts = DstSize / 32; 2407 2408 // 64-bit select is only available for SALU. 2409 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 
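// As an illustration of the selection below: a 128-bit SGPR select with an
// SCC condition is split into two s_cselect_b64 over 64-bit sub-registers,
// while an odd dword count (e.g. 96 bits) currently falls back to one
// s_cselect_b32 per dword, as the TODO above notes.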
2410 if (Pred == SCC_TRUE) { 2411 if (NElts % 2) { 2412 SelOp = AMDGPU::S_CSELECT_B32; 2413 EltRC = &AMDGPU::SGPR_32RegClass; 2414 } else { 2415 SelOp = AMDGPU::S_CSELECT_B64; 2416 EltRC = &AMDGPU::SGPR_64RegClass; 2417 SubIndices = Sub0_15_64; 2418 NElts /= 2; 2419 } 2420 } 2421 2422 MachineInstrBuilder MIB = BuildMI( 2423 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2424 2425 I = MIB->getIterator(); 2426 2427 SmallVector<Register, 8> Regs; 2428 for (int Idx = 0; Idx != NElts; ++Idx) { 2429 Register DstElt = MRI.createVirtualRegister(EltRC); 2430 Regs.push_back(DstElt); 2431 2432 unsigned SubIdx = SubIndices[Idx]; 2433 2434 MachineInstr *Select; 2435 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2436 Select = 2437 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2438 .addReg(FalseReg, 0, SubIdx) 2439 .addReg(TrueReg, 0, SubIdx); 2440 } else { 2441 Select = 2442 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2443 .addReg(TrueReg, 0, SubIdx) 2444 .addReg(FalseReg, 0, SubIdx); 2445 } 2446 2447 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2448 fixImplicitOperands(*Select); 2449 2450 MIB.addReg(DstElt) 2451 .addImm(SubIdx); 2452 } 2453 } 2454 2455 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2456 switch (MI.getOpcode()) { 2457 case AMDGPU::V_MOV_B32_e32: 2458 case AMDGPU::V_MOV_B32_e64: 2459 case AMDGPU::V_MOV_B64_PSEUDO: { 2460 // If there are additional implicit register operands, this may be used for 2461 // register indexing so the source register operand isn't simply copied. 2462 unsigned NumOps = MI.getDesc().getNumOperands() + 2463 MI.getDesc().getNumImplicitUses(); 2464 2465 return MI.getNumOperands() == NumOps; 2466 } 2467 case AMDGPU::S_MOV_B32: 2468 case AMDGPU::S_MOV_B64: 2469 case AMDGPU::COPY: 2470 case AMDGPU::V_ACCVGPR_WRITE_B32: 2471 case AMDGPU::V_ACCVGPR_READ_B32: 2472 return true; 2473 default: 2474 return false; 2475 } 2476 } 2477 2478 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2479 unsigned Kind) const { 2480 switch(Kind) { 2481 case PseudoSourceValue::Stack: 2482 case PseudoSourceValue::FixedStack: 2483 return AMDGPUAS::PRIVATE_ADDRESS; 2484 case PseudoSourceValue::ConstantPool: 2485 case PseudoSourceValue::GOT: 2486 case PseudoSourceValue::JumpTable: 2487 case PseudoSourceValue::GlobalValueCallEntry: 2488 case PseudoSourceValue::ExternalSymbolCallEntry: 2489 case PseudoSourceValue::TargetCustom: 2490 return AMDGPUAS::CONSTANT_ADDRESS; 2491 } 2492 return AMDGPUAS::FLAT_ADDRESS; 2493 } 2494 2495 static void removeModOperands(MachineInstr &MI) { 2496 unsigned Opc = MI.getOpcode(); 2497 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2498 AMDGPU::OpName::src0_modifiers); 2499 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2500 AMDGPU::OpName::src1_modifiers); 2501 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2502 AMDGPU::OpName::src2_modifiers); 2503 2504 MI.RemoveOperand(Src2ModIdx); 2505 MI.RemoveOperand(Src1ModIdx); 2506 MI.RemoveOperand(Src0ModIdx); 2507 } 2508 2509 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2510 Register Reg, MachineRegisterInfo *MRI) const { 2511 if (!MRI->hasOneNonDBGUse(Reg)) 2512 return false; 2513 2514 switch (DefMI.getOpcode()) { 2515 default: 2516 return false; 2517 case AMDGPU::S_MOV_B64: 2518 // TODO: We could fold 64-bit immediates, but this gets complicated 2519 // when there are sub-registers.
2520 return false; 2521 2522 case AMDGPU::V_MOV_B32_e32: 2523 case AMDGPU::S_MOV_B32: 2524 case AMDGPU::V_ACCVGPR_WRITE_B32: 2525 break; 2526 } 2527 2528 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2529 assert(ImmOp); 2530 // FIXME: We could handle FrameIndex values here. 2531 if (!ImmOp->isImm()) 2532 return false; 2533 2534 unsigned Opc = UseMI.getOpcode(); 2535 if (Opc == AMDGPU::COPY) { 2536 Register DstReg = UseMI.getOperand(0).getReg(); 2537 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2538 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2539 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2540 APInt Imm(32, ImmOp->getImm()); 2541 2542 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2543 Imm = Imm.ashr(16); 2544 2545 if (RI.isAGPR(*MRI, DstReg)) { 2546 if (!isInlineConstant(Imm)) 2547 return false; 2548 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32; 2549 } 2550 2551 if (Is16Bit) { 2552 if (isVGPRCopy) 2553 return false; // Do not clobber vgpr_hi16 2554 2555 if (DstReg.isVirtual() && 2556 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2557 return false; 2558 2559 UseMI.getOperand(0).setSubReg(0); 2560 if (DstReg.isPhysical()) { 2561 DstReg = RI.get32BitRegister(DstReg); 2562 UseMI.getOperand(0).setReg(DstReg); 2563 } 2564 assert(UseMI.getOperand(1).getReg().isVirtual()); 2565 } 2566 2567 UseMI.setDesc(get(NewOpc)); 2568 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2569 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2570 return true; 2571 } 2572 2573 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2574 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 || 2575 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2576 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) { 2577 // Don't fold if we are using source or output modifiers. The new VOP2 2578 // instructions don't have them. 2579 if (hasAnyModifiersSet(UseMI)) 2580 return false; 2581 2582 // If this is a free constant, there's no reason to do this. 2583 // TODO: We could fold this here instead of letting SIFoldOperands do it 2584 // later. 2585 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2586 2587 // Any src operand can be used for the legality check. 2588 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2589 return false; 2590 2591 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2592 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; 2593 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2594 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; 2595 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2596 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2597 2598 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2599 // We should only expect these to be on src0 due to canonicalizations. 2600 if (Src0->isReg() && Src0->getReg() == Reg) { 2601 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2602 return false; 2603 2604 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2605 return false; 2606 2607 unsigned NewOpc = 2608 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2609 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2610 if (pseudoToMCOpcode(NewOpc) == -1) 2611 return false; 2612 2613 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 
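// Rough sketch of the rewrite done below (virtual register names are
// illustrative):
//
//   %k = v_mov_b32 <imm>
//   %d = v_mad_f32 %k, %b, %c
// -->
//   %d = v_madmk_f32 %b, <imm>, %c
//
// i.e. the remaining register multiplicand moves into src0 and the folded
// constant becomes the v_madmk literal in the old src1 slot.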
2614 2615 const int64_t Imm = ImmOp->getImm(); 2616 2617 // FIXME: This would be a lot easier if we could return a new instruction 2618 // instead of having to modify in place. 2619 2620 // Remove these first since they are at the end. 2621 UseMI.RemoveOperand( 2622 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2623 UseMI.RemoveOperand( 2624 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2625 2626 Register Src1Reg = Src1->getReg(); 2627 unsigned Src1SubReg = Src1->getSubReg(); 2628 Src0->setReg(Src1Reg); 2629 Src0->setSubReg(Src1SubReg); 2630 Src0->setIsKill(Src1->isKill()); 2631 2632 if (Opc == AMDGPU::V_MAC_F32_e64 || 2633 Opc == AMDGPU::V_MAC_F16_e64 || 2634 Opc == AMDGPU::V_FMAC_F32_e64 || 2635 Opc == AMDGPU::V_FMAC_F16_e64) 2636 UseMI.untieRegOperand( 2637 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2638 2639 Src1->ChangeToImmediate(Imm); 2640 2641 removeModOperands(UseMI); 2642 UseMI.setDesc(get(NewOpc)); 2643 2644 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2645 if (DeleteDef) 2646 DefMI.eraseFromParent(); 2647 2648 return true; 2649 } 2650 2651 // Added part is the constant: Use v_madak_{f16, f32}. 2652 if (Src2->isReg() && Src2->getReg() == Reg) { 2653 // Not allowed to use constant bus for another operand. 2654 // We can however allow an inline immediate as src0. 2655 bool Src0Inlined = false; 2656 if (Src0->isReg()) { 2657 // Try to inline constant if possible. 2658 // If the Def moves immediate and the use is single 2659 // We are saving VGPR here. 2660 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2661 if (Def && Def->isMoveImmediate() && 2662 isInlineConstant(Def->getOperand(1)) && 2663 MRI->hasOneUse(Src0->getReg())) { 2664 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2665 Src0Inlined = true; 2666 } else if ((Src0->getReg().isPhysical() && 2667 (ST.getConstantBusLimit(Opc) <= 1 && 2668 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2669 (Src0->getReg().isVirtual() && 2670 (ST.getConstantBusLimit(Opc) <= 1 && 2671 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2672 return false; 2673 // VGPR is okay as Src0 - fallthrough 2674 } 2675 2676 if (Src1->isReg() && !Src0Inlined ) { 2677 // We have one slot for inlinable constant so far - try to fill it 2678 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2679 if (Def && Def->isMoveImmediate() && 2680 isInlineConstant(Def->getOperand(1)) && 2681 MRI->hasOneUse(Src1->getReg()) && 2682 commuteInstruction(UseMI)) { 2683 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2684 } else if ((Src1->getReg().isPhysical() && 2685 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2686 (Src1->getReg().isVirtual() && 2687 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2688 return false; 2689 // VGPR is okay as Src1 - fallthrough 2690 } 2691 2692 unsigned NewOpc = 2693 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2694 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2695 if (pseudoToMCOpcode(NewOpc) == -1) 2696 return false; 2697 2698 const int64_t Imm = ImmOp->getImm(); 2699 2700 // FIXME: This would be a lot easier if we could return a new instruction 2701 // instead of having to modify in place. 2702 2703 // Remove these first since they are at the end. 
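// Rough sketch of the rewrite this path performs (virtual register names
// are illustrative):
//
//   %k = v_mov_b32 <imm>
//   %d = v_mad_f32 %a, %b, %k
// -->
//   %d = v_madak_f32 %a, %b, <imm>
//
// The trailing omod/clamp operands mentioned above are dropped first.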
2704 UseMI.RemoveOperand( 2705 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2706 UseMI.RemoveOperand( 2707 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2708 2709 if (Opc == AMDGPU::V_MAC_F32_e64 || 2710 Opc == AMDGPU::V_MAC_F16_e64 || 2711 Opc == AMDGPU::V_FMAC_F32_e64 || 2712 Opc == AMDGPU::V_FMAC_F16_e64) 2713 UseMI.untieRegOperand( 2714 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2715 2716 // ChangingToImmediate adds Src2 back to the instruction. 2717 Src2->ChangeToImmediate(Imm); 2718 2719 // These come before src2. 2720 removeModOperands(UseMI); 2721 UseMI.setDesc(get(NewOpc)); 2722 // It might happen that UseMI was commuted 2723 // and we now have SGPR as SRC1. If so 2 inlined 2724 // constant and SGPR are illegal. 2725 legalizeOperands(UseMI); 2726 2727 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2728 if (DeleteDef) 2729 DefMI.eraseFromParent(); 2730 2731 return true; 2732 } 2733 } 2734 2735 return false; 2736 } 2737 2738 static bool 2739 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 2740 ArrayRef<const MachineOperand *> BaseOps2) { 2741 if (BaseOps1.size() != BaseOps2.size()) 2742 return false; 2743 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 2744 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 2745 return false; 2746 } 2747 return true; 2748 } 2749 2750 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2751 int WidthB, int OffsetB) { 2752 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2753 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2754 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 2755 return LowOffset + LowWidth <= HighOffset; 2756 } 2757 2758 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 2759 const MachineInstr &MIb) const { 2760 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 2761 int64_t Offset0, Offset1; 2762 unsigned Dummy0, Dummy1; 2763 bool Offset0IsScalable, Offset1IsScalable; 2764 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 2765 Dummy0, &RI) || 2766 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 2767 Dummy1, &RI)) 2768 return false; 2769 2770 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 2771 return false; 2772 2773 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2774 // FIXME: Handle ds_read2 / ds_write2. 2775 return false; 2776 } 2777 unsigned Width0 = MIa.memoperands().front()->getSize(); 2778 unsigned Width1 = MIb.memoperands().front()->getSize(); 2779 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 2780 } 2781 2782 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 2783 const MachineInstr &MIb) const { 2784 assert(MIa.mayLoadOrStore() && 2785 "MIa must load from or modify a memory location"); 2786 assert(MIb.mayLoadOrStore() && 2787 "MIb must load from or modify a memory location"); 2788 2789 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2790 return false; 2791 2792 // XXX - Can we relax this between address spaces? 2793 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2794 return false; 2795 2796 // TODO: Should we check the address space from the MachineMemOperand? That 2797 // would allow us to distinguish objects we know don't alias based on the 2798 // underlying address space, even if it was lowered to a different one, 2799 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2800 // buffer. 
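// As a sketch of the checks below: two DS accesses with identical base
// operands are only disjoint if their [offset, offset + width) ranges do
// not overlap, and a DS access is treated as disjoint from a FLAT access
// only when that FLAT access is segment-specific (and therefore not
// addressing LDS); a generic flat address could alias LDS, so it is
// conservatively assumed to overlap.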
2801 if (isDS(MIa)) { 2802 if (isDS(MIb)) 2803 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2804 2805 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 2806 } 2807 2808 if (isMUBUF(MIa) || isMTBUF(MIa)) { 2809 if (isMUBUF(MIb) || isMTBUF(MIb)) 2810 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2811 2812 return !isFLAT(MIb) && !isSMRD(MIb); 2813 } 2814 2815 if (isSMRD(MIa)) { 2816 if (isSMRD(MIb)) 2817 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2818 2819 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 2820 } 2821 2822 if (isFLAT(MIa)) { 2823 if (isFLAT(MIb)) 2824 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2825 2826 return false; 2827 } 2828 2829 return false; 2830 } 2831 2832 static int64_t getFoldableImm(const MachineOperand* MO) { 2833 if (!MO->isReg()) 2834 return false; 2835 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 2836 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2837 auto Def = MRI.getUniqueVRegDef(MO->getReg()); 2838 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && 2839 Def->getOperand(1).isImm()) 2840 return Def->getOperand(1).getImm(); 2841 return AMDGPU::NoRegister; 2842 } 2843 2844 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2845 MachineInstr &MI, 2846 LiveVariables *LV) const { 2847 unsigned Opc = MI.getOpcode(); 2848 bool IsF16 = false; 2849 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2850 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 2851 2852 switch (Opc) { 2853 default: 2854 return nullptr; 2855 case AMDGPU::V_MAC_F16_e64: 2856 case AMDGPU::V_FMAC_F16_e64: 2857 IsF16 = true; 2858 LLVM_FALLTHROUGH; 2859 case AMDGPU::V_MAC_F32_e64: 2860 case AMDGPU::V_FMAC_F32_e64: 2861 break; 2862 case AMDGPU::V_MAC_F16_e32: 2863 case AMDGPU::V_FMAC_F16_e32: 2864 IsF16 = true; 2865 LLVM_FALLTHROUGH; 2866 case AMDGPU::V_MAC_F32_e32: 2867 case AMDGPU::V_FMAC_F32_e32: { 2868 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2869 AMDGPU::OpName::src0); 2870 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2871 if (!Src0->isReg() && !Src0->isImm()) 2872 return nullptr; 2873 2874 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2875 return nullptr; 2876 2877 break; 2878 } 2879 } 2880 2881 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2882 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2883 const MachineOperand *Src0Mods = 2884 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2885 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2886 const MachineOperand *Src1Mods = 2887 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2888 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2889 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2890 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2891 2892 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && 2893 // If we have an SGPR input, we will violate the constant bus restriction. 2894 (ST.getConstantBusLimit(Opc) > 1 || 2895 !Src0->isReg() || 2896 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2897 if (auto Imm = getFoldableImm(Src2)) { 2898 unsigned NewOpc = 2899 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 2900 : (IsF16 ? 
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2901 if (pseudoToMCOpcode(NewOpc) != -1) 2902 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2903 .add(*Dst) 2904 .add(*Src0) 2905 .add(*Src1) 2906 .addImm(Imm); 2907 } 2908 unsigned NewOpc = 2909 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2910 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2911 if (auto Imm = getFoldableImm(Src1)) { 2912 if (pseudoToMCOpcode(NewOpc) != -1) 2913 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2914 .add(*Dst) 2915 .add(*Src0) 2916 .addImm(Imm) 2917 .add(*Src2); 2918 } 2919 if (auto Imm = getFoldableImm(Src0)) { 2920 if (pseudoToMCOpcode(NewOpc) != -1 && 2921 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, 2922 AMDGPU::OpName::src0), Src1)) 2923 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2924 .add(*Dst) 2925 .add(*Src1) 2926 .addImm(Imm) 2927 .add(*Src2); 2928 } 2929 } 2930 2931 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2932 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2933 if (pseudoToMCOpcode(NewOpc) == -1) 2934 return nullptr; 2935 2936 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2937 .add(*Dst) 2938 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2939 .add(*Src0) 2940 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2941 .add(*Src1) 2942 .addImm(0) // Src mods 2943 .add(*Src2) 2944 .addImm(Clamp ? Clamp->getImm() : 0) 2945 .addImm(Omod ? Omod->getImm() : 0); 2946 } 2947 2948 // It's not generally safe to move VALU instructions across these since it will 2949 // start using the register as a base index rather than directly. 2950 // XXX - Why isn't hasSideEffects sufficient for these? 2951 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2952 switch (MI.getOpcode()) { 2953 case AMDGPU::S_SET_GPR_IDX_ON: 2954 case AMDGPU::S_SET_GPR_IDX_MODE: 2955 case AMDGPU::S_SET_GPR_IDX_OFF: 2956 return true; 2957 default: 2958 return false; 2959 } 2960 } 2961 2962 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2963 const MachineBasicBlock *MBB, 2964 const MachineFunction &MF) const { 2965 // Skipping the check for SP writes in the base implementation. The reason it 2966 // was added was apparently due to compile time concerns. 2967 // 2968 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 2969 // but is probably avoidable. 2970 2971 // Copied from base implementation. 2972 // Terminators and labels can't be scheduled around. 2973 if (MI.isTerminator() || MI.isPosition()) 2974 return true; 2975 2976 // INLINEASM_BR can jump to another block 2977 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 2978 return true; 2979 2980 // Target-independent instructions do not have an implicit-use of EXEC, even 2981 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2982 // boundaries prevents incorrect movements of such instructions. 
2983 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2984 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2985 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2986 changesVGPRIndexingMode(MI); 2987 } 2988 2989 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 2990 return Opcode == AMDGPU::DS_ORDERED_COUNT || 2991 Opcode == AMDGPU::DS_GWS_INIT || 2992 Opcode == AMDGPU::DS_GWS_SEMA_V || 2993 Opcode == AMDGPU::DS_GWS_SEMA_BR || 2994 Opcode == AMDGPU::DS_GWS_SEMA_P || 2995 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 2996 Opcode == AMDGPU::DS_GWS_BARRIER; 2997 } 2998 2999 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3000 // Skip the full operand and register alias search modifiesRegister 3001 // does. There's only a handful of instructions that touch this, it's only an 3002 // implicit def, and doesn't alias any other registers. 3003 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3004 for (; ImpDef && *ImpDef; ++ImpDef) { 3005 if (*ImpDef == AMDGPU::MODE) 3006 return true; 3007 } 3008 } 3009 3010 return false; 3011 } 3012 3013 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3014 unsigned Opcode = MI.getOpcode(); 3015 3016 if (MI.mayStore() && isSMRD(MI)) 3017 return true; // scalar store or atomic 3018 3019 // This will terminate the function when other lanes may need to continue. 3020 if (MI.isReturn()) 3021 return true; 3022 3023 // These instructions cause shader I/O that may cause hardware lockups 3024 // when executed with an empty EXEC mask. 3025 // 3026 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3027 // EXEC = 0, but checking for that case here seems not worth it 3028 // given the typical code patterns. 3029 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3030 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 3031 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3032 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3033 return true; 3034 3035 if (MI.isCall() || MI.isInlineAsm()) 3036 return true; // conservative assumption 3037 3038 // A mode change is a scalar operation that influences vector instructions. 3039 if (modifiesModeRegister(MI)) 3040 return true; 3041 3042 // These are like SALU instructions in terms of effects, so it's questionable 3043 // whether we should return true for those. 3044 // 3045 // However, executing them with EXEC = 0 causes them to operate on undefined 3046 // data, which we avoid by returning true here. 3047 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 3048 return true; 3049 3050 return false; 3051 } 3052 3053 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3054 const MachineInstr &MI) const { 3055 if (MI.isMetaInstruction()) 3056 return false; 3057 3058 // This won't read exec if this is an SGPR->SGPR copy. 3059 if (MI.isCopyLike()) { 3060 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3061 return true; 3062 3063 // Make sure this isn't copying exec as a normal operand 3064 return MI.readsRegister(AMDGPU::EXEC, &RI); 3065 } 3066 3067 // Make a conservative assumption about the callee. 3068 if (MI.isCall()) 3069 return true; 3070 3071 // Be conservative with any unhandled generic opcodes. 
3072 if (!isTargetSpecificOpcode(MI.getOpcode())) 3073 return true; 3074 3075 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3076 } 3077 3078 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3079 switch (Imm.getBitWidth()) { 3080 case 1: // This likely will be a condition code mask. 3081 return true; 3082 3083 case 32: 3084 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3085 ST.hasInv2PiInlineImm()); 3086 case 64: 3087 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3088 ST.hasInv2PiInlineImm()); 3089 case 16: 3090 return ST.has16BitInsts() && 3091 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3092 ST.hasInv2PiInlineImm()); 3093 default: 3094 llvm_unreachable("invalid bitwidth"); 3095 } 3096 } 3097 3098 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3099 uint8_t OperandType) const { 3100 if (!MO.isImm() || 3101 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3102 OperandType > AMDGPU::OPERAND_SRC_LAST) 3103 return false; 3104 3105 // MachineOperand provides no way to tell the true operand size, since it only 3106 // records a 64-bit value. We need to know the size to determine if a 32-bit 3107 // floating point immediate bit pattern is legal for an integer immediate. It 3108 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3109 3110 int64_t Imm = MO.getImm(); 3111 switch (OperandType) { 3112 case AMDGPU::OPERAND_REG_IMM_INT32: 3113 case AMDGPU::OPERAND_REG_IMM_FP32: 3114 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3115 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3116 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3117 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3118 int32_t Trunc = static_cast<int32_t>(Imm); 3119 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3120 } 3121 case AMDGPU::OPERAND_REG_IMM_INT64: 3122 case AMDGPU::OPERAND_REG_IMM_FP64: 3123 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3124 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3125 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3126 ST.hasInv2PiInlineImm()); 3127 case AMDGPU::OPERAND_REG_IMM_INT16: 3128 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3129 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3130 // We would expect inline immediates to not be concerned with an integer/fp 3131 // distinction. However, in the case of 16-bit integer operations, the 3132 // "floating point" values appear to not work. The hardware seems to read 3133 // only the low 16 bits of 32-bit immediates, which happens to always work 3134 // for the integer values. 3135 // 3136 // See llvm bugzilla 46302. 3137 // 3138 // TODO: Theoretically we could use op-sel to use the high bits of the 3139 // 32-bit FP values. 3140 return AMDGPU::isInlinableIntLiteral(Imm); 3141 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3142 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3143 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3144 // This suffers the same problem as the scalar 16-bit cases. 3145 return AMDGPU::isInlinableIntLiteralV216(Imm); 3146 case AMDGPU::OPERAND_REG_IMM_FP16: 3147 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3148 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3149 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3150 // A few special case instructions have 16-bit operands on subtargets 3151 // where 16-bit instructions are not legal. 3152 // TODO: Do the 32-bit immediates work?
We shouldn't really need to handle 3153 // constants in these cases 3154 int16_t Trunc = static_cast<int16_t>(Imm); 3155 return ST.has16BitInsts() && 3156 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3157 } 3158 3159 return false; 3160 } 3161 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3162 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3163 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3164 uint32_t Trunc = static_cast<uint32_t>(Imm); 3165 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3166 } 3167 default: 3168 llvm_unreachable("invalid bitwidth"); 3169 } 3170 } 3171 3172 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3173 const MCOperandInfo &OpInfo) const { 3174 switch (MO.getType()) { 3175 case MachineOperand::MO_Register: 3176 return false; 3177 case MachineOperand::MO_Immediate: 3178 return !isInlineConstant(MO, OpInfo); 3179 case MachineOperand::MO_FrameIndex: 3180 case MachineOperand::MO_MachineBasicBlock: 3181 case MachineOperand::MO_ExternalSymbol: 3182 case MachineOperand::MO_GlobalAddress: 3183 case MachineOperand::MO_MCSymbol: 3184 return true; 3185 default: 3186 llvm_unreachable("unexpected operand type"); 3187 } 3188 } 3189 3190 static bool compareMachineOp(const MachineOperand &Op0, 3191 const MachineOperand &Op1) { 3192 if (Op0.getType() != Op1.getType()) 3193 return false; 3194 3195 switch (Op0.getType()) { 3196 case MachineOperand::MO_Register: 3197 return Op0.getReg() == Op1.getReg(); 3198 case MachineOperand::MO_Immediate: 3199 return Op0.getImm() == Op1.getImm(); 3200 default: 3201 llvm_unreachable("Didn't expect to be comparing these operand types"); 3202 } 3203 } 3204 3205 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3206 const MachineOperand &MO) const { 3207 const MCInstrDesc &InstDesc = MI.getDesc(); 3208 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3209 3210 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3211 3212 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3213 return true; 3214 3215 if (OpInfo.RegClass < 0) 3216 return false; 3217 3218 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3219 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3220 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3221 AMDGPU::OpName::src2)) 3222 return false; 3223 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3224 } 3225 3226 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3227 return false; 3228 3229 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3230 return true; 3231 3232 return ST.hasVOP3Literal(); 3233 } 3234 3235 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3236 int Op32 = AMDGPU::getVOPe32(Opcode); 3237 if (Op32 == -1) 3238 return false; 3239 3240 return pseudoToMCOpcode(Op32) != -1; 3241 } 3242 3243 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3244 // The src0_modifier operand is present on all instructions 3245 // that have modifiers. 
3246 3247 return AMDGPU::getNamedOperandIdx(Opcode, 3248 AMDGPU::OpName::src0_modifiers) != -1; 3249 } 3250 3251 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3252 unsigned OpName) const { 3253 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3254 return Mods && Mods->getImm(); 3255 } 3256 3257 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3258 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3259 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3260 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3261 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3262 hasModifiersSet(MI, AMDGPU::OpName::omod); 3263 } 3264 3265 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3266 const MachineRegisterInfo &MRI) const { 3267 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3268 // Can't shrink instruction with three operands. 3269 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 3270 // a special case for it. It can only be shrunk if the third operand 3271 // is vcc, and src0_modifiers and src1_modifiers are not set. 3272 // We should handle this the same way we handle vopc, by adding 3273 // a register allocation hint pre-regalloc and then doing the shrinking 3274 // post-regalloc. 3275 if (Src2) { 3276 switch (MI.getOpcode()) { 3277 default: return false; 3278 3279 case AMDGPU::V_ADDC_U32_e64: 3280 case AMDGPU::V_SUBB_U32_e64: 3281 case AMDGPU::V_SUBBREV_U32_e64: { 3282 const MachineOperand *Src1 3283 = getNamedOperand(MI, AMDGPU::OpName::src1); 3284 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3285 return false; 3286 // Additional verification is needed for sdst/src2. 3287 return true; 3288 } 3289 case AMDGPU::V_MAC_F32_e64: 3290 case AMDGPU::V_MAC_F16_e64: 3291 case AMDGPU::V_FMAC_F32_e64: 3292 case AMDGPU::V_FMAC_F16_e64: 3293 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3294 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3295 return false; 3296 break; 3297 3298 case AMDGPU::V_CNDMASK_B32_e64: 3299 break; 3300 } 3301 } 3302 3303 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3304 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3305 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3306 return false; 3307 3308 // We don't need to check src0, all input types are legal, so just make sure 3309 // src0 isn't using any modifiers. 3310 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3311 return false; 3312 3313 // Can it be shrunk to a valid 32 bit opcode? 3314 if (!hasVALU32BitEncoding(MI.getOpcode())) 3315 return false; 3316 3317 // Check output modifiers 3318 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3319 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3320 } 3321 3322 // Set VCC operand with all flags from \p Orig, except for setting it as 3323 // implicit.
3324 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3325 const MachineOperand &Orig) { 3326 3327 for (MachineOperand &Use : MI.implicit_operands()) { 3328 if (Use.isUse() && 3329 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3330 Use.setIsUndef(Orig.isUndef()); 3331 Use.setIsKill(Orig.isKill()); 3332 return; 3333 } 3334 } 3335 } 3336 3337 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3338 unsigned Op32) const { 3339 MachineBasicBlock *MBB = MI.getParent();; 3340 MachineInstrBuilder Inst32 = 3341 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3342 .setMIFlags(MI.getFlags()); 3343 3344 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3345 // For VOPC instructions, this is replaced by an implicit def of vcc. 3346 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3347 if (Op32DstIdx != -1) { 3348 // dst 3349 Inst32.add(MI.getOperand(0)); 3350 } else { 3351 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3352 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3353 "Unexpected case"); 3354 } 3355 3356 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3357 3358 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3359 if (Src1) 3360 Inst32.add(*Src1); 3361 3362 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3363 3364 if (Src2) { 3365 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3366 if (Op32Src2Idx != -1) { 3367 Inst32.add(*Src2); 3368 } else { 3369 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3370 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3371 // of vcc was already added during the initial BuildMI, but we 3372 // 1) may need to change vcc to vcc_lo to preserve the original register 3373 // 2) have to preserve the original flags. 3374 fixImplicitOperands(*Inst32); 3375 copyFlagsToImplicitVCC(*Inst32, *Src2); 3376 } 3377 } 3378 3379 return Inst32; 3380 } 3381 3382 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3383 const MachineOperand &MO, 3384 const MCOperandInfo &OpInfo) const { 3385 // Literal constants use the constant bus. 3386 //if (isLiteralConstantLike(MO, OpInfo)) 3387 // return true; 3388 if (MO.isImm()) 3389 return !isInlineConstant(MO, OpInfo); 3390 3391 if (!MO.isReg()) 3392 return true; // Misc other operands like FrameIndex 3393 3394 if (!MO.isUse()) 3395 return false; 3396 3397 if (MO.getReg().isVirtual()) 3398 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3399 3400 // Null is free 3401 if (MO.getReg() == AMDGPU::SGPR_NULL) 3402 return false; 3403 3404 // SGPRs use the constant bus 3405 if (MO.isImplicit()) { 3406 return MO.getReg() == AMDGPU::M0 || 3407 MO.getReg() == AMDGPU::VCC || 3408 MO.getReg() == AMDGPU::VCC_LO; 3409 } else { 3410 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3411 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3412 } 3413 } 3414 3415 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3416 for (const MachineOperand &MO : MI.implicit_operands()) { 3417 // We only care about reads. 
3418 if (MO.isDef()) 3419 continue; 3420 3421 switch (MO.getReg()) { 3422 case AMDGPU::VCC: 3423 case AMDGPU::VCC_LO: 3424 case AMDGPU::VCC_HI: 3425 case AMDGPU::M0: 3426 case AMDGPU::FLAT_SCR: 3427 return MO.getReg(); 3428 3429 default: 3430 break; 3431 } 3432 } 3433 3434 return AMDGPU::NoRegister; 3435 } 3436 3437 static bool shouldReadExec(const MachineInstr &MI) { 3438 if (SIInstrInfo::isVALU(MI)) { 3439 switch (MI.getOpcode()) { 3440 case AMDGPU::V_READLANE_B32: 3441 case AMDGPU::V_READLANE_B32_gfx6_gfx7: 3442 case AMDGPU::V_READLANE_B32_gfx10: 3443 case AMDGPU::V_READLANE_B32_vi: 3444 case AMDGPU::V_WRITELANE_B32: 3445 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: 3446 case AMDGPU::V_WRITELANE_B32_gfx10: 3447 case AMDGPU::V_WRITELANE_B32_vi: 3448 return false; 3449 } 3450 3451 return true; 3452 } 3453 3454 if (MI.isPreISelOpcode() || 3455 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3456 SIInstrInfo::isSALU(MI) || 3457 SIInstrInfo::isSMRD(MI)) 3458 return false; 3459 3460 return true; 3461 } 3462 3463 static bool isSubRegOf(const SIRegisterInfo &TRI, 3464 const MachineOperand &SuperVec, 3465 const MachineOperand &SubReg) { 3466 if (SubReg.getReg().isPhysical()) 3467 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3468 3469 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3470 SubReg.getReg() == SuperVec.getReg(); 3471 } 3472 3473 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3474 StringRef &ErrInfo) const { 3475 uint16_t Opcode = MI.getOpcode(); 3476 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3477 return true; 3478 3479 const MachineFunction *MF = MI.getParent()->getParent(); 3480 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3481 3482 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3483 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3484 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3485 3486 // Make sure the number of operands is correct. 3487 const MCInstrDesc &Desc = get(Opcode); 3488 if (!Desc.isVariadic() && 3489 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3490 ErrInfo = "Instruction has wrong number of operands."; 3491 return false; 3492 } 3493 3494 if (MI.isInlineAsm()) { 3495 // Verify register classes for inlineasm constraints. 3496 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3497 I != E; ++I) { 3498 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3499 if (!RC) 3500 continue; 3501 3502 const MachineOperand &Op = MI.getOperand(I); 3503 if (!Op.isReg()) 3504 continue; 3505 3506 Register Reg = Op.getReg(); 3507 if (!Reg.isVirtual() && !RC->contains(Reg)) { 3508 ErrInfo = "inlineasm operand has incorrect register class."; 3509 return false; 3510 } 3511 } 3512 3513 return true; 3514 } 3515 3516 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3517 ErrInfo = "missing memory operand from MIMG instruction."; 3518 return false; 3519 } 3520 3521 // Make sure the register classes are correct. 3522 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3523 if (MI.getOperand(i).isFPImm()) { 3524 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3525 "all fp values to integers."; 3526 return false; 3527 } 3528 3529 int RegClass = Desc.OpInfo[i].RegClass; 3530 3531 switch (Desc.OpInfo[i].OperandType) { 3532 case MCOI::OPERAND_REGISTER: 3533 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3534 ErrInfo = "Illegal immediate value for operand."; 3535 return false; 3536 } 3537 break; 3538 case AMDGPU::OPERAND_REG_IMM_INT32: 3539 case AMDGPU::OPERAND_REG_IMM_FP32: 3540 break; 3541 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3542 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3543 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3544 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3545 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3546 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3547 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3548 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3549 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3550 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3551 const MachineOperand &MO = MI.getOperand(i); 3552 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3553 ErrInfo = "Illegal immediate value for operand."; 3554 return false; 3555 } 3556 break; 3557 } 3558 case MCOI::OPERAND_IMMEDIATE: 3559 case AMDGPU::OPERAND_KIMM32: 3560 // Check if this operand is an immediate. 3561 // FrameIndex operands will be replaced by immediates, so they are 3562 // allowed. 3563 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3564 ErrInfo = "Expected immediate, but got non-immediate"; 3565 return false; 3566 } 3567 LLVM_FALLTHROUGH; 3568 default: 3569 continue; 3570 } 3571 3572 if (!MI.getOperand(i).isReg()) 3573 continue; 3574 3575 if (RegClass != -1) { 3576 Register Reg = MI.getOperand(i).getReg(); 3577 if (Reg == AMDGPU::NoRegister || Reg.isVirtual()) 3578 continue; 3579 3580 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3581 if (!RC->contains(Reg)) { 3582 ErrInfo = "Operand has incorrect register class."; 3583 return false; 3584 } 3585 } 3586 } 3587 3588 // Verify SDWA 3589 if (isSDWA(MI)) { 3590 if (!ST.hasSDWA()) { 3591 ErrInfo = "SDWA is not supported on this target"; 3592 return false; 3593 } 3594 3595 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3596 3597 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3598 3599 for (int OpIdx: OpIndicies) { 3600 if (OpIdx == -1) 3601 continue; 3602 const MachineOperand &MO = MI.getOperand(OpIdx); 3603 3604 if (!ST.hasSDWAScalar()) { 3605 // Only VGPRS on VI 3606 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3607 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3608 return false; 3609 } 3610 } else { 3611 // No immediates on GFX9 3612 if (!MO.isReg()) { 3613 ErrInfo = 3614 "Only reg allowed as operands in SDWA instructions on GFX9+"; 3615 return false; 3616 } 3617 } 3618 } 3619 3620 if (!ST.hasSDWAOmod()) { 3621 // No omod allowed on VI 3622 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3623 if (OMod != nullptr && 3624 (!OMod->isImm() || OMod->getImm() != 0)) { 3625 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3626 return false; 3627 } 3628 } 3629 3630 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3631 if (isVOPC(BasicOpcode)) { 3632 if (!ST.hasSDWASdst() && DstIdx != -1) { 3633 // Only vcc allowed as dst on VI for VOPC 3634 const MachineOperand &Dst = MI.getOperand(DstIdx); 3635 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3636 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3637 return false; 3638 } 
3639 } else if (!ST.hasSDWAOutModsVOPC()) { 3640 // No clamp allowed on GFX9 for VOPC 3641 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3642 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 3643 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 3644 return false; 3645 } 3646 3647 // No omod allowed on GFX9 for VOPC 3648 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3649 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 3650 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 3651 return false; 3652 } 3653 } 3654 } 3655 3656 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 3657 if (DstUnused && DstUnused->isImm() && 3658 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 3659 const MachineOperand &Dst = MI.getOperand(DstIdx); 3660 if (!Dst.isReg() || !Dst.isTied()) { 3661 ErrInfo = "Dst register should have tied register"; 3662 return false; 3663 } 3664 3665 const MachineOperand &TiedMO = 3666 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 3667 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 3668 ErrInfo = 3669 "Dst register should be tied to implicit use of preserved register"; 3670 return false; 3671 } else if (TiedMO.getReg().isPhysical() && 3672 Dst.getReg() != TiedMO.getReg()) { 3673 ErrInfo = "Dst register should use same physical register as preserved"; 3674 return false; 3675 } 3676 } 3677 } 3678 3679 // Verify MIMG 3680 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 3681 // Ensure that the return type used is large enough for all the options 3682 // being used TFE/LWE require an extra result register. 3683 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 3684 if (DMask) { 3685 uint64_t DMaskImm = DMask->getImm(); 3686 uint32_t RegCount = 3687 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 3688 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 3689 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 3690 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 3691 3692 // Adjust for packed 16 bit values 3693 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 3694 RegCount >>= 1; 3695 3696 // Adjust if using LWE or TFE 3697 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 3698 RegCount += 1; 3699 3700 const uint32_t DstIdx = 3701 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 3702 const MachineOperand &Dst = MI.getOperand(DstIdx); 3703 if (Dst.isReg()) { 3704 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 3705 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 3706 if (RegCount > DstSize) { 3707 ErrInfo = "MIMG instruction returns too many registers for dst " 3708 "register class"; 3709 return false; 3710 } 3711 } 3712 } 3713 } 3714 3715 // Verify VOP*. Ignore multiple sgpr operands on writelane. 3716 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 3717 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 3718 // Only look at the true operands. Only a real operand can use the constant 3719 // bus, and we don't want to check pseudo-operands like the source modifier 3720 // flags. 
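// For example, with a single constant-bus slot (pre-GFX10) something like
//   v_add_f32 v0, s0, s1
// is rejected below because s0 and s1 are two distinct SGPRs, while
//   v_add_f32 v0, s0, s0
// is fine since a repeated SGPR is only counted once.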
3721 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3722 3723 unsigned ConstantBusCount = 0; 3724 unsigned LiteralCount = 0; 3725 3726 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3727 ++ConstantBusCount; 3728 3729 SmallVector<Register, 2> SGPRsUsed; 3730 Register SGPRUsed; 3731 3732 for (int OpIdx : OpIndices) { 3733 if (OpIdx == -1) 3734 break; 3735 const MachineOperand &MO = MI.getOperand(OpIdx); 3736 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3737 if (MO.isReg()) { 3738 SGPRUsed = MO.getReg(); 3739 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 3740 return SGPRUsed != SGPR; 3741 })) { 3742 ++ConstantBusCount; 3743 SGPRsUsed.push_back(SGPRUsed); 3744 } 3745 } else { 3746 ++ConstantBusCount; 3747 ++LiteralCount; 3748 } 3749 } 3750 } 3751 3752 SGPRUsed = findImplicitSGPRRead(MI); 3753 if (SGPRUsed != AMDGPU::NoRegister) { 3754 // Implicit uses may safely overlap true overands 3755 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 3756 return !RI.regsOverlap(SGPRUsed, SGPR); 3757 })) { 3758 ++ConstantBusCount; 3759 SGPRsUsed.push_back(SGPRUsed); 3760 } 3761 } 3762 3763 // v_writelane_b32 is an exception from constant bus restriction: 3764 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 3765 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 3766 Opcode != AMDGPU::V_WRITELANE_B32) { 3767 ErrInfo = "VOP* instruction violates constant bus restriction"; 3768 return false; 3769 } 3770 3771 if (isVOP3(MI) && LiteralCount) { 3772 if (!ST.hasVOP3Literal()) { 3773 ErrInfo = "VOP3 instruction uses literal"; 3774 return false; 3775 } 3776 if (LiteralCount > 1) { 3777 ErrInfo = "VOP3 instruction uses more than one literal"; 3778 return false; 3779 } 3780 } 3781 } 3782 3783 // Special case for writelane - this can break the multiple constant bus rule, 3784 // but still can't use more than one SGPR register 3785 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 3786 unsigned SGPRCount = 0; 3787 Register SGPRUsed = AMDGPU::NoRegister; 3788 3789 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 3790 if (OpIdx == -1) 3791 break; 3792 3793 const MachineOperand &MO = MI.getOperand(OpIdx); 3794 3795 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3796 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 3797 if (MO.getReg() != SGPRUsed) 3798 ++SGPRCount; 3799 SGPRUsed = MO.getReg(); 3800 } 3801 } 3802 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 3803 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 3804 return false; 3805 } 3806 } 3807 } 3808 3809 // Verify misc. restrictions on specific instructions. 
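// The checks below cover the v_div_scale source constraints, SOP2/SOPC
// immediate limits, SOPK immediate ranges, and the implicit-operand
// requirements of the v_movrel* instructions.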
3810 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3811 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3812 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3813 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3814 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3815 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3816 if (!compareMachineOp(Src0, Src1) && 3817 !compareMachineOp(Src0, Src2)) { 3818 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3819 return false; 3820 } 3821 } 3822 } 3823 3824 if (isSOP2(MI) || isSOPC(MI)) { 3825 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3826 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3827 unsigned Immediates = 0; 3828 3829 if (!Src0.isReg() && 3830 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3831 Immediates++; 3832 if (!Src1.isReg() && 3833 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3834 Immediates++; 3835 3836 if (Immediates > 1) { 3837 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3838 return false; 3839 } 3840 } 3841 3842 if (isSOPK(MI)) { 3843 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3844 if (Desc.isBranch()) { 3845 if (!Op->isMBB()) { 3846 ErrInfo = "invalid branch target for SOPK instruction"; 3847 return false; 3848 } 3849 } else { 3850 uint64_t Imm = Op->getImm(); 3851 if (sopkIsZext(MI)) { 3852 if (!isUInt<16>(Imm)) { 3853 ErrInfo = "invalid immediate for SOPK instruction"; 3854 return false; 3855 } 3856 } else { 3857 if (!isInt<16>(Imm)) { 3858 ErrInfo = "invalid immediate for SOPK instruction"; 3859 return false; 3860 } 3861 } 3862 } 3863 } 3864 3865 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3866 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3867 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3868 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3869 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3870 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3871 3872 const unsigned StaticNumOps = Desc.getNumOperands() + 3873 Desc.getNumImplicitUses(); 3874 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3875 3876 // Allow additional implicit operands. This allows a fixup done by the post 3877 // RA scheduler where the main implicit operand is killed and implicit-defs 3878 // are added for sub-registers that remain live after this instruction. 3879 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3880 ErrInfo = "missing implicit register operands"; 3881 return false; 3882 } 3883 3884 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3885 if (IsDst) { 3886 if (!Dst->isUse()) { 3887 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3888 return false; 3889 } 3890 3891 unsigned UseOpIdx; 3892 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3893 UseOpIdx != StaticNumOps + 1) { 3894 ErrInfo = "movrel implicit operands should be tied"; 3895 return false; 3896 } 3897 } 3898 3899 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3900 const MachineOperand &ImpUse 3901 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3902 if (!ImpUse.isReg() || !ImpUse.isUse() || 3903 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3904 ErrInfo = "src0 should be subreg of implicit vector use"; 3905 return false; 3906 } 3907 } 3908 3909 // Make sure we aren't losing exec uses in the td files. This mostly requires 3910 // being careful when using let Uses to try to add other use registers. 
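// shouldReadExec() is true for ordinary VALU instructions; v_readlane and
// v_writelane are the exceptions since they operate on a single lane
// regardless of the exec mask.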
3911 if (shouldReadExec(MI)) { 3912 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3913 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3914 return false; 3915 } 3916 } 3917 3918 if (isSMRD(MI)) { 3919 if (MI.mayStore()) { 3920 // The register offset form of scalar stores may only use m0 as the 3921 // soffset register. 3922 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3923 if (Soff && Soff->getReg() != AMDGPU::M0) { 3924 ErrInfo = "scalar stores must use m0 as offset register"; 3925 return false; 3926 } 3927 } 3928 } 3929 3930 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 3931 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3932 if (Offset->getImm() != 0) { 3933 ErrInfo = "subtarget does not support offsets in flat instructions"; 3934 return false; 3935 } 3936 } 3937 3938 if (isMIMG(MI)) { 3939 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 3940 if (DimOp) { 3941 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 3942 AMDGPU::OpName::vaddr0); 3943 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 3944 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 3945 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 3946 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 3947 const AMDGPU::MIMGDimInfo *Dim = 3948 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 3949 3950 if (!Dim) { 3951 ErrInfo = "dim is out of range"; 3952 return false; 3953 } 3954 3955 bool IsA16 = false; 3956 if (ST.hasR128A16()) { 3957 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 3958 IsA16 = R128A16->getImm() != 0; 3959 } else if (ST.hasGFX10A16()) { 3960 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 3961 IsA16 = A16->getImm() != 0; 3962 } 3963 3964 bool PackDerivatives = IsA16 || BaseOpcode->G16; 3965 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 3966 3967 unsigned AddrWords = BaseOpcode->NumExtraArgs; 3968 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 3969 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 3970 if (IsA16) 3971 AddrWords += (AddrComponents + 1) / 2; 3972 else 3973 AddrWords += AddrComponents; 3974 3975 if (BaseOpcode->Gradients) { 3976 if (PackDerivatives) 3977 // There are two gradients per coordinate, we pack them separately. 
3978 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 3979 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2; 3980 else 3981 AddrWords += Dim->NumGradients; 3982 } 3983 3984 unsigned VAddrWords; 3985 if (IsNSA) { 3986 VAddrWords = SRsrcIdx - VAddr0Idx; 3987 } else { 3988 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 3989 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 3990 if (AddrWords > 8) 3991 AddrWords = 16; 3992 else if (AddrWords > 4) 3993 AddrWords = 8; 3994 else if (AddrWords == 4) 3995 AddrWords = 4; 3996 else if (AddrWords == 3) 3997 AddrWords = 3; 3998 } 3999 4000 if (VAddrWords != AddrWords) { 4001 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4002 << " but got " << VAddrWords << "\n"); 4003 ErrInfo = "bad vaddr size"; 4004 return false; 4005 } 4006 } 4007 } 4008 4009 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4010 if (DppCt) { 4011 using namespace AMDGPU::DPP; 4012 4013 unsigned DC = DppCt->getImm(); 4014 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4015 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4016 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4017 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4018 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4019 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4020 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4021 ErrInfo = "Invalid dpp_ctrl value"; 4022 return false; 4023 } 4024 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4025 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4026 ErrInfo = "Invalid dpp_ctrl value: " 4027 "wavefront shifts are not supported on GFX10+"; 4028 return false; 4029 } 4030 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4031 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4032 ErrInfo = "Invalid dpp_ctrl value: " 4033 "broadcasts are not supported on GFX10+"; 4034 return false; 4035 } 4036 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4037 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4038 ErrInfo = "Invalid dpp_ctrl value: " 4039 "row_share and row_xmask are not supported before GFX10"; 4040 return false; 4041 } 4042 } 4043 4044 return true; 4045 } 4046 4047 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4048 switch (MI.getOpcode()) { 4049 default: return AMDGPU::INSTRUCTION_LIST_END; 4050 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4051 case AMDGPU::COPY: return AMDGPU::COPY; 4052 case AMDGPU::PHI: return AMDGPU::PHI; 4053 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4054 case AMDGPU::WQM: return AMDGPU::WQM; 4055 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4056 case AMDGPU::WWM: return AMDGPU::WWM; 4057 case AMDGPU::S_MOV_B32: { 4058 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4059 return MI.getOperand(1).isReg() || 4060 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4061 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4062 } 4063 case AMDGPU::S_ADD_I32: 4064 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4065 case AMDGPU::S_ADDC_U32: 4066 return AMDGPU::V_ADDC_U32_e32; 4067 case AMDGPU::S_SUB_I32: 4068 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4069 // FIXME: These are not consistently handled, and selected when the carry is 4070 // used. 
4071 case AMDGPU::S_ADD_U32: 4072 return AMDGPU::V_ADD_CO_U32_e32; 4073 case AMDGPU::S_SUB_U32: 4074 return AMDGPU::V_SUB_CO_U32_e32; 4075 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4076 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 4077 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 4078 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 4079 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4080 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4081 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4082 case AMDGPU::S_XNOR_B32: 4083 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4084 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4085 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4086 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4087 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4088 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4089 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 4090 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4091 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 4092 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4093 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 4094 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 4095 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 4096 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 4097 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 4098 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4099 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4100 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4101 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4102 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 4103 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 4104 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 4105 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 4106 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 4107 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 4108 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 4109 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 4110 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 4111 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 4112 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 4113 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 4114 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 4115 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 4116 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4117 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4118 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4119 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4120 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4121 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4122 } 4123 llvm_unreachable( 4124 "Unexpected scalar opcode without corresponding vector one!"); 4125 } 4126 4127 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4128 unsigned OpNo) const { 4129 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4130 const MCInstrDesc &Desc = get(MI.getOpcode()); 4131 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4132 Desc.OpInfo[OpNo].RegClass == -1) { 4133 Register Reg = MI.getOperand(OpNo).getReg(); 4134 4135 if 
(Reg.isVirtual()) 4136 return MRI.getRegClass(Reg); 4137 return RI.getPhysRegClass(Reg); 4138 } 4139 4140 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4141 return RI.getRegClass(RCID); 4142 } 4143 4144 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4145 MachineBasicBlock::iterator I = MI; 4146 MachineBasicBlock *MBB = MI.getParent(); 4147 MachineOperand &MO = MI.getOperand(OpIdx); 4148 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4149 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4150 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4151 unsigned Size = RI.getRegSizeInBits(*RC); 4152 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4153 if (MO.isReg()) 4154 Opcode = AMDGPU::COPY; 4155 else if (RI.isSGPRClass(RC)) 4156 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4157 4158 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4159 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 4160 VRC = &AMDGPU::VReg_64RegClass; 4161 else 4162 VRC = &AMDGPU::VGPR_32RegClass; 4163 4164 Register Reg = MRI.createVirtualRegister(VRC); 4165 DebugLoc DL = MBB->findDebugLoc(I); 4166 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4167 MO.ChangeToRegister(Reg, false); 4168 } 4169 4170 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4171 MachineRegisterInfo &MRI, 4172 MachineOperand &SuperReg, 4173 const TargetRegisterClass *SuperRC, 4174 unsigned SubIdx, 4175 const TargetRegisterClass *SubRC) 4176 const { 4177 MachineBasicBlock *MBB = MI->getParent(); 4178 DebugLoc DL = MI->getDebugLoc(); 4179 Register SubReg = MRI.createVirtualRegister(SubRC); 4180 4181 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4182 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4183 .addReg(SuperReg.getReg(), 0, SubIdx); 4184 return SubReg; 4185 } 4186 4187 // Just in case the super register is itself a sub-register, copy it to a new 4188 // value so we don't need to worry about merging its subreg index with the 4189 // SubIdx passed to this function. The register coalescer should be able to 4190 // eliminate this extra copy. 
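// i.e. rather than composing the two subregister indices we emit, roughly:
//   %NewSuperReg = COPY %SuperReg.<old subreg>
//   %SubReg      = COPY %NewSuperReg.<SubIdx>
// and rely on the register coalescer to fold the extra copy away.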
4191 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4192 4193 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4194 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4195 4196 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4197 .addReg(NewSuperReg, 0, SubIdx); 4198 4199 return SubReg; 4200 } 4201 4202 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4203 MachineBasicBlock::iterator MII, 4204 MachineRegisterInfo &MRI, 4205 MachineOperand &Op, 4206 const TargetRegisterClass *SuperRC, 4207 unsigned SubIdx, 4208 const TargetRegisterClass *SubRC) const { 4209 if (Op.isImm()) { 4210 if (SubIdx == AMDGPU::sub0) 4211 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4212 if (SubIdx == AMDGPU::sub1) 4213 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4214 4215 llvm_unreachable("Unhandled register index for immediate"); 4216 } 4217 4218 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4219 SubIdx, SubRC); 4220 return MachineOperand::CreateReg(SubReg, false); 4221 } 4222 4223 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4224 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4225 assert(Inst.getNumExplicitOperands() == 3); 4226 MachineOperand Op1 = Inst.getOperand(1); 4227 Inst.RemoveOperand(1); 4228 Inst.addOperand(Op1); 4229 } 4230 4231 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4232 const MCOperandInfo &OpInfo, 4233 const MachineOperand &MO) const { 4234 if (!MO.isReg()) 4235 return false; 4236 4237 Register Reg = MO.getReg(); 4238 const TargetRegisterClass *RC = 4239 Reg.isVirtual() ? MRI.getRegClass(Reg) : RI.getPhysRegClass(Reg); 4240 4241 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4242 if (MO.getSubReg()) { 4243 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4244 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4245 if (!SuperRC) 4246 return false; 4247 4248 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4249 if (!DRC) 4250 return false; 4251 } 4252 return RC->hasSuperClassEq(DRC); 4253 } 4254 4255 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4256 const MCOperandInfo &OpInfo, 4257 const MachineOperand &MO) const { 4258 if (MO.isReg()) 4259 return isLegalRegOperand(MRI, OpInfo, MO); 4260 4261 // Handle non-register types that are treated like immediates. 4262 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4263 return true; 4264 } 4265 4266 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4267 const MachineOperand *MO) const { 4268 const MachineFunction &MF = *MI.getParent()->getParent(); 4269 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4270 const MCInstrDesc &InstDesc = MI.getDesc(); 4271 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4272 const TargetRegisterClass *DefinedRC = 4273 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4274 if (!MO) 4275 MO = &MI.getOperand(OpIdx); 4276 4277 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4278 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4279 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4280 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4281 return false; 4282 4283 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4284 if (MO->isReg()) 4285 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4286 4287 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4288 if (i == OpIdx) 4289 continue; 4290 const MachineOperand &Op = MI.getOperand(i); 4291 if (Op.isReg()) { 4292 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4293 if (!SGPRsUsed.count(SGPR) && 4294 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4295 if (--ConstantBusLimit <= 0) 4296 return false; 4297 SGPRsUsed.insert(SGPR); 4298 } 4299 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4300 if (--ConstantBusLimit <= 0) 4301 return false; 4302 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4303 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4304 if (!VOP3LiteralLimit--) 4305 return false; 4306 if (--ConstantBusLimit <= 0) 4307 return false; 4308 } 4309 } 4310 } 4311 4312 if (MO->isReg()) { 4313 assert(DefinedRC); 4314 return isLegalRegOperand(MRI, OpInfo, *MO); 4315 } 4316 4317 // Handle non-register types that are treated like immediates. 4318 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4319 4320 if (!DefinedRC) { 4321 // This operand expects an immediate. 4322 return true; 4323 } 4324 4325 return isImmOperandLegal(MI, OpIdx, *MO); 4326 } 4327 4328 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4329 MachineInstr &MI) const { 4330 unsigned Opc = MI.getOpcode(); 4331 const MCInstrDesc &InstrDesc = get(Opc); 4332 4333 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4334 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4335 4336 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4337 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4338 4339 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4340 // we need to only have one constant bus use before GFX10. 4341 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4342 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4343 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4344 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4345 legalizeOpWithMove(MI, Src0Idx); 4346 4347 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4348 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4349 // src0/src1 with V_READFIRSTLANE. 4350 if (Opc == AMDGPU::V_WRITELANE_B32) { 4351 const DebugLoc &DL = MI.getDebugLoc(); 4352 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4353 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4354 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4355 .add(Src0); 4356 Src0.ChangeToRegister(Reg, false); 4357 } 4358 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4359 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4360 const DebugLoc &DL = MI.getDebugLoc(); 4361 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4362 .add(Src1); 4363 Src1.ChangeToRegister(Reg, false); 4364 } 4365 return; 4366 } 4367 4368 // No VOP2 instructions support AGPRs. 
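// An AGPR source therefore has to be copied to a regular VGPR first;
// legalizeOpWithMove() below emits that copy into a fresh virtual register.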
4369 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4370 legalizeOpWithMove(MI, Src0Idx); 4371 4372 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4373 legalizeOpWithMove(MI, Src1Idx); 4374 4375 // VOP2 src0 instructions support all operand types, so we don't need to check 4376 // their legality. If src1 is already legal, we don't need to do anything. 4377 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4378 return; 4379 4380 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4381 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4382 // select is uniform. 4383 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4384 RI.isVGPR(MRI, Src1.getReg())) { 4385 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4386 const DebugLoc &DL = MI.getDebugLoc(); 4387 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4388 .add(Src1); 4389 Src1.ChangeToRegister(Reg, false); 4390 return; 4391 } 4392 4393 // We do not use commuteInstruction here because it is too aggressive and will 4394 // commute if it is possible. We only want to commute here if it improves 4395 // legality. This can be called a fairly large number of times so don't waste 4396 // compile time pointlessly swapping and checking legality again. 4397 if (HasImplicitSGPR || !MI.isCommutable()) { 4398 legalizeOpWithMove(MI, Src1Idx); 4399 return; 4400 } 4401 4402 // If src0 can be used as src1, commuting will make the operands legal. 4403 // Otherwise we have to give up and insert a move. 4404 // 4405 // TODO: Other immediate-like operand kinds could be commuted if there was a 4406 // MachineOperand::ChangeTo* for them. 4407 if ((!Src1.isImm() && !Src1.isReg()) || 4408 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4409 legalizeOpWithMove(MI, Src1Idx); 4410 return; 4411 } 4412 4413 int CommutedOpc = commuteOpcode(MI); 4414 if (CommutedOpc == -1) { 4415 legalizeOpWithMove(MI, Src1Idx); 4416 return; 4417 } 4418 4419 MI.setDesc(get(CommutedOpc)); 4420 4421 Register Src0Reg = Src0.getReg(); 4422 unsigned Src0SubReg = Src0.getSubReg(); 4423 bool Src0Kill = Src0.isKill(); 4424 4425 if (Src1.isImm()) 4426 Src0.ChangeToImmediate(Src1.getImm()); 4427 else if (Src1.isReg()) { 4428 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4429 Src0.setSubReg(Src1.getSubReg()); 4430 } else 4431 llvm_unreachable("Should only have register or immediate operands"); 4432 4433 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4434 Src1.setSubReg(Src0SubReg); 4435 fixImplicitOperands(MI); 4436 } 4437 4438 // Legalize VOP3 operands. All operand types are supported for any operand 4439 // but only one literal constant and only starting from GFX10. 
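// In practice that means at most one unique SGPR (or other constant-bus user)
// and no literal prior to GFX10; on GFX10 the constant-bus budget is two and a
// single literal is allowed. Anything over budget is moved into a VGPR with
// legalizeOpWithMove().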
4440 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4441 MachineInstr &MI) const { 4442 unsigned Opc = MI.getOpcode(); 4443 4444 int VOP3Idx[3] = { 4445 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4446 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4447 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4448 }; 4449 4450 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4451 Opc == AMDGPU::V_PERMLANEX16_B32) { 4452 // src1 and src2 must be scalar 4453 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4454 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4455 const DebugLoc &DL = MI.getDebugLoc(); 4456 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4457 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4458 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4459 .add(Src1); 4460 Src1.ChangeToRegister(Reg, false); 4461 } 4462 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4463 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4464 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4465 .add(Src2); 4466 Src2.ChangeToRegister(Reg, false); 4467 } 4468 } 4469 4470 // Find the one SGPR operand we are allowed to use. 4471 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4472 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4473 SmallDenseSet<unsigned> SGPRsUsed; 4474 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 4475 if (SGPRReg != AMDGPU::NoRegister) { 4476 SGPRsUsed.insert(SGPRReg); 4477 --ConstantBusLimit; 4478 } 4479 4480 for (unsigned i = 0; i < 3; ++i) { 4481 int Idx = VOP3Idx[i]; 4482 if (Idx == -1) 4483 break; 4484 MachineOperand &MO = MI.getOperand(Idx); 4485 4486 if (!MO.isReg()) { 4487 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4488 continue; 4489 4490 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4491 --LiteralLimit; 4492 --ConstantBusLimit; 4493 continue; 4494 } 4495 4496 --LiteralLimit; 4497 --ConstantBusLimit; 4498 legalizeOpWithMove(MI, Idx); 4499 continue; 4500 } 4501 4502 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4503 !isOperandLegal(MI, Idx, &MO)) { 4504 legalizeOpWithMove(MI, Idx); 4505 continue; 4506 } 4507 4508 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4509 continue; // VGPRs are legal 4510 4511 // We can use one SGPR in each VOP3 instruction prior to GFX10 4512 // and two starting from GFX10. 4513 if (SGPRsUsed.count(MO.getReg())) 4514 continue; 4515 if (ConstantBusLimit > 0) { 4516 SGPRsUsed.insert(MO.getReg()); 4517 --ConstantBusLimit; 4518 continue; 4519 } 4520 4521 // If we make it this far, then the operand is not legal and we must 4522 // legalize it. 
4523 legalizeOpWithMove(MI, Idx); 4524 } 4525 } 4526 4527 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, 4528 MachineRegisterInfo &MRI) const { 4529 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 4530 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 4531 Register DstReg = MRI.createVirtualRegister(SRC); 4532 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 4533 4534 if (RI.hasAGPRs(VRC)) { 4535 VRC = RI.getEquivalentVGPRClass(VRC); 4536 Register NewSrcReg = MRI.createVirtualRegister(VRC); 4537 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4538 get(TargetOpcode::COPY), NewSrcReg) 4539 .addReg(SrcReg); 4540 SrcReg = NewSrcReg; 4541 } 4542 4543 if (SubRegs == 1) { 4544 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4545 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 4546 .addReg(SrcReg); 4547 return DstReg; 4548 } 4549 4550 SmallVector<unsigned, 8> SRegs; 4551 for (unsigned i = 0; i < SubRegs; ++i) { 4552 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4553 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4554 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 4555 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 4556 SRegs.push_back(SGPR); 4557 } 4558 4559 MachineInstrBuilder MIB = 4560 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4561 get(AMDGPU::REG_SEQUENCE), DstReg); 4562 for (unsigned i = 0; i < SubRegs; ++i) { 4563 MIB.addReg(SRegs[i]); 4564 MIB.addImm(RI.getSubRegFromChannel(i)); 4565 } 4566 return DstReg; 4567 } 4568 4569 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 4570 MachineInstr &MI) const { 4571 4572 // If the pointer is store in VGPRs, then we need to move them to 4573 // SGPRs using v_readfirstlane. This is safe because we only select 4574 // loads with uniform pointers to SMRD instruction so we know the 4575 // pointer value is uniform. 4576 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 4577 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 4578 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 4579 SBase->setReg(SGPR); 4580 } 4581 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff); 4582 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { 4583 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); 4584 SOff->setReg(SGPR); 4585 } 4586 } 4587 4588 // FIXME: Remove this when SelectionDAG is obsoleted. 4589 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 4590 MachineInstr &MI) const { 4591 if (!isSegmentSpecificFLAT(MI)) 4592 return; 4593 4594 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 4595 // thinks they are uniform, so a readfirstlane should be valid. 4596 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 4597 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 4598 return; 4599 4600 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 4601 SAddr->setReg(ToSGPR); 4602 } 4603 4604 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 4605 MachineBasicBlock::iterator I, 4606 const TargetRegisterClass *DstRC, 4607 MachineOperand &Op, 4608 MachineRegisterInfo &MRI, 4609 const DebugLoc &DL) const { 4610 Register OpReg = Op.getReg(); 4611 unsigned OpSubReg = Op.getSubReg(); 4612 4613 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 4614 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 4615 4616 // Check if operand is already the correct register class. 
4617 if (DstRC == OpRC) 4618 return; 4619 4620 Register DstReg = MRI.createVirtualRegister(DstRC); 4621 MachineInstr *Copy = 4622 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 4623 4624 Op.setReg(DstReg); 4625 Op.setSubReg(0); 4626 4627 MachineInstr *Def = MRI.getVRegDef(OpReg); 4628 if (!Def) 4629 return; 4630 4631 // Try to eliminate the copy if it is copying an immediate value. 4632 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 4633 FoldImmediate(*Copy, *Def, OpReg, &MRI); 4634 4635 bool ImpDef = Def->isImplicitDef(); 4636 while (!ImpDef && Def && Def->isCopy()) { 4637 if (Def->getOperand(1).getReg().isPhysical()) 4638 break; 4639 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4640 ImpDef = Def && Def->isImplicitDef(); 4641 } 4642 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 4643 !ImpDef) 4644 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 4645 } 4646 4647 // Emit the actual waterfall loop, executing the wrapped instruction for each 4648 // unique value of \p Rsrc across all lanes. In the best case we execute 1 4649 // iteration, in the worst case we execute 64 (once per lane). 4650 static void 4651 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 4652 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 4653 const DebugLoc &DL, MachineOperand &Rsrc) { 4654 MachineFunction &MF = *OrigBB.getParent(); 4655 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4656 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4657 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4658 unsigned SaveExecOpc = 4659 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 4660 unsigned XorTermOpc = 4661 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 4662 unsigned AndOpc = 4663 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 4664 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4665 4666 MachineBasicBlock::iterator I = LoopBB.begin(); 4667 4668 SmallVector<Register, 8> ReadlanePieces; 4669 Register CondReg = AMDGPU::NoRegister; 4670 4671 Register VRsrc = Rsrc.getReg(); 4672 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 4673 4674 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 4675 unsigned NumSubRegs = RegSize / 32; 4676 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 4677 4678 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 4679 4680 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4681 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4682 4683 // Read the next variant <- also loop target. 4684 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 4685 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 4686 4687 // Read the next variant <- also loop target. 4688 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 4689 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 4690 4691 ReadlanePieces.push_back(CurRegLo); 4692 ReadlanePieces.push_back(CurRegHi); 4693 4694 // Comparison is to be done as 64-bit. 
4695 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 4696 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 4697 .addReg(CurRegLo) 4698 .addImm(AMDGPU::sub0) 4699 .addReg(CurRegHi) 4700 .addImm(AMDGPU::sub1); 4701 4702 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 4703 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 4704 .addReg(CurReg) 4705 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 4706 4707 // Combine the comparision results with AND. 4708 if (CondReg == AMDGPU::NoRegister) // First. 4709 CondReg = NewCondReg; 4710 else { // If not the first, we create an AND. 4711 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 4712 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 4713 .addReg(CondReg) 4714 .addReg(NewCondReg); 4715 CondReg = AndReg; 4716 } 4717 } // End for loop. 4718 4719 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 4720 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 4721 4722 // Build scalar Rsrc. 4723 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 4724 unsigned Channel = 0; 4725 for (Register Piece : ReadlanePieces) { 4726 Merge.addReg(Piece) 4727 .addImm(TRI->getSubRegFromChannel(Channel++)); 4728 } 4729 4730 // Update Rsrc operand to use the SGPR Rsrc. 4731 Rsrc.setReg(SRsrc); 4732 Rsrc.setIsKill(true); 4733 4734 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4735 MRI.setSimpleHint(SaveExec, CondReg); 4736 4737 // Update EXEC to matching lanes, saving original to SaveExec. 4738 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 4739 .addReg(CondReg, RegState::Kill); 4740 4741 // The original instruction is here; we insert the terminators after it. 4742 I = LoopBB.end(); 4743 4744 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 4745 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 4746 .addReg(Exec) 4747 .addReg(SaveExec); 4748 4749 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 4750 } 4751 4752 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 4753 // with SGPRs by iterating over all unique values across all lanes. 4754 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 4755 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 4756 MachineBasicBlock &MBB = *MI.getParent(); 4757 MachineFunction &MF = *MBB.getParent(); 4758 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4759 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4760 MachineRegisterInfo &MRI = MF.getRegInfo(); 4761 MachineBasicBlock::iterator I(&MI); 4762 const DebugLoc &DL = MI.getDebugLoc(); 4763 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4764 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 4765 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4766 4767 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4768 4769 // Save the EXEC mask 4770 BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 4771 4772 // Killed uses in the instruction we are waterfalling around will be 4773 // incorrect due to the added control-flow. 4774 for (auto &MO : MI.uses()) { 4775 if (MO.isReg() && MO.isUse()) { 4776 MRI.clearKillFlags(MO.getReg()); 4777 } 4778 } 4779 4780 // To insert the loop we need to split the block. Move everything after this 4781 // point to a new block, and insert a new empty block between the two. 
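// The control flow produced here is roughly:
//
//   MBB -> LoopBB -> RemainderBB -> (original successors of MBB)
//           ^   |
//           +---+   LoopBB branches back to itself (S_CBRANCH_EXECNZ) until
//                   every active lane's descriptor value has been handled.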
4782 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4783 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4784 MachineFunction::iterator MBBI(MBB); 4785 ++MBBI; 4786 4787 MF.insert(MBBI, LoopBB); 4788 MF.insert(MBBI, RemainderBB); 4789 4790 LoopBB->addSuccessor(LoopBB); 4791 LoopBB->addSuccessor(RemainderBB); 4792 4793 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 4794 MachineBasicBlock::iterator J = I++; 4795 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4796 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 4797 LoopBB->splice(LoopBB->begin(), &MBB, J); 4798 4799 MBB.addSuccessor(LoopBB); 4800 4801 // Update dominators. We know that MBB immediately dominates LoopBB, that 4802 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4803 // dominates all of the successors transferred to it from MBB that MBB used 4804 // to properly dominate. 4805 if (MDT) { 4806 MDT->addNewBlock(LoopBB, &MBB); 4807 MDT->addNewBlock(RemainderBB, LoopBB); 4808 for (auto &Succ : RemainderBB->successors()) { 4809 if (MDT->properlyDominates(&MBB, Succ)) { 4810 MDT->changeImmediateDominator(Succ, RemainderBB); 4811 } 4812 } 4813 } 4814 4815 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 4816 4817 // Restore the EXEC mask 4818 MachineBasicBlock::iterator First = RemainderBB->begin(); 4819 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 4820 } 4821 4822 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 4823 static std::tuple<unsigned, unsigned> 4824 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 4825 MachineBasicBlock &MBB = *MI.getParent(); 4826 MachineFunction &MF = *MBB.getParent(); 4827 MachineRegisterInfo &MRI = MF.getRegInfo(); 4828 4829 // Extract the ptr from the resource descriptor. 
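// Only the 64-bit base pointer (sub0_sub1 of the 128-bit descriptor) is kept;
// the remaining two dwords are rebuilt below from the default data-format
// constant on top of a zero base.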
4830 unsigned RsrcPtr =
4831 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
4832 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
4833
4834 // Create an empty resource descriptor
4835 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4836 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4837 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4838 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4839 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
4840
4841 // Zero64 = 0
4842 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
4843 .addImm(0);
4844
4845 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
4846 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
4847 .addImm(RsrcDataFormat & 0xFFFFFFFF);
4848
4849 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
4850 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
4851 .addImm(RsrcDataFormat >> 32);
4852
4853 // NewSRsrc = {Zero64, SRsrcFormat}
4854 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
4855 .addReg(Zero64)
4856 .addImm(AMDGPU::sub0_sub1)
4857 .addReg(SRsrcFormatLo)
4858 .addImm(AMDGPU::sub2)
4859 .addReg(SRsrcFormatHi)
4860 .addImm(AMDGPU::sub3);
4861
4862 return std::make_tuple(RsrcPtr, NewSRsrc);
4863 }
4864
4865 void SIInstrInfo::legalizeOperands(MachineInstr &MI,
4866 MachineDominatorTree *MDT) const {
4867 MachineFunction &MF = *MI.getParent()->getParent();
4868 MachineRegisterInfo &MRI = MF.getRegInfo();
4869
4870 // Legalize VOP2
4871 if (isVOP2(MI) || isVOPC(MI)) {
4872 legalizeOperandsVOP2(MRI, MI);
4873 return;
4874 }
4875
4876 // Legalize VOP3
4877 if (isVOP3(MI)) {
4878 legalizeOperandsVOP3(MRI, MI);
4879 return;
4880 }
4881
4882 // Legalize SMRD
4883 if (isSMRD(MI)) {
4884 legalizeOperandsSMRD(MRI, MI);
4885 return;
4886 }
4887
4888 // Legalize FLAT
4889 if (isFLAT(MI)) {
4890 legalizeOperandsFLAT(MRI, MI);
4891 return;
4892 }
4893
4894 // Legalize REG_SEQUENCE and PHI
4895 // The register class of the operands must be the same type as the register
4896 // class of the output.
4897 if (MI.getOpcode() == AMDGPU::PHI) {
4898 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
4899 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
4900 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
4901 continue;
4902 const TargetRegisterClass *OpRC =
4903 MRI.getRegClass(MI.getOperand(i).getReg());
4904 if (RI.hasVectorRegisters(OpRC)) {
4905 VRC = OpRC;
4906 } else {
4907 SRC = OpRC;
4908 }
4909 }
4910
4911 // If any of the operands are VGPR registers, then they all must be VGPRs;
4912 // otherwise we will create illegal VGPR->SGPR copies when legalizing
4913 // them.
4914 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
4915 if (!VRC) {
4916 assert(SRC);
4917 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
4918 VRC = &AMDGPU::VReg_1RegClass;
4919 } else
4920 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4921 ? RI.getEquivalentAGPRClass(SRC)
4922 : RI.getEquivalentVGPRClass(SRC);
4923 } else {
4924 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4925 ? RI.getEquivalentAGPRClass(VRC)
4926 : RI.getEquivalentVGPRClass(VRC);
4927 }
4928 RC = VRC;
4929 } else {
4930 RC = SRC;
4931 }
4932
4933 // Update all the operands so they have the same type.
4934 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 4935 MachineOperand &Op = MI.getOperand(I); 4936 if (!Op.isReg() || !Op.getReg().isVirtual()) 4937 continue; 4938 4939 // MI is a PHI instruction. 4940 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 4941 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 4942 4943 // Avoid creating no-op copies with the same src and dst reg class. These 4944 // confuse some of the machine passes. 4945 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 4946 } 4947 } 4948 4949 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 4950 // VGPR dest type and SGPR sources, insert copies so all operands are 4951 // VGPRs. This seems to help operand folding / the register coalescer. 4952 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 4953 MachineBasicBlock *MBB = MI.getParent(); 4954 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 4955 if (RI.hasVGPRs(DstRC)) { 4956 // Update all the operands so they are VGPR register classes. These may 4957 // not be the same register class because REG_SEQUENCE supports mixing 4958 // subregister index types e.g. sub0_sub1 + sub2 + sub3 4959 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 4960 MachineOperand &Op = MI.getOperand(I); 4961 if (!Op.isReg() || !Op.getReg().isVirtual()) 4962 continue; 4963 4964 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 4965 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 4966 if (VRC == OpRC) 4967 continue; 4968 4969 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 4970 Op.setIsKill(); 4971 } 4972 } 4973 4974 return; 4975 } 4976 4977 // Legalize INSERT_SUBREG 4978 // src0 must have the same register class as dst 4979 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 4980 Register Dst = MI.getOperand(0).getReg(); 4981 Register Src0 = MI.getOperand(1).getReg(); 4982 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 4983 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 4984 if (DstRC != Src0RC) { 4985 MachineBasicBlock *MBB = MI.getParent(); 4986 MachineOperand &Op = MI.getOperand(1); 4987 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 4988 } 4989 return; 4990 } 4991 4992 // Legalize SI_INIT_M0 4993 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 4994 MachineOperand &Src = MI.getOperand(0); 4995 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 4996 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 4997 return; 4998 } 4999 5000 // Legalize MIMG and MUBUF/MTBUF for shaders. 5001 // 5002 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5003 // scratch memory access. In both cases, the legalization never involves 5004 // conversion to the addr64 form. 5005 if (isMIMG(MI) || 5006 (AMDGPU::isShader(MF.getFunction().getCallingConv()) && 5007 (isMUBUF(MI) || isMTBUF(MI)))) { 5008 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 5009 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) 5010 loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT); 5011 5012 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 5013 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) 5014 loadSRsrcFromVGPR(*this, MI, *SSamp, MDT); 5015 5016 return; 5017 } 5018 5019 // Legalize MUBUF* instructions. 
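// Depending on the addressing mode this either folds the descriptor's base
// pointer into vaddr (ADDR64 forms), first converts an _OFFSET form to
// ADDR64, or falls back to a waterfall loop; see the comment block below.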
5020 int RsrcIdx =
5021 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5022 if (RsrcIdx != -1) {
5023 // We have an MUBUF instruction
5024 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5025 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5026 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5027 RI.getRegClass(RsrcRC))) {
5028 // The operands are legal.
5029 // FIXME: We may need to legalize operands besides srsrc.
5030 return;
5031 }
5032
5033 // Legalize a VGPR Rsrc.
5034 //
5035 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5036 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5037 // a zero-value SRsrc.
5038 //
5039 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5040 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5041 // above.
5042 //
5043 // Otherwise we are on non-ADDR64 hardware, and/or we have
5044 // idxen/offen/bothen and we fall back to a waterfall loop.
5045
5046 MachineBasicBlock &MBB = *MI.getParent();
5047
5048 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5049 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5050 // This is already an ADDR64 instruction so we need to add the pointer
5051 // extracted from the resource descriptor to the current value of VAddr.
5052 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5053 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5054 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5055
5056 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5057 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5058 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5059
5060 unsigned RsrcPtr, NewSRsrc;
5061 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5062
5063 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5064 const DebugLoc &DL = MI.getDebugLoc();
5065 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5066 .addDef(CondReg0)
5067 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5068 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5069 .addImm(0);
5070
5071 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5072 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5073 .addDef(CondReg1, RegState::Dead)
5074 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5075 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5076 .addReg(CondReg0, RegState::Kill)
5077 .addImm(0);
5078
5079 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5080 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5081 .addReg(NewVAddrLo)
5082 .addImm(AMDGPU::sub0)
5083 .addReg(NewVAddrHi)
5084 .addImm(AMDGPU::sub1);
5085
5086 VAddr->setReg(NewVAddr);
5087 Rsrc->setReg(NewSRsrc);
5088 } else if (!VAddr && ST.hasAddr64()) {
5089 // This instruction is the _OFFSET variant, so we need to convert it to
5090 // ADDR64.
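// Rough shape of the rewrite that follows (sketch): the 64-bit base pointer
// extracted from the VGPR descriptor becomes the new vaddr, srsrc is replaced
// by a mostly-zero descriptor carrying only the default data format, and the
// opcode is switched to the corresponding _ADDR64 form.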
5091 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
5092 "FIXME: Need to emit flat atomics here");
5093
5094 unsigned RsrcPtr, NewSRsrc;
5095 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5096
5097 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5098 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
5099 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
5100 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
5101 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
5102
5103 // Atomics with return have an additional tied operand and are
5104 // missing some of the special bits.
5105 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
5106 MachineInstr *Addr64;
5107
5108 if (!VDataIn) {
5109 // Regular buffer load / store.
5110 MachineInstrBuilder MIB =
5111 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5112 .add(*VData)
5113 .addReg(NewVAddr)
5114 .addReg(NewSRsrc)
5115 .add(*SOffset)
5116 .add(*Offset);
5117
5118 // Atomics do not have this operand.
5119 if (const MachineOperand *GLC =
5120 getNamedOperand(MI, AMDGPU::OpName::glc)) {
5121 MIB.addImm(GLC->getImm());
5122 }
5123 if (const MachineOperand *DLC =
5124 getNamedOperand(MI, AMDGPU::OpName::dlc)) {
5125 MIB.addImm(DLC->getImm());
5126 }
5127
5128 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
5129
5130 if (const MachineOperand *TFE =
5131 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
5132 MIB.addImm(TFE->getImm());
5133 }
5134
5135 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
5136
5137 MIB.cloneMemRefs(MI);
5138 Addr64 = MIB;
5139 } else {
5140 // Atomics with return.
5141 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
5142 .add(*VData)
5143 .add(*VDataIn)
5144 .addReg(NewVAddr)
5145 .addReg(NewSRsrc)
5146 .add(*SOffset)
5147 .add(*Offset)
5148 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
5149 .cloneMemRefs(MI);
5150 }
5151
5152 MI.removeFromParent();
5153
5154 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5155 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
5156 NewVAddr)
5157 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5158 .addImm(AMDGPU::sub0)
5159 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5160 .addImm(AMDGPU::sub1);
5161 } else {
5162 // This is another variant; legalize Rsrc with waterfall loop from VGPRs
5163 // to SGPRs.
5164 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
5165 }
5166 }
5167 }
5168
5169 void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
5170 MachineDominatorTree *MDT) const {
5171 SetVectorType Worklist;
5172 Worklist.insert(&TopInst);
5173
5174 while (!Worklist.empty()) {
5175 MachineInstr &Inst = *Worklist.pop_back_val();
5176 MachineBasicBlock *MBB = Inst.getParent();
5177 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
5178
5179 unsigned Opcode = Inst.getOpcode();
5180 unsigned NewOpcode = getVALUOp(Inst);
5181
5182 // Handle some special cases
5183 switch (Opcode) {
5184 default:
5185 break;
5186 case AMDGPU::S_ADD_U64_PSEUDO:
5187 case AMDGPU::S_SUB_U64_PSEUDO:
5188 splitScalar64BitAddSub(Worklist, Inst, MDT);
5189 Inst.eraseFromParent();
5190 continue;
5191 case AMDGPU::S_ADD_I32:
5192 case AMDGPU::S_SUB_I32:
5193 // FIXME: The u32 versions currently selected use the carry.
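// (On subtargets with add-no-carry, moveScalarAddSub below rewrites the
// instruction directly to V_ADD_U32_e64 / V_SUB_U32_e64 and drops the unused
// SCC def; otherwise it returns false and we fall through to the default
// handling.)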
5194 if (moveScalarAddSub(Worklist, Inst, MDT)) 5195 continue; 5196 5197 // Default handling 5198 break; 5199 case AMDGPU::S_AND_B64: 5200 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5201 Inst.eraseFromParent(); 5202 continue; 5203 5204 case AMDGPU::S_OR_B64: 5205 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5206 Inst.eraseFromParent(); 5207 continue; 5208 5209 case AMDGPU::S_XOR_B64: 5210 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5211 Inst.eraseFromParent(); 5212 continue; 5213 5214 case AMDGPU::S_NAND_B64: 5215 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5216 Inst.eraseFromParent(); 5217 continue; 5218 5219 case AMDGPU::S_NOR_B64: 5220 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5221 Inst.eraseFromParent(); 5222 continue; 5223 5224 case AMDGPU::S_XNOR_B64: 5225 if (ST.hasDLInsts()) 5226 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5227 else 5228 splitScalar64BitXnor(Worklist, Inst, MDT); 5229 Inst.eraseFromParent(); 5230 continue; 5231 5232 case AMDGPU::S_ANDN2_B64: 5233 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5234 Inst.eraseFromParent(); 5235 continue; 5236 5237 case AMDGPU::S_ORN2_B64: 5238 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5239 Inst.eraseFromParent(); 5240 continue; 5241 5242 case AMDGPU::S_NOT_B64: 5243 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5244 Inst.eraseFromParent(); 5245 continue; 5246 5247 case AMDGPU::S_BCNT1_I32_B64: 5248 splitScalar64BitBCNT(Worklist, Inst); 5249 Inst.eraseFromParent(); 5250 continue; 5251 5252 case AMDGPU::S_BFE_I64: 5253 splitScalar64BitBFE(Worklist, Inst); 5254 Inst.eraseFromParent(); 5255 continue; 5256 5257 case AMDGPU::S_LSHL_B32: 5258 if (ST.hasOnlyRevVALUShifts()) { 5259 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5260 swapOperands(Inst); 5261 } 5262 break; 5263 case AMDGPU::S_ASHR_I32: 5264 if (ST.hasOnlyRevVALUShifts()) { 5265 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5266 swapOperands(Inst); 5267 } 5268 break; 5269 case AMDGPU::S_LSHR_B32: 5270 if (ST.hasOnlyRevVALUShifts()) { 5271 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5272 swapOperands(Inst); 5273 } 5274 break; 5275 case AMDGPU::S_LSHL_B64: 5276 if (ST.hasOnlyRevVALUShifts()) { 5277 NewOpcode = AMDGPU::V_LSHLREV_B64; 5278 swapOperands(Inst); 5279 } 5280 break; 5281 case AMDGPU::S_ASHR_I64: 5282 if (ST.hasOnlyRevVALUShifts()) { 5283 NewOpcode = AMDGPU::V_ASHRREV_I64; 5284 swapOperands(Inst); 5285 } 5286 break; 5287 case AMDGPU::S_LSHR_B64: 5288 if (ST.hasOnlyRevVALUShifts()) { 5289 NewOpcode = AMDGPU::V_LSHRREV_B64; 5290 swapOperands(Inst); 5291 } 5292 break; 5293 5294 case AMDGPU::S_ABS_I32: 5295 lowerScalarAbs(Worklist, Inst); 5296 Inst.eraseFromParent(); 5297 continue; 5298 5299 case AMDGPU::S_CBRANCH_SCC0: 5300 case AMDGPU::S_CBRANCH_SCC1: 5301 // Clear unused bits of vcc 5302 if (ST.isWave32()) 5303 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), 5304 AMDGPU::VCC_LO) 5305 .addReg(AMDGPU::EXEC_LO) 5306 .addReg(AMDGPU::VCC_LO); 5307 else 5308 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 5309 AMDGPU::VCC) 5310 .addReg(AMDGPU::EXEC) 5311 .addReg(AMDGPU::VCC); 5312 break; 5313 5314 case AMDGPU::S_BFE_U64: 5315 case AMDGPU::S_BFM_B64: 5316 llvm_unreachable("Moving this op to VALU not implemented"); 5317 5318 case AMDGPU::S_PACK_LL_B32_B16: 5319 case AMDGPU::S_PACK_LH_B32_B16: 5320 case AMDGPU::S_PACK_HH_B32_B16: 5321 movePackToVALU(Worklist, MRI, Inst); 5322 
Inst.eraseFromParent(); 5323 continue; 5324 5325 case AMDGPU::S_XNOR_B32: 5326 lowerScalarXnor(Worklist, Inst); 5327 Inst.eraseFromParent(); 5328 continue; 5329 5330 case AMDGPU::S_NAND_B32: 5331 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5332 Inst.eraseFromParent(); 5333 continue; 5334 5335 case AMDGPU::S_NOR_B32: 5336 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5337 Inst.eraseFromParent(); 5338 continue; 5339 5340 case AMDGPU::S_ANDN2_B32: 5341 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5342 Inst.eraseFromParent(); 5343 continue; 5344 5345 case AMDGPU::S_ORN2_B32: 5346 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5347 Inst.eraseFromParent(); 5348 continue; 5349 5350 // TODO: remove as soon as everything is ready 5351 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5352 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5353 // can only be selected from the uniform SDNode. 5354 case AMDGPU::S_ADD_CO_PSEUDO: 5355 case AMDGPU::S_SUB_CO_PSEUDO: { 5356 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5357 ? AMDGPU::V_ADDC_U32_e64 5358 : AMDGPU::V_SUBB_U32_e64; 5359 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5360 5361 Register CarryInReg = Inst.getOperand(4).getReg(); 5362 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 5363 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 5364 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 5365 .addReg(CarryInReg); 5366 } 5367 5368 Register CarryOutReg = Inst.getOperand(1).getReg(); 5369 5370 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5371 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5372 MachineInstr *CarryOp = 5373 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5374 .addReg(CarryOutReg, RegState::Define) 5375 .add(Inst.getOperand(2)) 5376 .add(Inst.getOperand(3)) 5377 .addReg(CarryInReg) 5378 .addImm(0); 5379 legalizeOperands(*CarryOp); 5380 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 5381 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 5382 Inst.eraseFromParent(); 5383 } 5384 continue; 5385 case AMDGPU::S_UADDO_PSEUDO: 5386 case AMDGPU::S_USUBO_PSEUDO: { 5387 const DebugLoc &DL = Inst.getDebugLoc(); 5388 MachineOperand &Dest0 = Inst.getOperand(0); 5389 MachineOperand &Dest1 = Inst.getOperand(1); 5390 MachineOperand &Src0 = Inst.getOperand(2); 5391 MachineOperand &Src1 = Inst.getOperand(3); 5392 5393 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 5394 ? AMDGPU::V_ADD_CO_U32_e64 5395 : AMDGPU::V_SUB_CO_U32_e64; 5396 const TargetRegisterClass *NewRC = 5397 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 5398 Register DestReg = MRI.createVirtualRegister(NewRC); 5399 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 5400 .addReg(Dest1.getReg(), RegState::Define) 5401 .add(Src0) 5402 .add(Src1) 5403 .addImm(0); // clamp bit 5404 5405 legalizeOperands(*NewInstr, MDT); 5406 5407 MRI.replaceRegWith(Dest0.getReg(), DestReg); 5408 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 5409 Worklist); 5410 Inst.eraseFromParent(); 5411 } 5412 continue; 5413 5414 case AMDGPU::S_CSELECT_B32: 5415 case AMDGPU::S_CSELECT_B64: 5416 lowerSelect(Worklist, Inst, MDT); 5417 Inst.eraseFromParent(); 5418 continue; 5419 } 5420 5421 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 5422 // We cannot move this instruction to the VALU, so we should try to 5423 // legalize its operands instead. 
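// (For example, an opcode with no direct VALU counterpart keeps its scalar
// form here; legalizeOperands only rewrites whichever of its operands are
// illegal for that form.)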
5424 legalizeOperands(Inst, MDT);
5425 continue;
5426 }
5427
5428 // Use the new VALU Opcode.
5429 const MCInstrDesc &NewDesc = get(NewOpcode);
5430 Inst.setDesc(NewDesc);
5431
5432 // Remove any references to SCC. Vector instructions can't read from it, and
5433 // we're just about to add the implicit use / defs of VCC, and we don't want
5434 // both.
5435 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
5436 MachineOperand &Op = Inst.getOperand(i);
5437 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
5438 // Only propagate through live-def of SCC.
5439 if (Op.isDef() && !Op.isDead())
5440 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
5441 Inst.RemoveOperand(i);
5442 }
5443 }
5444
5445 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
5446 // We are converting these to a BFE, so we need to add the missing
5447 // operands for the size and offset.
5448 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
5449 Inst.addOperand(MachineOperand::CreateImm(0));
5450 Inst.addOperand(MachineOperand::CreateImm(Size));
5451
5452 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
5453 // The VALU version adds the second operand to the result, so insert an
5454 // extra 0 operand.
5455 Inst.addOperand(MachineOperand::CreateImm(0));
5456 }
5457
5458 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
5459 fixImplicitOperands(Inst);
5460
5461 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
5462 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
5463 // If we need to move this to VGPRs, we need to unpack the second operand
5464 // back into the 2 separate ones for bit offset and width.
5465 assert(OffsetWidthOp.isImm() &&
5466 "Scalar BFE is only implemented for constant width and offset");
5467 uint32_t Imm = OffsetWidthOp.getImm();
5468
5469 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5470 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5471 Inst.RemoveOperand(2); // Remove old immediate.
5472 Inst.addOperand(MachineOperand::CreateImm(Offset));
5473 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
5474 }
5475
5476 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
5477 unsigned NewDstReg = AMDGPU::NoRegister;
5478 if (HasDst) {
5479 Register DstReg = Inst.getOperand(0).getReg();
5480 if (DstReg.isPhysical())
5481 continue;
5482
5483 // Update the destination register class.
5484 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
5485 if (!NewDstRC)
5486 continue;
5487
5488 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
5489 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
5490 // Instead of creating a copy where src and dst are the same register
5491 // class, we just replace all uses of dst with src. These kinds of
5492 // copies interfere with the heuristics MachineSink uses to decide
5493 // whether or not to split a critical edge, since the pass assumes
5494 // that copies will end up as machine instructions and not be
5495 // eliminated.
5496 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
5497 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
5498 MRI.clearKillFlags(Inst.getOperand(1).getReg());
5499 Inst.getOperand(0).setReg(DstReg);
5500
5501 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
5502 // these are deleted later, but at -O0 it would leave a suspicious
5503 // looking illegal copy of an undef register.
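// (The now-dead copy is therefore rewritten below into an IMPLICIT_DEF of
// the original destination register.)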
5504 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 5505 Inst.RemoveOperand(I); 5506 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 5507 continue; 5508 } 5509 5510 NewDstReg = MRI.createVirtualRegister(NewDstRC); 5511 MRI.replaceRegWith(DstReg, NewDstReg); 5512 } 5513 5514 // Legalize the operands 5515 legalizeOperands(Inst, MDT); 5516 5517 if (HasDst) 5518 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 5519 } 5520 } 5521 5522 // Add/sub require special handling to deal with carry outs. 5523 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 5524 MachineDominatorTree *MDT) const { 5525 if (ST.hasAddNoCarry()) { 5526 // Assume there is no user of scc since we don't select this in that case. 5527 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 5528 // is used. 5529 5530 MachineBasicBlock &MBB = *Inst.getParent(); 5531 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5532 5533 Register OldDstReg = Inst.getOperand(0).getReg(); 5534 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5535 5536 unsigned Opc = Inst.getOpcode(); 5537 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 5538 5539 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 5540 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 5541 5542 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 5543 Inst.RemoveOperand(3); 5544 5545 Inst.setDesc(get(NewOpc)); 5546 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 5547 Inst.addImplicitDefUseOperands(*MBB.getParent()); 5548 MRI.replaceRegWith(OldDstReg, ResultReg); 5549 legalizeOperands(Inst, MDT); 5550 5551 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5552 return true; 5553 } 5554 5555 return false; 5556 } 5557 5558 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst, 5559 MachineDominatorTree *MDT) const { 5560 5561 MachineBasicBlock &MBB = *Inst.getParent(); 5562 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5563 MachineBasicBlock::iterator MII = Inst; 5564 DebugLoc DL = Inst.getDebugLoc(); 5565 5566 MachineOperand &Dest = Inst.getOperand(0); 5567 MachineOperand &Src0 = Inst.getOperand(1); 5568 MachineOperand &Src1 = Inst.getOperand(2); 5569 MachineOperand &Cond = Inst.getOperand(3); 5570 5571 Register SCCSource = Cond.getReg(); 5572 // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead. 5573 if (!Cond.isUndef()) { 5574 for (MachineInstr &CandI : 5575 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 5576 Inst.getParent()->rend())) { 5577 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 5578 -1) { 5579 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 5580 SCCSource = CandI.getOperand(1).getReg(); 5581 } 5582 break; 5583 } 5584 } 5585 } 5586 5587 // If this is a trivial select where the condition is effectively not SCC 5588 // (SCCSource is a source of copy to SCC), then the select is semantically 5589 // equivalent to copying SCCSource. Hence, there is no need to create 5590 // V_CNDMASK, we can just use that and bail out. 5591 if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) && 5592 Src1.isImm() && (Src1.getImm() == 0)) { 5593 MRI.replaceRegWith(Dest.getReg(), SCCSource); 5594 return; 5595 } 5596 5597 const TargetRegisterClass *TC = ST.getWavefrontSize() == 64 5598 ? 
&AMDGPU::SReg_64_XEXECRegClass
5599 : &AMDGPU::SReg_32_XM0_XEXECRegClass;
5600 Register CopySCC = MRI.createVirtualRegister(TC);
5601
5602 if (SCCSource == AMDGPU::SCC) {
5603 // Insert a trivial select instead of creating a copy, because a copy from
5604 // SCC would semantically mean just copying a single bit, but we may need
5605 // the result to be a vector condition mask that needs preserving.
5606 unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
5607 : AMDGPU::S_CSELECT_B32;
5608 auto NewSelect =
5609 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
5610 NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
5611 } else {
5612 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
5613 }
5614
5615 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5616
5617 auto UpdatedInst =
5618 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
5619 .addImm(0)
5620 .add(Src1) // False
5621 .addImm(0)
5622 .add(Src0) // True
5623 .addReg(CopySCC);
5624
5625 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5626 legalizeOperands(*UpdatedInst, MDT);
5627 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5628 }
5629
5630 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
5631 MachineInstr &Inst) const {
5632 MachineBasicBlock &MBB = *Inst.getParent();
5633 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5634 MachineBasicBlock::iterator MII = Inst;
5635 DebugLoc DL = Inst.getDebugLoc();
5636
5637 MachineOperand &Dest = Inst.getOperand(0);
5638 MachineOperand &Src = Inst.getOperand(1);
5639 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5640 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5641
5642 unsigned SubOp = ST.hasAddNoCarry() ?
5643 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;
5644
5645 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
5646 .addImm(0)
5647 .addReg(Src.getReg());
5648
5649 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
5650 .addReg(Src.getReg())
5651 .addReg(TmpReg);
5652
5653 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5654 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5655 }
5656
5657 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
5658 MachineInstr &Inst) const {
5659 MachineBasicBlock &MBB = *Inst.getParent();
5660 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5661 MachineBasicBlock::iterator MII = Inst;
5662 const DebugLoc &DL = Inst.getDebugLoc();
5663
5664 MachineOperand &Dest = Inst.getOperand(0);
5665 MachineOperand &Src0 = Inst.getOperand(1);
5666 MachineOperand &Src1 = Inst.getOperand(2);
5667
5668 if (ST.hasDLInsts()) {
5669 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5670 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
5671 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
5672
5673 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
5674 .add(Src0)
5675 .add(Src1);
5676
5677 MRI.replaceRegWith(Dest.getReg(), NewDest);
5678 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5679 } else {
5680 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
5681 // invert either source and then perform the XOR. If either source is a
5682 // scalar register, then we can leave the inversion on the scalar unit to
5683 // achieve a better distribution of scalar and vector instructions.
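// Sketch of the two-instruction form built below, assuming src0 is the
// scalar operand:
//   %tmp = S_NOT_B32 %src0
//   %dst = S_XOR_B32 %tmp, %src1
// The XOR is pushed onto the worklist and becomes a V_XOR_B32 on a later
// iteration if it still has VGPR inputs.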
5684 bool Src0IsSGPR = Src0.isReg() && 5685 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 5686 bool Src1IsSGPR = Src1.isReg() && 5687 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 5688 MachineInstr *Xor; 5689 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5690 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5691 5692 // Build a pair of scalar instructions and add them to the work list. 5693 // The next iteration over the work list will lower these to the vector 5694 // unit as necessary. 5695 if (Src0IsSGPR) { 5696 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5697 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5698 .addReg(Temp) 5699 .add(Src1); 5700 } else if (Src1IsSGPR) { 5701 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5702 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5703 .add(Src0) 5704 .addReg(Temp); 5705 } else { 5706 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5707 .add(Src0) 5708 .add(Src1); 5709 MachineInstr *Not = 5710 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5711 Worklist.insert(Not); 5712 } 5713 5714 MRI.replaceRegWith(Dest.getReg(), NewDest); 5715 5716 Worklist.insert(Xor); 5717 5718 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5719 } 5720 } 5721 5722 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5723 MachineInstr &Inst, 5724 unsigned Opcode) const { 5725 MachineBasicBlock &MBB = *Inst.getParent(); 5726 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5727 MachineBasicBlock::iterator MII = Inst; 5728 const DebugLoc &DL = Inst.getDebugLoc(); 5729 5730 MachineOperand &Dest = Inst.getOperand(0); 5731 MachineOperand &Src0 = Inst.getOperand(1); 5732 MachineOperand &Src1 = Inst.getOperand(2); 5733 5734 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5735 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5736 5737 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5738 .add(Src0) 5739 .add(Src1); 5740 5741 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5742 .addReg(Interm); 5743 5744 Worklist.insert(&Op); 5745 Worklist.insert(&Not); 5746 5747 MRI.replaceRegWith(Dest.getReg(), NewDest); 5748 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5749 } 5750 5751 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5752 MachineInstr &Inst, 5753 unsigned Opcode) const { 5754 MachineBasicBlock &MBB = *Inst.getParent(); 5755 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5756 MachineBasicBlock::iterator MII = Inst; 5757 const DebugLoc &DL = Inst.getDebugLoc(); 5758 5759 MachineOperand &Dest = Inst.getOperand(0); 5760 MachineOperand &Src0 = Inst.getOperand(1); 5761 MachineOperand &Src1 = Inst.getOperand(2); 5762 5763 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5764 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5765 5766 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5767 .add(Src1); 5768 5769 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5770 .add(Src0) 5771 .addReg(Interm); 5772 5773 Worklist.insert(&Not); 5774 Worklist.insert(&Op); 5775 5776 MRI.replaceRegWith(Dest.getReg(), NewDest); 5777 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5778 } 5779 5780 void SIInstrInfo::splitScalar64BitUnaryOp( 5781 SetVectorType &Worklist, MachineInstr &Inst, 5782 unsigned Opcode) const { 5783 
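// Expansion sketch (e.g. for S_NOT_B64 with Opcode == S_NOT_B32):
//   %lo  = <Opcode> %src0.sub0
//   %hi  = <Opcode> %src0.sub1
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1
// Both halves are added to the worklist so they are moved to the VALU in
// turn.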
MachineBasicBlock &MBB = *Inst.getParent(); 5784 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5785 5786 MachineOperand &Dest = Inst.getOperand(0); 5787 MachineOperand &Src0 = Inst.getOperand(1); 5788 DebugLoc DL = Inst.getDebugLoc(); 5789 5790 MachineBasicBlock::iterator MII = Inst; 5791 5792 const MCInstrDesc &InstDesc = get(Opcode); 5793 const TargetRegisterClass *Src0RC = Src0.isReg() ? 5794 MRI.getRegClass(Src0.getReg()) : 5795 &AMDGPU::SGPR_32RegClass; 5796 5797 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5798 5799 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5800 AMDGPU::sub0, Src0SubRC); 5801 5802 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5803 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5804 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5805 5806 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5807 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5808 5809 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5810 AMDGPU::sub1, Src0SubRC); 5811 5812 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5813 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5814 5815 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5816 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5817 .addReg(DestSub0) 5818 .addImm(AMDGPU::sub0) 5819 .addReg(DestSub1) 5820 .addImm(AMDGPU::sub1); 5821 5822 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5823 5824 Worklist.insert(&LoHalf); 5825 Worklist.insert(&HiHalf); 5826 5827 // We don't need to legalizeOperands here because for a single operand, src0 5828 // will support any kind of input. 5829 5830 // Move all users of this moved value. 
5831 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5832 }
5833
5834 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
5835 MachineInstr &Inst,
5836 MachineDominatorTree *MDT) const {
5837 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);
5838
5839 MachineBasicBlock &MBB = *Inst.getParent();
5840 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5841 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5842
5843 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5844 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5845 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5846
5847 Register CarryReg = MRI.createVirtualRegister(CarryRC);
5848 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);
5849
5850 MachineOperand &Dest = Inst.getOperand(0);
5851 MachineOperand &Src0 = Inst.getOperand(1);
5852 MachineOperand &Src1 = Inst.getOperand(2);
5853 const DebugLoc &DL = Inst.getDebugLoc();
5854 MachineBasicBlock::iterator MII = Inst;
5855
5856 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
5857 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
5858 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5859 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5860
5861 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5862 AMDGPU::sub0, Src0SubRC);
5863 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5864 AMDGPU::sub0, Src1SubRC);
5865
5866
5867 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5868 AMDGPU::sub1, Src0SubRC);
5869 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5870 AMDGPU::sub1, Src1SubRC);
5871
5872 unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
5873 MachineInstr *LoHalf =
5874 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
5875 .addReg(CarryReg, RegState::Define)
5876 .add(SrcReg0Sub0)
5877 .add(SrcReg1Sub0)
5878 .addImm(0); // clamp bit
5879
5880 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
5881 MachineInstr *HiHalf =
5882 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
5883 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
5884 .add(SrcReg0Sub1)
5885 .add(SrcReg1Sub1)
5886 .addReg(CarryReg, RegState::Kill)
5887 .addImm(0); // clamp bit
5888
5889 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5890 .addReg(DestSub0)
5891 .addImm(AMDGPU::sub0)
5892 .addReg(DestSub1)
5893 .addImm(AMDGPU::sub1);
5894
5895 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5896
5897 // Try to legalize the operands in case we need to swap the order to keep it
5898 // valid.
5899 legalizeOperands(*LoHalf, MDT);
5900 legalizeOperands(*HiHalf, MDT);
5901
5902 // Move all users of this moved value.
5903 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5904 }
5905
5906 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
5907 MachineInstr &Inst, unsigned Opcode,
5908 MachineDominatorTree *MDT) const {
5909 MachineBasicBlock &MBB = *Inst.getParent();
5910 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5911
5912 MachineOperand &Dest = Inst.getOperand(0);
5913 MachineOperand &Src0 = Inst.getOperand(1);
5914 MachineOperand &Src1 = Inst.getOperand(2);
5915 DebugLoc DL = Inst.getDebugLoc();
5916
5917 MachineBasicBlock::iterator MII = Inst;
5918
5919 const MCInstrDesc &InstDesc = get(Opcode);
5920 const TargetRegisterClass *Src0RC = Src0.isReg() ?
5921 MRI.getRegClass(Src0.getReg()) :
5922 &AMDGPU::SGPR_32RegClass;
5923
5924 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5925 const TargetRegisterClass *Src1RC = Src1.isReg() ?
5926 MRI.getRegClass(Src1.getReg()) :
5927 &AMDGPU::SGPR_32RegClass;
5928
5929 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5930
5931 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5932 AMDGPU::sub0, Src0SubRC);
5933 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5934 AMDGPU::sub0, Src1SubRC);
5935 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5936 AMDGPU::sub1, Src0SubRC);
5937 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5938 AMDGPU::sub1, Src1SubRC);
5939
5940 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5941 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5942 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5943
5944 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
5945 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
5946 .add(SrcReg0Sub0)
5947 .add(SrcReg1Sub0);
5948
5949 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
5950 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
5951 .add(SrcReg0Sub1)
5952 .add(SrcReg1Sub1);
5953
5954 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
5955 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5956 .addReg(DestSub0)
5957 .addImm(AMDGPU::sub0)
5958 .addReg(DestSub1)
5959 .addImm(AMDGPU::sub1);
5960
5961 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5962
5963 Worklist.insert(&LoHalf);
5964 Worklist.insert(&HiHalf);
5965
5966 // Move all users of this moved value.
5967 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5968 }
5969
5970 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
5971 MachineInstr &Inst,
5972 MachineDominatorTree *MDT) const {
5973 MachineBasicBlock &MBB = *Inst.getParent();
5974 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5975
5976 MachineOperand &Dest = Inst.getOperand(0);
5977 MachineOperand &Src0 = Inst.getOperand(1);
5978 MachineOperand &Src1 = Inst.getOperand(2);
5979 const DebugLoc &DL = Inst.getDebugLoc();
5980
5981 MachineBasicBlock::iterator MII = Inst;
5982
5983 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5984
5985 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5986
5987 MachineOperand* Op0;
5988 MachineOperand* Op1;
5989
5990 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
5991 Op0 = &Src0;
5992 Op1 = &Src1;
5993 } else {
5994 Op0 = &Src1;
5995 Op1 = &Src0;
5996 }
5997
5998 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
5999 .add(*Op0);
6000
6001 Register NewDest = MRI.createVirtualRegister(DestRC);
6002
6003 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
6004 .addReg(Interm)
6005 .add(*Op1);
6006
6007 MRI.replaceRegWith(Dest.getReg(), NewDest);
6008
6009 Worklist.insert(&Xor);
6010 }
6011
6012 void SIInstrInfo::splitScalar64BitBCNT(
6013 SetVectorType &Worklist, MachineInstr &Inst) const {
6014 MachineBasicBlock &MBB = *Inst.getParent();
6015 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6016
6017 MachineBasicBlock::iterator MII = Inst;
6018 const DebugLoc &DL = Inst.getDebugLoc();
6019
6020 MachineOperand &Dest = Inst.getOperand(0);
6021 MachineOperand &Src = Inst.getOperand(1);
6022
6023 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
6024 const TargetRegisterClass *SrcRC = Src.isReg() ?
6025 MRI.getRegClass(Src.getReg()) :
6026 &AMDGPU::SGPR_32RegClass;
6027
6028 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6029 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6030
6031 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
6032
6033 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6034 AMDGPU::sub0, SrcSubRC);
6035 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6036 AMDGPU::sub1, SrcSubRC);
6037
6038 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
6039
6040 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
6041
6042 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6043
6044 // We don't need to legalize operands here. src0 for either instruction can be
6045 // an SGPR, and the second input is unused or determined here.
6046 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6047 }
6048
6049 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
6050 MachineInstr &Inst) const {
6051 MachineBasicBlock &MBB = *Inst.getParent();
6052 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6053 MachineBasicBlock::iterator MII = Inst;
6054 const DebugLoc &DL = Inst.getDebugLoc();
6055
6056 MachineOperand &Dest = Inst.getOperand(0);
6057 uint32_t Imm = Inst.getOperand(2).getImm();
6058 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6059 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6060
6061 (void) Offset;
6062
6063 // Only sext_inreg cases handled.
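// The scalar BFE immediate packs the field as (width << 16) | offset; only
// the sign-extend-in-register pattern (offset == 0, width <= 32) is expanded
// here, using a 32-bit V_BFE_I32 for the low half and an arithmetic shift to
// produce the sign-extended high half.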
6064 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 6065 Offset == 0 && "Not implemented"); 6066 6067 if (BitWidth < 32) { 6068 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6069 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6070 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6071 6072 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 6073 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 6074 .addImm(0) 6075 .addImm(BitWidth); 6076 6077 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 6078 .addImm(31) 6079 .addReg(MidRegLo); 6080 6081 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6082 .addReg(MidRegLo) 6083 .addImm(AMDGPU::sub0) 6084 .addReg(MidRegHi) 6085 .addImm(AMDGPU::sub1); 6086 6087 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6088 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6089 return; 6090 } 6091 6092 MachineOperand &Src = Inst.getOperand(1); 6093 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6094 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6095 6096 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 6097 .addImm(31) 6098 .addReg(Src.getReg(), 0, AMDGPU::sub0); 6099 6100 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6101 .addReg(Src.getReg(), 0, AMDGPU::sub0) 6102 .addImm(AMDGPU::sub0) 6103 .addReg(TmpReg) 6104 .addImm(AMDGPU::sub1); 6105 6106 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6107 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6108 } 6109 6110 void SIInstrInfo::addUsersToMoveToVALUWorklist( 6111 Register DstReg, 6112 MachineRegisterInfo &MRI, 6113 SetVectorType &Worklist) const { 6114 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 6115 E = MRI.use_end(); I != E;) { 6116 MachineInstr &UseMI = *I->getParent(); 6117 6118 unsigned OpNo = 0; 6119 6120 switch (UseMI.getOpcode()) { 6121 case AMDGPU::COPY: 6122 case AMDGPU::WQM: 6123 case AMDGPU::SOFT_WQM: 6124 case AMDGPU::WWM: 6125 case AMDGPU::REG_SEQUENCE: 6126 case AMDGPU::PHI: 6127 case AMDGPU::INSERT_SUBREG: 6128 break; 6129 default: 6130 OpNo = I.getOperandNo(); 6131 break; 6132 } 6133 6134 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 6135 Worklist.insert(&UseMI); 6136 6137 do { 6138 ++I; 6139 } while (I != E && I->getParent() == &UseMI); 6140 } else { 6141 ++I; 6142 } 6143 } 6144 } 6145 6146 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 6147 MachineRegisterInfo &MRI, 6148 MachineInstr &Inst) const { 6149 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6150 MachineBasicBlock *MBB = Inst.getParent(); 6151 MachineOperand &Src0 = Inst.getOperand(1); 6152 MachineOperand &Src1 = Inst.getOperand(2); 6153 const DebugLoc &DL = Inst.getDebugLoc(); 6154 6155 switch (Inst.getOpcode()) { 6156 case AMDGPU::S_PACK_LL_B32_B16: { 6157 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6158 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6159 6160 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 6161 // 0. 
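// Expansion used below for S_PACK_LL_B32_B16 (sketch):
//   %mask = V_MOV_B32 0xffff
//   %tmp  = V_AND_B32 %mask, %src0
//   %dst  = V_LSHL_OR_B32 %src1, 16, %tmp   // (src1 << 16) | (src0 & 0xffff)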
6162 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6163 .addImm(0xffff); 6164 6165 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 6166 .addReg(ImmReg, RegState::Kill) 6167 .add(Src0); 6168 6169 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 6170 .add(Src1) 6171 .addImm(16) 6172 .addReg(TmpReg, RegState::Kill); 6173 break; 6174 } 6175 case AMDGPU::S_PACK_LH_B32_B16: { 6176 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6177 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6178 .addImm(0xffff); 6179 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 6180 .addReg(ImmReg, RegState::Kill) 6181 .add(Src0) 6182 .add(Src1); 6183 break; 6184 } 6185 case AMDGPU::S_PACK_HH_B32_B16: { 6186 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6187 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6188 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 6189 .addImm(16) 6190 .add(Src0); 6191 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6192 .addImm(0xffff0000); 6193 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 6194 .add(Src1) 6195 .addReg(ImmReg, RegState::Kill) 6196 .addReg(TmpReg, RegState::Kill); 6197 break; 6198 } 6199 default: 6200 llvm_unreachable("unhandled s_pack_* instruction"); 6201 } 6202 6203 MachineOperand &Dest = Inst.getOperand(0); 6204 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6205 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6206 } 6207 6208 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 6209 MachineInstr &SCCDefInst, 6210 SetVectorType &Worklist) const { 6211 bool SCCUsedImplicitly = false; 6212 6213 // Ensure that def inst defines SCC, which is still live. 6214 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 6215 !Op.isDead() && Op.getParent() == &SCCDefInst); 6216 SmallVector<MachineInstr *, 4> CopyToDelete; 6217 // This assumes that all the users of SCC are in the same block 6218 // as the SCC def. 6219 for (MachineInstr &MI : // Skip the def inst itself. 6220 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 6221 SCCDefInst.getParent()->end())) { 6222 // Check if SCC is used first. 6223 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) { 6224 if (MI.isCopy()) { 6225 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6226 Register DestReg = MI.getOperand(0).getReg(); 6227 6228 for (auto &User : MRI.use_nodbg_instructions(DestReg)) { 6229 if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) || 6230 (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) { 6231 User.getOperand(4).setReg(RI.getVCC()); 6232 Worklist.insert(&User); 6233 } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) { 6234 User.getOperand(5).setReg(RI.getVCC()); 6235 // No need to add to Worklist. 6236 } 6237 } 6238 CopyToDelete.push_back(&MI); 6239 } else { 6240 if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 || 6241 MI.getOpcode() == AMDGPU::S_CSELECT_B64) { 6242 // This is an implicit use of SCC and it is really expected by 6243 // the SCC users to handle. 6244 // We cannot preserve the edge to the user so add the explicit 6245 // copy: SCC = COPY VCC. 6246 // The copy will be cleaned up during the processing of the user 6247 // in lowerSelect. 6248 SCCUsedImplicitly = true; 6249 } 6250 6251 Worklist.insert(&MI); 6252 } 6253 } 6254 // Exit if we find another SCC def. 
6255 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 6256 break; 6257 } 6258 for (auto &Copy : CopyToDelete) 6259 Copy->eraseFromParent(); 6260 6261 if (SCCUsedImplicitly) { 6262 BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()), 6263 SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC) 6264 .addReg(RI.getVCC()); 6265 } 6266 } 6267 6268 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 6269 const MachineInstr &Inst) const { 6270 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 6271 6272 switch (Inst.getOpcode()) { 6273 // For target instructions, getOpRegClass just returns the virtual register 6274 // class associated with the operand, so we need to find an equivalent VGPR 6275 // register class in order to move the instruction to the VALU. 6276 case AMDGPU::COPY: 6277 case AMDGPU::PHI: 6278 case AMDGPU::REG_SEQUENCE: 6279 case AMDGPU::INSERT_SUBREG: 6280 case AMDGPU::WQM: 6281 case AMDGPU::SOFT_WQM: 6282 case AMDGPU::WWM: { 6283 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 6284 if (RI.hasAGPRs(SrcRC)) { 6285 if (RI.hasAGPRs(NewDstRC)) 6286 return nullptr; 6287 6288 switch (Inst.getOpcode()) { 6289 case AMDGPU::PHI: 6290 case AMDGPU::REG_SEQUENCE: 6291 case AMDGPU::INSERT_SUBREG: 6292 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 6293 break; 6294 default: 6295 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6296 } 6297 6298 if (!NewDstRC) 6299 return nullptr; 6300 } else { 6301 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 6302 return nullptr; 6303 6304 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6305 if (!NewDstRC) 6306 return nullptr; 6307 } 6308 6309 return NewDstRC; 6310 } 6311 default: 6312 return NewDstRC; 6313 } 6314 } 6315 6316 // Find the one SGPR operand we are allowed to use. 6317 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 6318 int OpIndices[3]) const { 6319 const MCInstrDesc &Desc = MI.getDesc(); 6320 6321 // Find the one SGPR operand we are allowed to use. 6322 // 6323 // First we need to consider the instruction's operand requirements before 6324 // legalizing. Some operands are required to be SGPRs, such as implicit uses 6325 // of VCC, but we are still bound by the constant bus requirement to only use 6326 // one. 6327 // 6328 // If the operand's class is an SGPR, we can never move it. 6329 6330 Register SGPRReg = findImplicitSGPRRead(MI); 6331 if (SGPRReg != AMDGPU::NoRegister) 6332 return SGPRReg; 6333 6334 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 6335 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6336 6337 for (unsigned i = 0; i < 3; ++i) { 6338 int Idx = OpIndices[i]; 6339 if (Idx == -1) 6340 break; 6341 6342 const MachineOperand &MO = MI.getOperand(Idx); 6343 if (!MO.isReg()) 6344 continue; 6345 6346 // Is this operand statically required to be an SGPR based on the operand 6347 // constraints? 6348 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 6349 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 6350 if (IsRequiredSGPR) 6351 return MO.getReg(); 6352 6353 // If this could be a VGPR or an SGPR, Check the dynamic register class. 6354 Register Reg = MO.getReg(); 6355 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 6356 if (RI.isSGPRClass(RegRC)) 6357 UsedSGPRs[i] = Reg; 6358 } 6359 6360 // We don't have a required SGPR operand, so we have a bit more freedom in 6361 // selecting operands to move. 6362 6363 // Try to select the most used SGPR. 
If an SGPR is equal to one of the 6364 // others, we choose that. 6365 // 6366 // e.g. 6367 // V_FMA_F32 v0, s0, s0, s0 -> No moves 6368 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 6369 6370 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 6371 // prefer those. 6372 6373 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 6374 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 6375 SGPRReg = UsedSGPRs[0]; 6376 } 6377 6378 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 6379 if (UsedSGPRs[1] == UsedSGPRs[2]) 6380 SGPRReg = UsedSGPRs[1]; 6381 } 6382 6383 return SGPRReg; 6384 } 6385 6386 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 6387 unsigned OperandName) const { 6388 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 6389 if (Idx == -1) 6390 return nullptr; 6391 6392 return &MI.getOperand(Idx); 6393 } 6394 6395 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 6396 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 6397 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT 6398 (1ULL << 56) | // RESOURCE_LEVEL = 1 6399 (3ULL << 60); // OOB_SELECT = 3 6400 } 6401 6402 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 6403 if (ST.isAmdHsaOS()) { 6404 // Set ATC = 1. GFX9 doesn't have this bit. 6405 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6406 RsrcDataFormat |= (1ULL << 56); 6407 6408 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 6409 // BTW, it disables TC L2 and therefore decreases performance. 6410 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 6411 RsrcDataFormat |= (2ULL << 59); 6412 } 6413 6414 return RsrcDataFormat; 6415 } 6416 6417 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 6418 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 6419 AMDGPU::RSRC_TID_ENABLE | 6420 0xffffffff; // Size; 6421 6422 // GFX9 doesn't have ELEMENT_SIZE. 6423 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 6424 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; 6425 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 6426 } 6427 6428 // IndexStride = 64 / 32. 6429 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 6430 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 6431 6432 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 6433 // Clear them unless we want a huge stride. 
6434 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
6435 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
6436 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
6437
6438 return Rsrc23;
6439 }
6440
6441 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
6442 unsigned Opc = MI.getOpcode();
6443
6444 return isSMRD(Opc);
6445 }
6446
6447 bool SIInstrInfo::isHighLatencyDef(int Opc) const {
6448 return get(Opc).mayLoad() &&
6449 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
6450 }
6451
6452 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
6453 int &FrameIndex) const {
6454 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
6455 if (!Addr || !Addr->isFI())
6456 return AMDGPU::NoRegister;
6457
6458 assert(!MI.memoperands_empty() &&
6459 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
6460
6461 FrameIndex = Addr->getIndex();
6462 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
6463 }
6464
6465 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
6466 int &FrameIndex) const {
6467 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
6468 assert(Addr && Addr->isFI());
6469 FrameIndex = Addr->getIndex();
6470 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
6471 }
6472
6473 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
6474 int &FrameIndex) const {
6475 if (!MI.mayLoad())
6476 return AMDGPU::NoRegister;
6477
6478 if (isMUBUF(MI) || isVGPRSpill(MI))
6479 return isStackAccess(MI, FrameIndex);
6480
6481 if (isSGPRSpill(MI))
6482 return isSGPRStackAccess(MI, FrameIndex);
6483
6484 return AMDGPU::NoRegister;
6485 }
6486
6487 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
6488 int &FrameIndex) const {
6489 if (!MI.mayStore())
6490 return AMDGPU::NoRegister;
6491
6492 if (isMUBUF(MI) || isVGPRSpill(MI))
6493 return isStackAccess(MI, FrameIndex);
6494
6495 if (isSGPRSpill(MI))
6496 return isSGPRStackAccess(MI, FrameIndex);
6497
6498 return AMDGPU::NoRegister;
6499 }
6500
6501 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
6502 unsigned Size = 0;
6503 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
6504 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
6505 while (++I != E && I->isInsideBundle()) {
6506 assert(!I->isBundle() && "No nested bundle!");
6507 Size += getInstSizeInBytes(*I);
6508 }
6509
6510 return Size;
6511 }
6512
6513 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
6514 unsigned Opc = MI.getOpcode();
6515 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
6516 unsigned DescSize = Desc.getSize();
6517
6518 // If we have a definitive size, we can use it. Otherwise we need to inspect
6519 // the operands to know the size.
6520 if (isFixedSize(MI))
6521 return DescSize;
6522
6523 // 4-byte instructions may have a 32-bit literal encoded after them. Check
6524 // operands that could ever be literals.
6525 if (isVALU(MI) || isSALU(MI)) {
6526 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6527 if (Src0Idx == -1)
6528 return DescSize; // No operands.
6529
6530 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
6531 return isVOP3(MI) ? 12 : (DescSize + 4);
6532
6533 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6534 if (Src1Idx == -1)
6535 return DescSize;
6536
6537 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
6538 return isVOP3(MI) ?
12 : (DescSize + 4); 6539 6540 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); 6541 if (Src2Idx == -1) 6542 return DescSize; 6543 6544 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) 6545 return isVOP3(MI) ? 12 : (DescSize + 4); 6546 6547 return DescSize; 6548 } 6549 6550 // Check whether we have extra NSA words. 6551 if (isMIMG(MI)) { 6552 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 6553 if (VAddr0Idx < 0) 6554 return 8; 6555 6556 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 6557 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 6558 } 6559 6560 switch (Opc) { 6561 case TargetOpcode::IMPLICIT_DEF: 6562 case TargetOpcode::KILL: 6563 case TargetOpcode::DBG_VALUE: 6564 case TargetOpcode::EH_LABEL: 6565 return 0; 6566 case TargetOpcode::BUNDLE: 6567 return getInstBundleSize(MI); 6568 case TargetOpcode::INLINEASM: 6569 case TargetOpcode::INLINEASM_BR: { 6570 const MachineFunction *MF = MI.getParent()->getParent(); 6571 const char *AsmStr = MI.getOperand(0).getSymbolName(); 6572 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST); 6573 } 6574 default: 6575 return DescSize; 6576 } 6577 } 6578 6579 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 6580 if (!isFLAT(MI)) 6581 return false; 6582 6583 if (MI.memoperands_empty()) 6584 return true; 6585 6586 for (const MachineMemOperand *MMO : MI.memoperands()) { 6587 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 6588 return true; 6589 } 6590 return false; 6591 } 6592 6593 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 6594 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 6595 } 6596 6597 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 6598 MachineBasicBlock *IfEnd) const { 6599 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 6600 assert(TI != IfEntry->end()); 6601 6602 MachineInstr *Branch = &(*TI); 6603 MachineFunction *MF = IfEntry->getParent(); 6604 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 6605 6606 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6607 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6608 MachineInstr *SIIF = 6609 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 6610 .add(Branch->getOperand(0)) 6611 .add(Branch->getOperand(1)); 6612 MachineInstr *SIEND = 6613 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 6614 .addReg(DstReg); 6615 6616 IfEntry->erase(TI); 6617 IfEntry->insert(IfEntry->end(), SIIF); 6618 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 6619 } 6620 } 6621 6622 void SIInstrInfo::convertNonUniformLoopRegion( 6623 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 6624 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 6625 // We expect 2 terminators, one conditional and one unconditional. 
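// getFirstTerminator() returns the conditional branch when both are present;
// only a SI_NON_UNIFORM_BRCOND_PSEUDO terminator is rewritten below.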
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {

    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}

ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" },
    { MO_ABS32_LO, "amdgpu-abs32-lo" },
    { MO_ABS32_HI, "amdgpu-abs32-hi" },
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           Register DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
      .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               const DebugLoc &DL,
                                               Register DestReg,
                                               RegScavenger &RS) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);

  // If available, prefer to use vcc.
  Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
                             ? Register(RI.getVCC())
                             : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);

  // TODO: Users need to deal with this.
  if (!UnusedCarry.isValid())
    return MachineInstrBuilder();

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
      .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
  if (!ST.isWave32())
    return;

  for (auto &Op : MI.implicit_operands()) {
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
      Op.setReg(AMDGPU::VCC_LO);
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
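  // Buffer SMRDs take a 128-bit resource descriptor in sbase rather than a
  // plain 64-bit address, so the operand's register class must be SGPR_128.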
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}

unsigned SIInstrInfo::getNumFlatOffsetBits(bool Signed) const {
  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
                                    bool Signed) const {
  // TODO: Should 0 be special cased?
  if (!ST.hasFlatInstOffsets())
    return false;

  if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
    return Signed ? isInt<12>(Offset) : isUInt<11>(Offset);

  return Signed ? isInt<13>(Offset) : isUInt<12>(Offset);
}


// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5,
  GFX10 = 6,
  SDWA10 = 7
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  case AMDGPUSubtarget::GFX10:
    return SIEncodingFamily::GFX10;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
  switch (MCOp) {
  // These opcodes use indirect register addressing so
  // they need special handling by codegen (currently missing).
  // Therefore it is too risky to allow these opcodes
  // to be selected by the DPP combiner or the SDWA peephole.
  case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
    return true;
  default:
    return false;
  }
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() == AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
    switch (ST.getGeneration()) {
    default:
      Gen = SIEncodingFamily::SDWA;
      break;
    case AMDGPUSubtarget::GFX9:
      Gen = SIEncodingFamily::SDWA9;
      break;
    case AMDGPUSubtarget::GFX10:
      Gen = SIEncodingFamily::SDWA10;
      break;
    }
  }

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  if (isAsmOnlyOpcode(MCOp))
    return -1;

  return MCOp;
}

static
TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
                             getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // inserted the subreg we're looking for
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // the subreg in the rest of the reg
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // subreg of subreg isn't supported
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!P.Reg.isVirtual())
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Op1.getReg().isVirtual()) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}

bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan at the use.
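  // Only the instructions strictly between the def and the use are inspected.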
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseInstScan = 10;
  int NumUseInst = 0;

  for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) {
    // Don't bother searching between blocks, although it is possible this block
    // doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUseInst > MaxUseInstScan)
      return true;
  }

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->readsRegister(VReg))
      if (--NumUseInst == 0)
        return false;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }
}

MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying to
  // a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  // A similar issue also exists with spilling and reloading $exec registers.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
        (DstReg.isVirtual() != SrcReg.isVirtual())) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
      const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
      if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
        return nullptr;
      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
        return nullptr;
      }
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}

unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions
    return 0;
  }
}