//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
} // namespace AMDGPU
} // namespace llvm

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                   AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this case.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::V_ACCVGPR_READ_B32:
  case AMDGPU::V_ACCVGPR_WRITE_B32:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
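    // (The read2/write2 forms carry offset0/offset1 rather than a single
    // offset operand, so the named-operand lookup below returns -1 for them.)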
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      Width = getOpSize(LdSt, DataOpIdx);
    } else {
      // The two offset instructions use offset0 and offset1 instead. We can
      // treat these as a load with a single offset if the two offsets are
      // consecutive. We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm();
      unsigned Offset1 = Offset1Op->getImm();
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to
      // convert to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = getOpSize(LdSt, DataOpIdx);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width += getOpSize(LdSt, DataOpIdx);
      } else {
        Width = getOpSize(LdSt, DataOpIdx);
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg()) {
      // We can only handle this if it's a stack access, as any other resource
      // would require reporting multiple base registers.
      const MachineOperand *AddrReg =
          getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (AddrReg && !AddrReg->isFI())
        return false;

      const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      const SIMachineFunctionInfo *MFI
        = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>();
      if (RSrc->getReg() != MFI->getScratchRSrcReg())
        return false;

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      BaseOps.push_back(RSrc);
      BaseOps.push_back(SOffset);
      Offset = OffsetImm->getImm();
    } else {
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
      if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL
        return false;
      BaseOps.push_back(BaseOp);

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
      if (BaseOp)
        BaseOps.push_back(BaseOp);

      const MachineOperand *OffsetImm =
          getNamedOperand(LdSt, AMDGPU::OpName::offset);
      Offset = OffsetImm->getImm();
      if (SOffset) // soffset can be an inline immediate.
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isMIMG(LdSt)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  assert(!BaseOps1.empty() && !BaseOps2.empty());
  const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
  const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
    return false;

  // To avoid register pressure, on average the number of DWORDs loaded
  // together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is
  // a brief summary of how the heuristic behaves for various `LoadSize`:
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
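//
// For a concrete reading of the check below: two loads at offsets 0 and 48
// fall within one 64-byte cacheline and are scheduled together (given at most
// 16 loads in the run), while loads at offsets 0 and 128 are not.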
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have at most 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to copy directly, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find a defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
      bool SafeToPropagate = true;
      for (auto I = Def; I != MI && SafeToPropagate; ++I)
        if (I->modifiesRegister(DefOp.getReg(), &RI))
          SafeToPropagate = false;

      if (!SafeToPropagate)
        break;

      DefOp.setIsKill(false);
    }

    MachineInstrBuilder Builder =
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .add(DefOp);
    if (ImpDefSuperReg)
      Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,
                     getKillRegState(KillSrc) | RegState::Implicit);
    }

    return;
  }

  RS.enterBasicBlock(MBB);
  RS.forward(MI);

  // Ideally we want to have three registers for a long reg_sequence copy
  // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
  unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                             *MBB.getParent());

  // Registers in the sequence are allocated contiguously so we can just
  // use register number to pick one of three round-robin temps.
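  // Successive sub-registers differ by one register number, so DestReg % 3
  // rotates the choice of temporary across elements of the sequence.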
  unsigned RegNo = DestReg % 3;
  Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
  if (!Tmp)
    report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  RS.setRegUsed(Tmp);
  // Only loop through if there are any free registers left; otherwise the
  // scavenger may report a fatal error without an emergency spill slot,
  // or spill with the slot.
  while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
    Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
    if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
      break;
    Tmp = Tmp2;
    RS.setRegUsed(Tmp);
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder =
    BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
      .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16 bit and 32 bit
  // registers until all patterns are fixed.
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
        .addImm(1)
        .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
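        // (A VReg_1 value lives in a 32-bit VGPR; the compare against zero
        // below rebuilds the per-lane mask in VCC from it.)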
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
        .addImm(1)
        .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    // TODO: Use S_BITCMP0_B32 instead and only consider the 0th bit.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      SrcReg = RI.getSubReg(SrcReg, AMDGPU::sub0);
    }
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));

    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(0);

    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block
    // on every AGPR spill.
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
      .addImm(0) // src0_modifiers
      .addReg(NewSrcReg)
      .addImm(0) // clamp
      .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                     : AMDGPU::SDWA::SdwaSel::WORD_1)
      .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
      .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                     : AMDGPU::SDWA::SdwaSel::WORD_1)
      .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32.
    if (!(RI.getRegSizeInBits(*RC) % 64)) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  } else if (RI.hasAGPRs(RC)) {
    Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32;
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (the result is an AGPR, and the source is either an SGPR or AGPR), we need
  // a scavenger.
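  // (There is no direct SGPR-to-AGPR or AGPR-to-AGPR move, so
  // indirectCopyToAGPR stages the value through a VGPR temporary.)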
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan
  // the whole block for every handled copy.
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS.reset(new RegScavenger());

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                         RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
                         ImpDefSuper, ImpUseSuper);
    } else {
      MachineInstrBuilder Builder =
        BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
          .addReg(RI.getSubReg(SrcReg, SubIdx));
      if (Idx == 0)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode.
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode.
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineFunction *MF = MBB.getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo(
    unsigned VecSize, unsigned EltSize, bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWritePseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWritePseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWritePseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end
    // up needing them, and need to ensure that the reserved registers are
    // correctly handled.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)           // vaddr
    .addReg(MFI->getScratchRSrcReg())    // scratch_rsrc
    .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
    .addImm(0)                           // offset
    .addMemOperand(MMO);
}

/// \param FrameOffset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  Register TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    const DebugLoc &DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      Register TIDIGXReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
      Register TIDIGYReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
      Register TIDIGZReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
      Register InputPtrReg =
        MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
      Register STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      Register STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg)
        .addImm(0); // clamp bit
    } else {
      // Get the wave id.
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset.
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg)
    .addImm(0); // clamp bit

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?
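
  // s_nop's immediate encodes one fewer than the number of wait states
  // (matching insertWaitStates above), hence the +1 below.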
  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B32));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_XOR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B32));
    break;

  case AMDGPU::S_OR_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_OR_B32));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::S_ANDN2_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B32));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    Register Dst = MI.getOperand(0).getReg();
    Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addImm(Imm.getLoBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addImm(Imm.getHiBits(32).getZExtValue())
        .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
        .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
        .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
        .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOV_B64_DPP_PSEUDO: {
    expandMovDPP64(MI);
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64;
    unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1698 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1699 .addReg(Exec); 1700 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1701 .add(MI.getOperand(2)); 1702 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1703 .addReg(Exec); 1704 MI.eraseFromParent(); 1705 break; 1706 } 1707 case AMDGPU::V_SET_INACTIVE_B64: { 1708 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1709 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1710 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1711 .addReg(Exec); 1712 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1713 MI.getOperand(0).getReg()) 1714 .add(MI.getOperand(2)); 1715 expandPostRAPseudo(*Copy); 1716 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1717 .addReg(Exec); 1718 MI.eraseFromParent(); 1719 break; 1720 } 1721 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1722 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1723 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1724 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1725 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1726 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1727 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1728 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1729 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1730 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1731 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1732 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1733 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1734 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1735 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1736 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1737 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1738 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1739 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1740 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1741 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1742 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1743 1744 unsigned Opc; 1745 if (RI.hasVGPRs(EltRC)) { 1746 Opc = ST.useVGPRIndexMode() ? 1747 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1748 } else { 1749 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1750 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1751 } 1752 1753 const MCInstrDesc &OpDesc = get(Opc); 1754 Register VecReg = MI.getOperand(0).getReg(); 1755 bool IsUndef = MI.getOperand(1).isUndef(); 1756 unsigned SubReg = MI.getOperand(3).getImm(); 1757 assert(VecReg == MI.getOperand(1).getReg()); 1758 1759 MachineInstrBuilder MIB = 1760 BuildMI(MBB, MI, DL, OpDesc) 1761 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1762 .add(MI.getOperand(2)) 1763 .addReg(VecReg, RegState::ImplicitDefine) 1764 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1765 1766 const int ImpDefIdx = 1767 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1768 const int ImpUseIdx = ImpDefIdx + 1; 1769 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1770 MI.eraseFromParent(); 1771 break; 1772 } 1773 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1774 MachineFunction &MF = *MBB.getParent(); 1775 Register Reg = MI.getOperand(0).getReg(); 1776 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1777 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1778 1779 // Create a bundle so these instructions won't be re-ordered by the 1780 // post-RA scheduler. 1781 MIBundleBuilder Bundler(MBB, MI); 1782 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1783 1784 // Add 32-bit offset from this instruction to the start of the 1785 // constant data. 
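    // The completed bundle computes an absolute address, schematically
    // (a sketch with illustrative registers; the real operands come from
    // the pseudo's operands):
    //   s_getpc_b64 s[0:1]
    //   s_add_u32   s0, s0, <symbol>@rel32@lo
    //   s_addc_u32  s1, s1, <symbol>@rel32@hi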
1786 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1787 .addReg(RegLo) 1788 .add(MI.getOperand(1))); 1789 1790 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1791 .addReg(RegHi); 1792 MIB.add(MI.getOperand(2)); 1793 1794 Bundler.append(MIB); 1795 finalizeBundle(MBB, Bundler.begin()); 1796 1797 MI.eraseFromParent(); 1798 break; 1799 } 1800 case AMDGPU::ENTER_WWM: { 1801 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1802 // WWM is entered. 1803 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1804 : AMDGPU::S_OR_SAVEEXEC_B64)); 1805 break; 1806 } 1807 case AMDGPU::EXIT_WWM: { 1808 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1809 // WWM is exited. 1810 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1811 break; 1812 } 1813 } 1814 return true; 1815 } 1816 1817 std::pair<MachineInstr*, MachineInstr*> 1818 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1819 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1820 1821 MachineBasicBlock &MBB = *MI.getParent(); 1822 DebugLoc DL = MBB.findDebugLoc(MI); 1823 MachineFunction *MF = MBB.getParent(); 1824 MachineRegisterInfo &MRI = MF->getRegInfo(); 1825 Register Dst = MI.getOperand(0).getReg(); 1826 unsigned Part = 0; 1827 MachineInstr *Split[2]; 1828 1829 1830 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1831 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1832 if (Dst.isPhysical()) { 1833 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1834 } else { 1835 assert(MRI.isSSA()); 1836 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1837 MovDPP.addDef(Tmp); 1838 } 1839 1840 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1841 const MachineOperand &SrcOp = MI.getOperand(I); 1842 assert(!SrcOp.isFPImm()); 1843 if (SrcOp.isImm()) { 1844 APInt Imm(64, SrcOp.getImm()); 1845 Imm.ashrInPlace(Part * 32); 1846 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1847 } else { 1848 assert(SrcOp.isReg()); 1849 Register Src = SrcOp.getReg(); 1850 if (Src.isPhysical()) 1851 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1852 else 1853 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 1854 } 1855 } 1856 1857 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1858 MovDPP.addImm(MI.getOperand(I).getImm()); 1859 1860 Split[Part] = MovDPP; 1861 ++Part; 1862 } 1863 1864 if (Dst.isVirtual()) 1865 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1866 .addReg(Split[0]->getOperand(0).getReg()) 1867 .addImm(AMDGPU::sub0) 1868 .addReg(Split[1]->getOperand(0).getReg()) 1869 .addImm(AMDGPU::sub1); 1870 1871 MI.eraseFromParent(); 1872 return std::make_pair(Split[0], Split[1]); 1873 } 1874 1875 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1876 MachineOperand &Src0, 1877 unsigned Src0OpName, 1878 MachineOperand &Src1, 1879 unsigned Src1OpName) const { 1880 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1881 if (!Src0Mods) 1882 return false; 1883 1884 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1885 assert(Src1Mods && 1886 "All commutable instructions have both src0 and src1 modifiers"); 1887 1888 int Src0ModsVal = Src0Mods->getImm(); 1889 int Src1ModsVal = Src1Mods->getImm(); 1890 1891 Src1Mods->setImm(Src0ModsVal); 1892 Src0Mods->setImm(Src1ModsVal); 1893 return true; 1894 } 1895 1896 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1897 MachineOperand &RegOp, 1898 MachineOperand &NonRegOp) { 1899 Register Reg = RegOp.getReg(); 1900 unsigned SubReg = RegOp.getSubReg(); 1901 bool IsKill = RegOp.isKill(); 1902 bool IsDead = RegOp.isDead(); 1903 bool IsUndef = RegOp.isUndef(); 1904 bool IsDebug = RegOp.isDebug(); 1905 1906 if (NonRegOp.isImm()) 1907 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1908 else if (NonRegOp.isFI()) 1909 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1910 else if (NonRegOp.isGlobal()) { 1911 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 1912 NonRegOp.getTargetFlags()); 1913 } else 1914 return nullptr; 1915 1916 // Make sure we don't reinterpret a subreg index in the target flags. 1917 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 1918 1919 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1920 NonRegOp.setSubReg(SubReg); 1921 1922 return &MI; 1923 } 1924 1925 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1926 unsigned Src0Idx, 1927 unsigned Src1Idx) const { 1928 assert(!NewMI && "this should never be used"); 1929 1930 unsigned Opc = MI.getOpcode(); 1931 int CommutedOpcode = commuteOpcode(Opc); 1932 if (CommutedOpcode == -1) 1933 return nullptr; 1934 1935 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1936 static_cast<int>(Src0Idx) && 1937 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1938 static_cast<int>(Src1Idx) && 1939 "inconsistency with findCommutedOpIndices"); 1940 1941 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1942 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1943 1944 MachineInstr *CommutedMI = nullptr; 1945 if (Src0.isReg() && Src1.isReg()) { 1946 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1947 // Be sure to copy the source modifiers to the right place. 1948 CommutedMI 1949 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1950 } 1951 1952 } else if (Src0.isReg() && !Src1.isReg()) { 1953 // src0 should always be able to support any operand type, so no need to 1954 // check operand legality. 
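    // E.g. commuting "v_sub_f32 v0, v1, 2.0" moves the literal into src0;
    // the opcode itself is swapped to the *rev form by the setDesc at the
    // end of this function (sketch).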
1955 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1956 } else if (!Src0.isReg() && Src1.isReg()) { 1957 if (isOperandLegal(MI, Src1Idx, &Src0)) 1958 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1959 } else { 1960 // FIXME: Found two non registers to commute. This does happen. 1961 return nullptr; 1962 } 1963 1964 if (CommutedMI) { 1965 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1966 Src1, AMDGPU::OpName::src1_modifiers); 1967 1968 CommutedMI->setDesc(get(CommutedOpcode)); 1969 } 1970 1971 return CommutedMI; 1972 } 1973 1974 // This needs to be implemented because the source modifiers may be inserted 1975 // between the true commutable operands, and the base 1976 // TargetInstrInfo::commuteInstruction uses it. 1977 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1978 unsigned &SrcOpIdx0, 1979 unsigned &SrcOpIdx1) const { 1980 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1981 } 1982 1983 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1984 unsigned &SrcOpIdx1) const { 1985 if (!Desc.isCommutable()) 1986 return false; 1987 1988 unsigned Opc = Desc.getOpcode(); 1989 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1990 if (Src0Idx == -1) 1991 return false; 1992 1993 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1994 if (Src1Idx == -1) 1995 return false; 1996 1997 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1998 } 1999 2000 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 2001 int64_t BrOffset) const { 2002 // BranchRelaxation should never have to check s_setpc_b64 because its dest 2003 // block is unanalyzable. 2004 assert(BranchOp != AMDGPU::S_SETPC_B64); 2005 2006 // Convert to dwords. 2007 BrOffset /= 4; 2008 2009 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 2010 // from the next instruction. 2011 BrOffset -= 1; 2012 2013 return isIntN(BranchOffsetBits, BrOffset); 2014 } 2015 2016 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 2017 const MachineInstr &MI) const { 2018 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 2019 // This would be a difficult analysis to perform, but can always be legal so 2020 // there's no need to analyze it. 2021 return nullptr; 2022 } 2023 2024 return MI.getOperand(0).getMBB(); 2025 } 2026 2027 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 2028 MachineBasicBlock &DestBB, 2029 const DebugLoc &DL, 2030 int64_t BrOffset, 2031 RegScavenger *RS) const { 2032 assert(RS && "RegScavenger required for long branching"); 2033 assert(MBB.empty() && 2034 "new block should be inserted for expanding unconditional branch"); 2035 assert(MBB.pred_size() == 1); 2036 2037 MachineFunction *MF = MBB.getParent(); 2038 MachineRegisterInfo &MRI = MF->getRegInfo(); 2039 2040 // FIXME: Virtual register workaround for RegScavenger not working with empty 2041 // blocks. 2042 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2043 2044 auto I = MBB.end(); 2045 2046 // We need to compute the offset relative to the instruction immediately after 2047 // s_getpc_b64. Insert pc arithmetic code before last terminator. 2048 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2049 2050 // TODO: Handle > 32-bit block address. 
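  // The full expansion for a forward branch is, roughly (the SGPR pair is
  // scavenged below; names here are illustrative):
  //   s_getpc_b64 s[N:N+1]
  //   s_add_u32   sN, sN, (DestBB - PostGetPC), low 32 bits
  //   s_addc_u32  sN+1, sN+1, 0
  //   s_setpc_b64 s[N:N+1]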
2051   if (BrOffset >= 0) {
2052     BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
2053       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2054       .addReg(PCReg, 0, AMDGPU::sub0)
2055       .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD);
2056     BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
2057       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2058       .addReg(PCReg, 0, AMDGPU::sub1)
2059       .addImm(0);
2060   } else {
2061     // Backwards branch.
2062     BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
2063       .addReg(PCReg, RegState::Define, AMDGPU::sub0)
2064       .addReg(PCReg, 0, AMDGPU::sub0)
2065       .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD);
2066     BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
2067       .addReg(PCReg, RegState::Define, AMDGPU::sub1)
2068       .addReg(PCReg, 0, AMDGPU::sub1)
2069       .addImm(0);
2070   }
2071
2072   // Insert the indirect branch after the other terminator.
2073   BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
2074     .addReg(PCReg);
2075
2076   // FIXME: If spilling is necessary, this will fail because this scavenger has
2077   // no emergency stack slots. It is non-trivial to spill in this situation,
2078   // because the restore code needs to be specially placed after the
2079   // jump. BranchRelaxation then needs to be made aware of the newly inserted
2080   // block.
2081   //
2082   // If a spill is needed for the pc register pair, we need to insert a spill
2083   // restore block right before the destination block, and insert a short branch
2084   // into the old destination block's fallthrough predecessor.
2085   // e.g.:
2086   //
2087   // s_cbranch_scc0 skip_long_branch:
2088   //
2089   // long_branch_bb:
2090   //   spill s[8:9]
2091   //   s_getpc_b64 s[8:9]
2092   //   s_add_u32 s8, s8, restore_bb
2093   //   s_addc_u32 s9, s9, 0
2094   //   s_setpc_b64 s[8:9]
2095   //
2096   // skip_long_branch:
2097   //   foo;
2098   //
2099   // .....
2100   //
2101   // dest_bb_fallthrough_predecessor:
2102   //   bar;
2103   //   s_branch dest_bb
2104   //
2105   // restore_bb:
2106   //   restore s[8:9]
2107   //   fallthrough dest_bb
2108   //
2109   // dest_bb:
2110   //   buzz;
2111
2112   RS->enterBasicBlockEnd(MBB);
2113   unsigned Scav = RS->scavengeRegisterBackwards(
2114     AMDGPU::SReg_64RegClass,
2115     MachineBasicBlock::iterator(GetPC), false, 0);
2116   MRI.replaceRegWith(PCReg, Scav);
2117   MRI.clearVirtRegs();
2118   RS->setRegUsed(Scav);
2119
2120   return 4 + 8 + 4 + 4;
2121 }
2122
2123 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
2124   switch (Cond) {
2125   case SIInstrInfo::SCC_TRUE:
2126     return AMDGPU::S_CBRANCH_SCC1;
2127   case SIInstrInfo::SCC_FALSE:
2128     return AMDGPU::S_CBRANCH_SCC0;
2129   case SIInstrInfo::VCCNZ:
2130     return AMDGPU::S_CBRANCH_VCCNZ;
2131   case SIInstrInfo::VCCZ:
2132     return AMDGPU::S_CBRANCH_VCCZ;
2133   case SIInstrInfo::EXECNZ:
2134     return AMDGPU::S_CBRANCH_EXECNZ;
2135   case SIInstrInfo::EXECZ:
2136     return AMDGPU::S_CBRANCH_EXECZ;
2137   default:
2138     llvm_unreachable("invalid branch predicate");
2139   }
2140 }
2141
2142 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
2143   switch (Opcode) {
2144   case AMDGPU::S_CBRANCH_SCC0:
2145     return SCC_FALSE;
2146   case AMDGPU::S_CBRANCH_SCC1:
2147     return SCC_TRUE;
2148   case AMDGPU::S_CBRANCH_VCCNZ:
2149     return VCCNZ;
2150   case AMDGPU::S_CBRANCH_VCCZ:
2151     return VCCZ;
2152   case AMDGPU::S_CBRANCH_EXECNZ:
2153     return EXECNZ;
2154   case AMDGPU::S_CBRANCH_EXECZ:
2155     return EXECZ;
2156   default:
2157     return INVALID_BR;
2158   }
2159 }
2160
2161 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
2162                                     MachineBasicBlock::iterator I,
2163                                     MachineBasicBlock *&TBB,
2164                                     MachineBasicBlock *&FBB,
2165
SmallVectorImpl<MachineOperand> &Cond, 2166 bool AllowModify) const { 2167 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2168 // Unconditional Branch 2169 TBB = I->getOperand(0).getMBB(); 2170 return false; 2171 } 2172 2173 MachineBasicBlock *CondBB = nullptr; 2174 2175 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2176 CondBB = I->getOperand(1).getMBB(); 2177 Cond.push_back(I->getOperand(0)); 2178 } else { 2179 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2180 if (Pred == INVALID_BR) 2181 return true; 2182 2183 CondBB = I->getOperand(0).getMBB(); 2184 Cond.push_back(MachineOperand::CreateImm(Pred)); 2185 Cond.push_back(I->getOperand(1)); // Save the branch register. 2186 } 2187 ++I; 2188 2189 if (I == MBB.end()) { 2190 // Conditional branch followed by fall-through. 2191 TBB = CondBB; 2192 return false; 2193 } 2194 2195 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2196 TBB = CondBB; 2197 FBB = I->getOperand(0).getMBB(); 2198 return false; 2199 } 2200 2201 return true; 2202 } 2203 2204 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2205 MachineBasicBlock *&FBB, 2206 SmallVectorImpl<MachineOperand> &Cond, 2207 bool AllowModify) const { 2208 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2209 auto E = MBB.end(); 2210 if (I == E) 2211 return false; 2212 2213 // Skip over the instructions that are artificially terminators for special 2214 // exec management. 2215 while (I != E && !I->isBranch() && !I->isReturn() && 2216 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2217 switch (I->getOpcode()) { 2218 case AMDGPU::SI_MASK_BRANCH: 2219 case AMDGPU::S_MOV_B64_term: 2220 case AMDGPU::S_XOR_B64_term: 2221 case AMDGPU::S_ANDN2_B64_term: 2222 case AMDGPU::S_MOV_B32_term: 2223 case AMDGPU::S_XOR_B32_term: 2224 case AMDGPU::S_OR_B32_term: 2225 case AMDGPU::S_ANDN2_B32_term: 2226 break; 2227 case AMDGPU::SI_IF: 2228 case AMDGPU::SI_ELSE: 2229 case AMDGPU::SI_KILL_I1_TERMINATOR: 2230 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2231 // FIXME: It's messy that these need to be considered here at all. 2232 return true; 2233 default: 2234 llvm_unreachable("unexpected non-branch terminator inst"); 2235 } 2236 2237 ++I; 2238 } 2239 2240 if (I == E) 2241 return false; 2242 2243 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2244 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2245 2246 ++I; 2247 2248 // TODO: Should be able to treat as fallthrough? 2249 if (I == MBB.end()) 2250 return true; 2251 2252 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2253 return true; 2254 2255 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2256 2257 // Specifically handle the case where the conditional branch is to the same 2258 // destination as the mask branch. e.g. 2259 // 2260 // si_mask_branch BB8 2261 // s_cbranch_execz BB8 2262 // s_cbranch BB9 2263 // 2264 // This is required to understand divergent loops which may need the branches 2265 // to be relaxed. 
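  // Anything other than an exec-mask predicate branching to the mask-branch
  // destination is reported as unanalyzable below.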
2266   if (TBB != MaskBrDest || Cond.empty())
2267     return true;
2268
2269   auto Pred = Cond[0].getImm();
2270   return (Pred != EXECZ && Pred != EXECNZ);
2271 }
2272
2273 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
2274                                    int *BytesRemoved) const {
2275   MachineBasicBlock::iterator I = MBB.getFirstTerminator();
2276
2277   unsigned Count = 0;
2278   unsigned RemovedSize = 0;
2279   while (I != MBB.end()) {
2280     MachineBasicBlock::iterator Next = std::next(I);
2281     if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
2282       I = Next;
2283       continue;
2284     }
2285
2286     RemovedSize += getInstSizeInBytes(*I);
2287     I->eraseFromParent();
2288     ++Count;
2289     I = Next;
2290   }
2291
2292   if (BytesRemoved)
2293     *BytesRemoved = RemovedSize;
2294
2295   return Count;
2296 }
2297
2298 // Copy the flags onto the implicit condition register operand.
2299 static void preserveCondRegFlags(MachineOperand &CondReg,
2300                                  const MachineOperand &OrigCond) {
2301   CondReg.setIsUndef(OrigCond.isUndef());
2302   CondReg.setIsKill(OrigCond.isKill());
2303 }
2304
2305 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2306                                    MachineBasicBlock *TBB,
2307                                    MachineBasicBlock *FBB,
2308                                    ArrayRef<MachineOperand> Cond,
2309                                    const DebugLoc &DL,
2310                                    int *BytesAdded) const {
2311   if (!FBB && Cond.empty()) {
2312     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2313       .addMBB(TBB);
2314     if (BytesAdded)
2315       *BytesAdded = 4;
2316     return 1;
2317   }
2318
2319   if (Cond.size() == 1 && Cond[0].isReg()) {
2320     BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2321       .add(Cond[0])
2322       .addMBB(TBB);
2323     return 1;
2324   }
2325
2326   assert(TBB && Cond[0].isImm());
2327
2328   unsigned Opcode
2329     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2330
2331   if (!FBB) {
2332
2333     MachineInstr *CondBr =
2334       BuildMI(&MBB, DL, get(Opcode))
2335       .addMBB(TBB);
2336
2337     // Copy the flags onto the implicit condition register operand.
2338     preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2339     fixImplicitOperands(*CondBr);
2340
2341     if (BytesAdded)
2342       *BytesAdded = 4;
2343     return 1;
2344   }
2345
2346   assert(TBB && FBB);
2347
2348   MachineInstr *CondBr =
2349     BuildMI(&MBB, DL, get(Opcode))
2350     .addMBB(TBB);
2351   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2352     .addMBB(FBB);
2353
2354   MachineOperand &CondReg = CondBr->getOperand(1);
2355   CondReg.setIsUndef(Cond[1].isUndef());
2356   CondReg.setIsKill(Cond[1].isKill());
2357
2358   if (BytesAdded)
2359     *BytesAdded = 8;
2360
2361   return 2;
2362 }
2363
2364 bool SIInstrInfo::reverseBranchCondition(
2365   SmallVectorImpl<MachineOperand> &Cond) const {
2366   if (Cond.size() != 2) {
2367     return true;
2368   }
2369
2370   if (Cond[0].isImm()) {
2371     Cond[0].setImm(-Cond[0].getImm());
2372     return false;
2373   }
2374
2375   return true;
2376 }
2377
2378 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2379                                   ArrayRef<MachineOperand> Cond,
2380                                   Register DstReg, Register TrueReg,
2381                                   Register FalseReg, int &CondCycles,
2382                                   int &TrueCycles, int &FalseCycles) const {
2383   switch (Cond[0].getImm()) {
2384   case VCCNZ:
2385   case VCCZ: {
2386     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2387     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2388     if (MRI.getRegClass(FalseReg) != RC)
2389       return false;
2390
2391     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2392     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2393
2394     // Limit to equal cost for branch vs. N v_cndmask_b32s.
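    // E.g. a 128-bit value needs 4 v_cndmask_b32s; anything wider than
    // 192 bits (6 pieces) is rejected in favor of a branch.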
2395     return RI.hasVGPRs(RC) && NumInsts <= 6;
2396   }
2397   case SCC_TRUE:
2398   case SCC_FALSE: {
2399     // FIXME: We could insert for VGPRs if we could replace the original compare
2400     // with a vector one.
2401     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2402     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2403     if (MRI.getRegClass(FalseReg) != RC)
2404       return false;
2405
2406     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2407
2408     // An even number of 32-bit pieces can be combined into s_cselect_b64s.
2409     if (NumInsts % 2 == 0)
2410       NumInsts /= 2;
2411
2412     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2413     return RI.isSGPRClass(RC);
2414   }
2415   default:
2416     return false;
2417   }
2418 }
2419
2420 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB,
2421                                MachineBasicBlock::iterator I, const DebugLoc &DL,
2422                                Register DstReg, ArrayRef<MachineOperand> Cond,
2423                                Register TrueReg, Register FalseReg) const {
2424   BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm());
2425   if (Pred == VCCZ || Pred == SCC_FALSE) {
2426     Pred = static_cast<BranchPredicate>(-Pred);
2427     std::swap(TrueReg, FalseReg);
2428   }
2429
2430   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2431   const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
2432   unsigned DstSize = RI.getRegSizeInBits(*DstRC);
2433
2434   if (DstSize == 32) {
2435     MachineInstr *Select;
2436     if (Pred == SCC_TRUE) {
2437       Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg)
2438         .addReg(TrueReg)
2439         .addReg(FalseReg);
2440     } else {
2441       // Instruction's operands are backwards from what is expected.
2442       Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg)
2443         .addReg(FalseReg)
2444         .addReg(TrueReg);
2445     }
2446
2447     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2448     return;
2449   }
2450
2451   if (DstSize == 64 && Pred == SCC_TRUE) {
2452     MachineInstr *Select =
2453       BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg)
2454       .addReg(TrueReg)
2455       .addReg(FalseReg);
2456
2457     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2458     return;
2459   }
2460
2461   static const int16_t Sub0_15[] = {
2462     AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3,
2463     AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7,
2464     AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11,
2465     AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15,
2466   };
2467
2468   static const int16_t Sub0_15_64[] = {
2469     AMDGPU::sub0_sub1, AMDGPU::sub2_sub3,
2470     AMDGPU::sub4_sub5, AMDGPU::sub6_sub7,
2471     AMDGPU::sub8_sub9, AMDGPU::sub10_sub11,
2472     AMDGPU::sub12_sub13, AMDGPU::sub14_sub15,
2473   };
2474
2475   unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32;
2476   const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass;
2477   const int16_t *SubIndices = Sub0_15;
2478   int NElts = DstSize / 32;
2479
2480   // 64-bit select is only available for SALU.
2481   // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit.
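  // E.g. a 256-bit SGPR select on SCC becomes 4 s_cselect_b64s over the
  // sub0_sub1 .. sub6_sub7 pairs; odd piece counts (e.g. 96-bit) fall back
  // to per-dword s_cselect_b32 (sketch of the expansion chosen below).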
2482   if (Pred == SCC_TRUE) {
2483     if (NElts % 2) {
2484       SelOp = AMDGPU::S_CSELECT_B32;
2485       EltRC = &AMDGPU::SGPR_32RegClass;
2486     } else {
2487       SelOp = AMDGPU::S_CSELECT_B64;
2488       EltRC = &AMDGPU::SGPR_64RegClass;
2489       SubIndices = Sub0_15_64;
2490       NElts /= 2;
2491     }
2492   }
2493
2494   MachineInstrBuilder MIB = BuildMI(
2495     MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg);
2496
2497   I = MIB->getIterator();
2498
2499   SmallVector<Register, 8> Regs;
2500   for (int Idx = 0; Idx != NElts; ++Idx) {
2501     Register DstElt = MRI.createVirtualRegister(EltRC);
2502     Regs.push_back(DstElt);
2503
2504     unsigned SubIdx = SubIndices[Idx];
2505
2506     MachineInstr *Select;
2507     if (SelOp == AMDGPU::V_CNDMASK_B32_e32) {
2508       Select =
2509         BuildMI(MBB, I, DL, get(SelOp), DstElt)
2510         .addReg(FalseReg, 0, SubIdx)
2511         .addReg(TrueReg, 0, SubIdx);
2512     } else {
2513       Select =
2514         BuildMI(MBB, I, DL, get(SelOp), DstElt)
2515         .addReg(TrueReg, 0, SubIdx)
2516         .addReg(FalseReg, 0, SubIdx);
2517     }
2518
2519     preserveCondRegFlags(Select->getOperand(3), Cond[1]);
2520     fixImplicitOperands(*Select);
2521
2522     MIB.addReg(DstElt)
2523       .addImm(SubIdx);
2524   }
2525 }
2526
2527 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const {
2528   switch (MI.getOpcode()) {
2529   case AMDGPU::V_MOV_B32_e32:
2530   case AMDGPU::V_MOV_B32_e64:
2531   case AMDGPU::V_MOV_B64_PSEUDO: {
2532     // If there are additional implicit register operands, this may be used for
2533     // register indexing so the source register operand isn't simply copied.
2534     unsigned NumOps = MI.getDesc().getNumOperands() +
2535       MI.getDesc().getNumImplicitUses();
2536
2537     return MI.getNumOperands() == NumOps;
2538   }
2539   case AMDGPU::S_MOV_B32:
2540   case AMDGPU::S_MOV_B64:
2541   case AMDGPU::COPY:
2542   case AMDGPU::V_ACCVGPR_WRITE_B32:
2543   case AMDGPU::V_ACCVGPR_READ_B32:
2544     return true;
2545   default:
2546     return false;
2547   }
2548 }
2549
2550 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2551   unsigned Kind) const {
2552   switch (Kind) {
2553   case PseudoSourceValue::Stack:
2554   case PseudoSourceValue::FixedStack:
2555     return AMDGPUAS::PRIVATE_ADDRESS;
2556   case PseudoSourceValue::ConstantPool:
2557   case PseudoSourceValue::GOT:
2558   case PseudoSourceValue::JumpTable:
2559   case PseudoSourceValue::GlobalValueCallEntry:
2560   case PseudoSourceValue::ExternalSymbolCallEntry:
2561   case PseudoSourceValue::TargetCustom:
2562     return AMDGPUAS::CONSTANT_ADDRESS;
2563   }
2564   return AMDGPUAS::FLAT_ADDRESS;
2565 }
2566
2567 static void removeModOperands(MachineInstr &MI) {
2568   unsigned Opc = MI.getOpcode();
2569   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2570                                               AMDGPU::OpName::src0_modifiers);
2571   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2572                                               AMDGPU::OpName::src1_modifiers);
2573   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2574                                               AMDGPU::OpName::src2_modifiers);
2575
2576   MI.RemoveOperand(Src2ModIdx);
2577   MI.RemoveOperand(Src1ModIdx);
2578   MI.RemoveOperand(Src0ModIdx);
2579 }
2580
2581 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2582                                 Register Reg, MachineRegisterInfo *MRI) const {
2583   if (!MRI->hasOneNonDBGUse(Reg))
2584     return false;
2585
2586   switch (DefMI.getOpcode()) {
2587   default:
2588     return false;
2589   case AMDGPU::S_MOV_B64:
2590     // TODO: We could fold 64-bit immediates, but this gets complicated
2591     // when there are sub-registers.
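    // (A use through sub0 or sub1 would need only the matching half of the
    // 64-bit literal; that splitting is not attempted here.)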
2592 return false; 2593 2594 case AMDGPU::V_MOV_B32_e32: 2595 case AMDGPU::S_MOV_B32: 2596 case AMDGPU::V_ACCVGPR_WRITE_B32: 2597 break; 2598 } 2599 2600 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2601 assert(ImmOp); 2602 // FIXME: We could handle FrameIndex values here. 2603 if (!ImmOp->isImm()) 2604 return false; 2605 2606 unsigned Opc = UseMI.getOpcode(); 2607 if (Opc == AMDGPU::COPY) { 2608 Register DstReg = UseMI.getOperand(0).getReg(); 2609 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2610 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2611 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2612 APInt Imm(32, ImmOp->getImm()); 2613 2614 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2615 Imm = Imm.ashr(16); 2616 2617 if (RI.isAGPR(*MRI, DstReg)) { 2618 if (!isInlineConstant(Imm)) 2619 return false; 2620 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32; 2621 } 2622 2623 if (Is16Bit) { 2624 if (isVGPRCopy) 2625 return false; // Do not clobber vgpr_hi16 2626 2627 if (DstReg.isVirtual() && 2628 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2629 return false; 2630 2631 UseMI.getOperand(0).setSubReg(0); 2632 if (DstReg.isPhysical()) { 2633 DstReg = RI.get32BitRegister(DstReg); 2634 UseMI.getOperand(0).setReg(DstReg); 2635 } 2636 assert(UseMI.getOperand(1).getReg().isVirtual()); 2637 } 2638 2639 UseMI.setDesc(get(NewOpc)); 2640 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2641 UseMI.getOperand(1).setTargetFlags(0); 2642 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2643 return true; 2644 } 2645 2646 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2647 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 || 2648 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2649 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) { 2650 // Don't fold if we are using source or output modifiers. The new VOP2 2651 // instructions don't have them. 2652 if (hasAnyModifiersSet(UseMI)) 2653 return false; 2654 2655 // If this is a free constant, there's no reason to do this. 2656 // TODO: We could fold this here instead of letting SIFoldOperands do it 2657 // later. 2658 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2659 2660 // Any src operand can be used for the legality check. 2661 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2662 return false; 2663 2664 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2665 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; 2666 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2667 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; 2668 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2669 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2670 2671 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2672 // We should only expect these to be on src0 due to canonicalizations. 2673 if (Src0->isReg() && Src0->getReg() == Reg) { 2674 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2675 return false; 2676 2677 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2678 return false; 2679 2680 unsigned NewOpc = 2681 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2682 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2683 if (pseudoToMCOpcode(NewOpc) == -1) 2684 return false; 2685 2686 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 
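    // v_madmk computes dst = src0 * K + src1 with the literal K fixed at
    // operand 1, so the register formerly in src1 moves into src0 and the
    // immediate takes its slot.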
2687
2688     const int64_t Imm = ImmOp->getImm();
2689
2690     // FIXME: This would be a lot easier if we could return a new instruction
2691     // instead of having to modify in place.
2692
2693     // Remove these first since they are at the end.
2694     UseMI.RemoveOperand(
2695       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2696     UseMI.RemoveOperand(
2697       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2698
2699     Register Src1Reg = Src1->getReg();
2700     unsigned Src1SubReg = Src1->getSubReg();
2701     Src0->setReg(Src1Reg);
2702     Src0->setSubReg(Src1SubReg);
2703     Src0->setIsKill(Src1->isKill());
2704
2705     if (Opc == AMDGPU::V_MAC_F32_e64 ||
2706         Opc == AMDGPU::V_MAC_F16_e64 ||
2707         Opc == AMDGPU::V_FMAC_F32_e64 ||
2708         Opc == AMDGPU::V_FMAC_F16_e64)
2709       UseMI.untieRegOperand(
2710         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2711
2712     Src1->ChangeToImmediate(Imm);
2713
2714     removeModOperands(UseMI);
2715     UseMI.setDesc(get(NewOpc));
2716
2717     bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2718     if (DeleteDef)
2719       DefMI.eraseFromParent();
2720
2721     return true;
2722   }
2723
2724   // Added part is the constant: Use v_madak_{f16, f32}.
2725   if (Src2->isReg() && Src2->getReg() == Reg) {
2726     // Not allowed to use constant bus for another operand.
2727     // We can however allow an inline immediate as src0.
2728     bool Src0Inlined = false;
2729     if (Src0->isReg()) {
2730       // Try to inline the constant if possible.
2731       // If the def is a move of an immediate and this is its only use,
2732       // we save a VGPR here.
2733       MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2734       if (Def && Def->isMoveImmediate() &&
2735           isInlineConstant(Def->getOperand(1)) &&
2736           MRI->hasOneUse(Src0->getReg())) {
2737         Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2738         Src0Inlined = true;
2739       } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
2740                   (ST.getConstantBusLimit(Opc) <= 1 &&
2741                    RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2742                  (Register::isVirtualRegister(Src0->getReg()) &&
2743                   (ST.getConstantBusLimit(Opc) <= 1 &&
2744                    RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2745         return false;
2746       // VGPR is okay as Src0 - fallthrough
2747     }
2748
2749     if (Src1->isReg() && !Src0Inlined) {
2750       // We have one slot for an inlinable constant so far - try to fill it.
2751       MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2752       if (Def && Def->isMoveImmediate() &&
2753           isInlineConstant(Def->getOperand(1)) &&
2754           MRI->hasOneUse(Src1->getReg()) &&
2755           commuteInstruction(UseMI)) {
2756         Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2757       } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
2758                   RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2759                  (Register::isVirtualRegister(Src1->getReg()) &&
2760                   RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2761         return false;
2762       // VGPR is okay as Src1 - fallthrough
2763     }
2764
2765     unsigned NewOpc =
2766       IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
2767             : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
2768     if (pseudoToMCOpcode(NewOpc) == -1)
2769       return false;
2770
2771     const int64_t Imm = ImmOp->getImm();
2772
2773     // FIXME: This would be a lot easier if we could return a new instruction
2774     // instead of having to modify in place.
2775
2776     // Remove these first since they are at the end.
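    // Removing from the highest index down keeps the lower operand
    // indices (src0/src1/src2) valid for the edits that follow.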
2777     UseMI.RemoveOperand(
2778       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2779     UseMI.RemoveOperand(
2780       AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2781
2782     if (Opc == AMDGPU::V_MAC_F32_e64 ||
2783         Opc == AMDGPU::V_MAC_F16_e64 ||
2784         Opc == AMDGPU::V_FMAC_F32_e64 ||
2785         Opc == AMDGPU::V_FMAC_F16_e64)
2786       UseMI.untieRegOperand(
2787         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2788
2789     // ChangeToImmediate adds Src2 back to the instruction.
2790     Src2->ChangeToImmediate(Imm);
2791
2792     // These come before src2.
2793     removeModOperands(UseMI);
2794     UseMI.setDesc(get(NewOpc));
2795     // UseMI may have been commuted, leaving an SGPR in src1. An inline
2796     // constant together with an SGPR would be illegal, so legalize the
2797     // operands again.
2798     legalizeOperands(UseMI);
2799
2800     bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2801     if (DeleteDef)
2802       DefMI.eraseFromParent();
2803
2804     return true;
2805   }
2806 }
2807
2808 return false;
2809 }
2810
2811 static bool
2812 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1,
2813                            ArrayRef<const MachineOperand *> BaseOps2) {
2814   if (BaseOps1.size() != BaseOps2.size())
2815     return false;
2816   for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) {
2817     if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I]))
2818       return false;
2819   }
2820   return true;
2821 }
2822
2823 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
2824                                 int WidthB, int OffsetB) {
2825   int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2826   int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2827   int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2828   return LowOffset + LowWidth <= HighOffset;
2829 }
2830
2831 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa,
2832                                                const MachineInstr &MIb) const {
2833   SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1;
2834   int64_t Offset0, Offset1;
2835   unsigned Dummy0, Dummy1;
2836   bool Offset0IsScalable, Offset1IsScalable;
2837   if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable,
2838                                      Dummy0, &RI) ||
2839       !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable,
2840                                      Dummy1, &RI))
2841     return false;
2842
2843   if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1))
2844     return false;
2845
2846   if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) {
2847     // FIXME: Handle ds_read2 / ds_write2.
2848     return false;
2849   }
2850   unsigned Width0 = MIa.memoperands().front()->getSize();
2851   unsigned Width1 = MIb.memoperands().front()->getSize();
2852   return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1);
2853 }
2854
2855 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
2856                                                   const MachineInstr &MIb) const {
2857   assert(MIa.mayLoadOrStore() &&
2858          "MIa must load from or modify a memory location");
2859   assert(MIb.mayLoadOrStore() &&
2860          "MIb must load from or modify a memory location");
2861
2862   if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects())
2863     return false;
2864
2865   // XXX - Can we relax this between address spaces?
2866   if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
2867     return false;
2868
2869   // TODO: Should we check the address space from the MachineMemOperand? That
2870   // would allow us to distinguish objects we know don't alias based on the
2871   // underlying address space, even if it was lowered to a different one,
2872   // e.g. private accesses lowered to use MUBUF instructions on a scratch
2873   // buffer.
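  // The pairwise checks below rely (roughly) on the memory segments being
  // disjoint: same-segment pairs are compared by offset, generic FLAT may
  // alias anything, and the remaining cross-segment pairs cannot alias.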
2874   if (isDS(MIa)) {
2875     if (isDS(MIb))
2876       return checkInstOffsetsDoNotOverlap(MIa, MIb);
2877
2878     return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
2879   }
2880
2881   if (isMUBUF(MIa) || isMTBUF(MIa)) {
2882     if (isMUBUF(MIb) || isMTBUF(MIb))
2883       return checkInstOffsetsDoNotOverlap(MIa, MIb);
2884
2885     return !isFLAT(MIb) && !isSMRD(MIb);
2886   }
2887
2888   if (isSMRD(MIa)) {
2889     if (isSMRD(MIb))
2890       return checkInstOffsetsDoNotOverlap(MIa, MIb);
2891
2892     return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
2893   }
2894
2895   if (isFLAT(MIa)) {
2896     if (isFLAT(MIb))
2897       return checkInstOffsetsDoNotOverlap(MIa, MIb);
2898
2899     return false;
2900   }
2901
2902   return false;
2903 }
2904
2905 static int64_t getFoldableImm(const MachineOperand *MO) {
2906   if (!MO->isReg())
2907     return 0; // Not a register use; nothing to look through.
2908   const MachineFunction *MF = MO->getParent()->getParent()->getParent();
2909   const MachineRegisterInfo &MRI = MF->getRegInfo();
2910   auto Def = MRI.getUniqueVRegDef(MO->getReg());
2911   if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
2912       Def->getOperand(1).isImm())
2913     return Def->getOperand(1).getImm();
2914   return 0; // No foldable immediate found; callers test for non-zero.
2915 }
2916
2917 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
2918                                                  MachineInstr &MI,
2919                                                  LiveVariables *LV) const {
2920   unsigned Opc = MI.getOpcode();
2921   bool IsF16 = false;
2922   bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2923                Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;
2924
2925   switch (Opc) {
2926   default:
2927     return nullptr;
2928   case AMDGPU::V_MAC_F16_e64:
2929   case AMDGPU::V_FMAC_F16_e64:
2930     IsF16 = true;
2931     LLVM_FALLTHROUGH;
2932   case AMDGPU::V_MAC_F32_e64:
2933   case AMDGPU::V_FMAC_F32_e64:
2934     break;
2935   case AMDGPU::V_MAC_F16_e32:
2936   case AMDGPU::V_FMAC_F16_e32:
2937     IsF16 = true;
2938     LLVM_FALLTHROUGH;
2939   case AMDGPU::V_MAC_F32_e32:
2940   case AMDGPU::V_FMAC_F32_e32: {
2941     int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
2942                                              AMDGPU::OpName::src0);
2943     const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
2944     if (!Src0->isReg() && !Src0->isImm())
2945       return nullptr;
2946
2947     if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
2948       return nullptr;
2949
2950     break;
2951   }
2952   }
2953
2954   const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2955   const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
2956   const MachineOperand *Src0Mods =
2957     getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
2958   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
2959   const MachineOperand *Src1Mods =
2960     getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
2961   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
2962   const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
2963   const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
2964
2965   if (!Src0Mods && !Src1Mods && !Clamp && !Omod &&
2966       // If we have an SGPR input, we will violate the constant bus restriction.
2967       (ST.getConstantBusLimit(Opc) > 1 ||
2968        !Src0->isReg() ||
2969        !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
2970     if (auto Imm = getFoldableImm(Src2)) {
2971       unsigned NewOpc =
2972         IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
2973               : (IsF16 ?
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2974 if (pseudoToMCOpcode(NewOpc) != -1) 2975 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2976 .add(*Dst) 2977 .add(*Src0) 2978 .add(*Src1) 2979 .addImm(Imm); 2980 } 2981 unsigned NewOpc = 2982 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2983 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2984 if (auto Imm = getFoldableImm(Src1)) { 2985 if (pseudoToMCOpcode(NewOpc) != -1) 2986 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2987 .add(*Dst) 2988 .add(*Src0) 2989 .addImm(Imm) 2990 .add(*Src2); 2991 } 2992 if (auto Imm = getFoldableImm(Src0)) { 2993 if (pseudoToMCOpcode(NewOpc) != -1 && 2994 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, 2995 AMDGPU::OpName::src0), Src1)) 2996 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2997 .add(*Dst) 2998 .add(*Src1) 2999 .addImm(Imm) 3000 .add(*Src2); 3001 } 3002 } 3003 3004 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 3005 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 3006 if (pseudoToMCOpcode(NewOpc) == -1) 3007 return nullptr; 3008 3009 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3010 .add(*Dst) 3011 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3012 .add(*Src0) 3013 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3014 .add(*Src1) 3015 .addImm(0) // Src mods 3016 .add(*Src2) 3017 .addImm(Clamp ? Clamp->getImm() : 0) 3018 .addImm(Omod ? Omod->getImm() : 0); 3019 } 3020 3021 // It's not generally safe to move VALU instructions across these since it will 3022 // start using the register as a base index rather than directly. 3023 // XXX - Why isn't hasSideEffects sufficient for these? 3024 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3025 switch (MI.getOpcode()) { 3026 case AMDGPU::S_SET_GPR_IDX_ON: 3027 case AMDGPU::S_SET_GPR_IDX_MODE: 3028 case AMDGPU::S_SET_GPR_IDX_OFF: 3029 return true; 3030 default: 3031 return false; 3032 } 3033 } 3034 3035 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3036 const MachineBasicBlock *MBB, 3037 const MachineFunction &MF) const { 3038 // Skipping the check for SP writes in the base implementation. The reason it 3039 // was added was apparently due to compile time concerns. 3040 // 3041 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3042 // but is probably avoidable. 3043 3044 // Copied from base implementation. 3045 // Terminators and labels can't be scheduled around. 3046 if (MI.isTerminator() || MI.isPosition()) 3047 return true; 3048 3049 // INLINEASM_BR can jump to another block 3050 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3051 return true; 3052 3053 // Target-independent instructions do not have an implicit-use of EXEC, even 3054 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3055 // boundaries prevents incorrect movements of such instructions. 3056 3057 // TODO: Don't treat setreg with known constant that only changes MODE as 3058 // barrier. 
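  // E.g. an s_setreg touching HW_REG_MODE can change the FP rounding and
  // denormal behavior of later VALU instructions, so nothing may be moved
  // across it.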
3059 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3060 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3061 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3062 changesVGPRIndexingMode(MI); 3063 } 3064 3065 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3066 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3067 Opcode == AMDGPU::DS_GWS_INIT || 3068 Opcode == AMDGPU::DS_GWS_SEMA_V || 3069 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3070 Opcode == AMDGPU::DS_GWS_SEMA_P || 3071 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3072 Opcode == AMDGPU::DS_GWS_BARRIER; 3073 } 3074 3075 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3076 // Skip the full operand and register alias search modifiesRegister 3077 // does. There's only a handful of instructions that touch this, it's only an 3078 // implicit def, and doesn't alias any other registers. 3079 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3080 for (; ImpDef && *ImpDef; ++ImpDef) { 3081 if (*ImpDef == AMDGPU::MODE) 3082 return true; 3083 } 3084 } 3085 3086 return false; 3087 } 3088 3089 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3090 unsigned Opcode = MI.getOpcode(); 3091 3092 if (MI.mayStore() && isSMRD(MI)) 3093 return true; // scalar store or atomic 3094 3095 // This will terminate the function when other lanes may need to continue. 3096 if (MI.isReturn()) 3097 return true; 3098 3099 // These instructions cause shader I/O that may cause hardware lockups 3100 // when executed with an empty EXEC mask. 3101 // 3102 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3103 // EXEC = 0, but checking for that case here seems not worth it 3104 // given the typical code patterns. 3105 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3106 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 3107 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3108 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3109 return true; 3110 3111 if (MI.isCall() || MI.isInlineAsm()) 3112 return true; // conservative assumption 3113 3114 // A mode change is a scalar operation that influences vector instructions. 3115 if (modifiesModeRegister(MI)) 3116 return true; 3117 3118 // These are like SALU instructions in terms of effects, so it's questionable 3119 // whether we should return true for those. 3120 // 3121 // However, executing them with EXEC = 0 causes them to operate on undefined 3122 // data, which we avoid by returning true here. 3123 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 3124 return true; 3125 3126 return false; 3127 } 3128 3129 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3130 const MachineInstr &MI) const { 3131 if (MI.isMetaInstruction()) 3132 return false; 3133 3134 // This won't read exec if this is an SGPR->SGPR copy. 3135 if (MI.isCopyLike()) { 3136 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3137 return true; 3138 3139 // Make sure this isn't copying exec as a normal operand 3140 return MI.readsRegister(AMDGPU::EXEC, &RI); 3141 } 3142 3143 // Make a conservative assumption about the callee. 3144 if (MI.isCall()) 3145 return true; 3146 3147 // Be conservative with any unhandled generic opcodes. 
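  // Pre-selection generic (G_*) instructions carry no implicit exec use
  // yet, so conservatively assume they read it.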
3148   if (!isTargetSpecificOpcode(MI.getOpcode()))
3149     return true;
3150
3151   return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI);
3152 }
3153
3154 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const {
3155   switch (Imm.getBitWidth()) {
3156   case 1: // This likely will be a condition code mask.
3157     return true;
3158
3159   case 32:
3160     return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(),
3161                                         ST.hasInv2PiInlineImm());
3162   case 64:
3163     return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(),
3164                                         ST.hasInv2PiInlineImm());
3165   case 16:
3166     return ST.has16BitInsts() &&
3167            AMDGPU::isInlinableLiteral16(Imm.getSExtValue(),
3168                                         ST.hasInv2PiInlineImm());
3169   default:
3170     llvm_unreachable("invalid bitwidth");
3171   }
3172 }
3173
3174 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO,
3175                                    uint8_t OperandType) const {
3176   if (!MO.isImm() ||
3177       OperandType < AMDGPU::OPERAND_SRC_FIRST ||
3178       OperandType > AMDGPU::OPERAND_SRC_LAST)
3179     return false;
3180
3181   // MachineOperand provides no way to tell the true operand size, since it only
3182   // records a 64-bit value. We need to know the size to determine if a 32-bit
3183   // floating point immediate bit pattern is legal for an integer immediate. It
3184   // would be for any 32-bit integer operand, but would not be for a 64-bit one.
3185
3186   int64_t Imm = MO.getImm();
3187   switch (OperandType) {
3188   case AMDGPU::OPERAND_REG_IMM_INT32:
3189   case AMDGPU::OPERAND_REG_IMM_FP32:
3190   case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3191   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3192   case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3193   case AMDGPU::OPERAND_REG_INLINE_AC_FP32: {
3194     int32_t Trunc = static_cast<int32_t>(Imm);
3195     return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm());
3196   }
3197   case AMDGPU::OPERAND_REG_IMM_INT64:
3198   case AMDGPU::OPERAND_REG_IMM_FP64:
3199   case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3200   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3201     return AMDGPU::isInlinableLiteral64(MO.getImm(),
3202                                         ST.hasInv2PiInlineImm());
3203   case AMDGPU::OPERAND_REG_IMM_INT16:
3204   case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3205   case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3206     // We would expect inline immediates to not be concerned with an integer/fp
3207     // distinction. However, in the case of 16-bit integer operations, the
3208     // "floating point" values appear to not work. The hardware seems to read
3209     // only the low 16 bits of 32-bit immediates, which happens to always work
3210     // for the integer values.
3211     //
3212     // See llvm bugzilla 46302.
3213     //
3214     // TODO: Theoretically we could use op-sel to use the high bits of the
3215     // 32-bit FP values.
3216     return AMDGPU::isInlinableIntLiteral(Imm);
3217   case AMDGPU::OPERAND_REG_IMM_V2INT16:
3218   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
3219   case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
3220     // This suffers the same problem as the scalar 16-bit cases.
3221     return AMDGPU::isInlinableIntLiteralV216(Imm);
3222   case AMDGPU::OPERAND_REG_IMM_FP16:
3223   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3224   case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3225     if (isInt<16>(Imm) || isUInt<16>(Imm)) {
3226       // A few special case instructions have 16-bit operands on subtargets
3227       // where 16-bit instructions are not legal.
3228       // TODO: Do the 32-bit immediates work?
We shouldn't really need to handle 3229 // constants in these cases 3230 int16_t Trunc = static_cast<int16_t>(Imm); 3231 return ST.has16BitInsts() && 3232 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3233 } 3234 3235 return false; 3236 } 3237 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3238 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3239 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3240 uint32_t Trunc = static_cast<uint32_t>(Imm); 3241 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3242 } 3243 default: 3244 llvm_unreachable("invalid bitwidth"); 3245 } 3246 } 3247 3248 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3249 const MCOperandInfo &OpInfo) const { 3250 switch (MO.getType()) { 3251 case MachineOperand::MO_Register: 3252 return false; 3253 case MachineOperand::MO_Immediate: 3254 return !isInlineConstant(MO, OpInfo); 3255 case MachineOperand::MO_FrameIndex: 3256 case MachineOperand::MO_MachineBasicBlock: 3257 case MachineOperand::MO_ExternalSymbol: 3258 case MachineOperand::MO_GlobalAddress: 3259 case MachineOperand::MO_MCSymbol: 3260 return true; 3261 default: 3262 llvm_unreachable("unexpected operand type"); 3263 } 3264 } 3265 3266 static bool compareMachineOp(const MachineOperand &Op0, 3267 const MachineOperand &Op1) { 3268 if (Op0.getType() != Op1.getType()) 3269 return false; 3270 3271 switch (Op0.getType()) { 3272 case MachineOperand::MO_Register: 3273 return Op0.getReg() == Op1.getReg(); 3274 case MachineOperand::MO_Immediate: 3275 return Op0.getImm() == Op1.getImm(); 3276 default: 3277 llvm_unreachable("Didn't expect to be comparing these operand types"); 3278 } 3279 } 3280 3281 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3282 const MachineOperand &MO) const { 3283 const MCInstrDesc &InstDesc = MI.getDesc(); 3284 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3285 3286 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3287 3288 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3289 return true; 3290 3291 if (OpInfo.RegClass < 0) 3292 return false; 3293 3294 const MachineFunction *MF = MI.getParent()->getParent(); 3295 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3296 3297 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3298 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3299 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3300 AMDGPU::OpName::src2)) 3301 return false; 3302 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3303 } 3304 3305 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3306 return false; 3307 3308 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3309 return true; 3310 3311 return ST.hasVOP3Literal(); 3312 } 3313 3314 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3315 int Op32 = AMDGPU::getVOPe32(Opcode); 3316 if (Op32 == -1) 3317 return false; 3318 3319 return pseudoToMCOpcode(Op32) != -1; 3320 } 3321 3322 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3323 // The src0_modifier operand is present on all instructions 3324 // that have modifiers. 
3325
3326   return AMDGPU::getNamedOperandIdx(Opcode,
3327                                     AMDGPU::OpName::src0_modifiers) != -1;
3328 }
3329
3330 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3331                                   unsigned OpName) const {
3332   const MachineOperand *Mods = getNamedOperand(MI, OpName);
3333   return Mods && Mods->getImm();
3334 }
3335
3336 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3337   return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3338          hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3339          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3340          hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3341          hasModifiersSet(MI, AMDGPU::OpName::omod);
3342 }
3343
3344 bool SIInstrInfo::canShrink(const MachineInstr &MI,
3345                             const MachineRegisterInfo &MRI) const {
3346   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3347   // Can't shrink instruction with three operands.
3348   // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
3349   // a special case for it. It can only be shrunk if the third operand
3350   // is vcc, and src0_modifiers and src1_modifiers are not set.
3351   // We should handle this the same way we handle vopc, by adding
3352   // a register allocation hint pre-regalloc and then doing the shrinking
3353   // post-regalloc.
3354   if (Src2) {
3355     switch (MI.getOpcode()) {
3356     default: return false;
3357
3358     case AMDGPU::V_ADDC_U32_e64:
3359     case AMDGPU::V_SUBB_U32_e64:
3360     case AMDGPU::V_SUBBREV_U32_e64: {
3361       const MachineOperand *Src1
3362         = getNamedOperand(MI, AMDGPU::OpName::src1);
3363       if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3364         return false;
3365       // Additional verification is needed for sdst/src2.
3366       return true;
3367     }
3368     case AMDGPU::V_MAC_F32_e64:
3369     case AMDGPU::V_MAC_F16_e64:
3370     case AMDGPU::V_FMAC_F32_e64:
3371     case AMDGPU::V_FMAC_F16_e64:
3372       if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3373           hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3374         return false;
3375       break;
3376
3377     case AMDGPU::V_CNDMASK_B32_e64:
3378       break;
3379     }
3380   }
3381
3382   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3383   if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3384                hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3385     return false;
3386
3387   // We don't need to check src0, all input types are legal, so just make sure
3388   // src0 isn't using any modifiers.
3389   if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3390     return false;
3391
3392   // Can it be shrunk to a valid 32 bit opcode?
3393   if (!hasVALU32BitEncoding(MI.getOpcode()))
3394     return false;
3395
3396   // Check output modifiers
3397   return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3398          !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3399 }
3400
3401 // Set VCC operand with all flags from \p Orig, except for setting it as
3402 // implicit.
static void copyFlagsToImplicitVCC(MachineInstr &MI,
                                   const MachineOperand &Orig) {

  for (MachineOperand &Use : MI.implicit_operands()) {
    if (Use.isUse() &&
        (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) {
      Use.setIsUndef(Orig.isUndef());
      Use.setIsKill(Orig.isKill());
      return;
    }
  }
}

MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
                                           unsigned Op32) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineInstrBuilder Inst32 =
    BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32))
    .setMIFlags(MI.getFlags());

  // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
  // For VOPC instructions, this is replaced by an implicit def of vcc.
  int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
  if (Op32DstIdx != -1) {
    // dst
    Inst32.add(MI.getOperand(0));
  } else {
    assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
            (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
           "Unexpected case");
  }

  Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));

  const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
  if (Src1)
    Inst32.add(*Src1);

  const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);

  if (Src2) {
    int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
    if (Op32Src2Idx != -1) {
      Inst32.add(*Src2);
    } else {
      // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
      // replaced with an implicit read of vcc. This was already added
      // during the initial BuildMI, so find it to preserve the flags.
      copyFlagsToImplicitVCC(*Inst32, *Src2);
    }
  }

  return Inst32;
}

bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
                                  const MachineOperand &MO,
                                  const MCOperandInfo &OpInfo) const {
  // Literal constants use the constant bus.
  //if (isLiteralConstantLike(MO, OpInfo))
  //  return true;
  if (MO.isImm())
    return !isInlineConstant(MO, OpInfo);

  if (!MO.isReg())
    return true; // Misc other operands like FrameIndex

  if (!MO.isUse())
    return false;

  if (Register::isVirtualRegister(MO.getReg()))
    return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));

  // Null is free
  if (MO.getReg() == AMDGPU::SGPR_NULL)
    return false;

  // SGPRs use the constant bus
  if (MO.isImplicit()) {
    return MO.getReg() == AMDGPU::M0 ||
           MO.getReg() == AMDGPU::VCC ||
           MO.getReg() == AMDGPU::VCC_LO;
  } else {
    return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
           AMDGPU::SReg_64RegClass.contains(MO.getReg());
  }
}

static Register findImplicitSGPRRead(const MachineInstr &MI) {
  for (const MachineOperand &MO : MI.implicit_operands()) {
    // We only care about reads.
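    // Note that implicit defs (e.g. the vcc carry-out written by
    // V_ADDC_U32_e32) are deliberately skipped: only implicit *reads* of a
    // scalar register occupy the constant bus.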
3494 if (MO.isDef()) 3495 continue; 3496 3497 switch (MO.getReg()) { 3498 case AMDGPU::VCC: 3499 case AMDGPU::VCC_LO: 3500 case AMDGPU::VCC_HI: 3501 case AMDGPU::M0: 3502 case AMDGPU::FLAT_SCR: 3503 return MO.getReg(); 3504 3505 default: 3506 break; 3507 } 3508 } 3509 3510 return AMDGPU::NoRegister; 3511 } 3512 3513 static bool shouldReadExec(const MachineInstr &MI) { 3514 if (SIInstrInfo::isVALU(MI)) { 3515 switch (MI.getOpcode()) { 3516 case AMDGPU::V_READLANE_B32: 3517 case AMDGPU::V_READLANE_B32_gfx6_gfx7: 3518 case AMDGPU::V_READLANE_B32_gfx10: 3519 case AMDGPU::V_READLANE_B32_vi: 3520 case AMDGPU::V_WRITELANE_B32: 3521 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: 3522 case AMDGPU::V_WRITELANE_B32_gfx10: 3523 case AMDGPU::V_WRITELANE_B32_vi: 3524 return false; 3525 } 3526 3527 return true; 3528 } 3529 3530 if (MI.isPreISelOpcode() || 3531 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3532 SIInstrInfo::isSALU(MI) || 3533 SIInstrInfo::isSMRD(MI)) 3534 return false; 3535 3536 return true; 3537 } 3538 3539 static bool isSubRegOf(const SIRegisterInfo &TRI, 3540 const MachineOperand &SuperVec, 3541 const MachineOperand &SubReg) { 3542 if (Register::isPhysicalRegister(SubReg.getReg())) 3543 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3544 3545 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3546 SubReg.getReg() == SuperVec.getReg(); 3547 } 3548 3549 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3550 StringRef &ErrInfo) const { 3551 uint16_t Opcode = MI.getOpcode(); 3552 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3553 return true; 3554 3555 const MachineFunction *MF = MI.getParent()->getParent(); 3556 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3557 3558 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3559 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3560 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3561 3562 // Make sure the number of operands is correct. 3563 const MCInstrDesc &Desc = get(Opcode); 3564 if (!Desc.isVariadic() && 3565 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3566 ErrInfo = "Instruction has wrong number of operands."; 3567 return false; 3568 } 3569 3570 if (MI.isInlineAsm()) { 3571 // Verify register classes for inlineasm constraints. 3572 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3573 I != E; ++I) { 3574 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3575 if (!RC) 3576 continue; 3577 3578 const MachineOperand &Op = MI.getOperand(I); 3579 if (!Op.isReg()) 3580 continue; 3581 3582 Register Reg = Op.getReg(); 3583 if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) { 3584 ErrInfo = "inlineasm operand has incorrect register class."; 3585 return false; 3586 } 3587 } 3588 3589 return true; 3590 } 3591 3592 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3593 ErrInfo = "missing memory operand from MIMG instruction."; 3594 return false; 3595 } 3596 3597 // Make sure the register classes are correct. 3598 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3599 if (MI.getOperand(i).isFPImm()) { 3600 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast "
                "all fp values to integers.";
      return false;
    }

    int RegClass = Desc.OpInfo[i].RegClass;

    switch (Desc.OpInfo[i].OperandType) {
    case MCOI::OPERAND_REGISTER:
      if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
      break;
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
    case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
    case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
        ErrInfo = "Illegal immediate value for operand.";
        return false;
      }
      break;
    }
    case MCOI::OPERAND_IMMEDIATE:
    case AMDGPU::OPERAND_KIMM32:
      // Check if this operand is an immediate.
      // FrameIndex operands will be replaced by immediates, so they are
      // allowed.
      if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
        ErrInfo = "Expected immediate, but got non-immediate";
        return false;
      }
      LLVM_FALLTHROUGH;
    default:
      continue;
    }

    if (!MI.getOperand(i).isReg())
      continue;

    if (RegClass != -1) {
      Register Reg = MI.getOperand(i).getReg();
      if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
        continue;

      const TargetRegisterClass *RC = RI.getRegClass(RegClass);
      if (!RC->contains(Reg)) {
        ErrInfo = "Operand has incorrect register class.";
        return false;
      }
    }
  }

  // Verify SDWA
  if (isSDWA(MI)) {
    if (!ST.hasSDWA()) {
      ErrInfo = "SDWA is not supported on this target";
      return false;
    }

    int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);

    const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1)
        continue;
      const MachineOperand &MO = MI.getOperand(OpIdx);

      if (!ST.hasSDWAScalar()) {
        // Only VGPRs on VI
        if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
          ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
          return false;
        }
      } else {
        // No immediates on GFX9
        if (!MO.isReg()) {
          ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9";
          return false;
        }
      }
    }

    if (!ST.hasSDWAOmod()) {
      // No omod allowed on VI
      const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
      if (OMod != nullptr &&
          (!OMod->isImm() || OMod->getImm() != 0)) {
        ErrInfo = "OMod not allowed in SDWA instructions on VI";
        return false;
      }
    }

    uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
    if (isVOPC(BasicOpcode)) {
      if (!ST.hasSDWASdst() && DstIdx != -1) {
        // Only vcc allowed as dst on VI for VOPC
        const MachineOperand &Dst = MI.getOperand(DstIdx);
        if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
          ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
          return false;
        }
      } else if (!ST.hasSDWAOutModsVOPC()) {
        // No clamp allowed on GFX9 for VOPC
        const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
        if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
          ErrInfo = "Clamp not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }

        // No omod allowed on GFX9 for VOPC
        const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
        if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
          ErrInfo = "OMod not allowed in VOPC SDWA instructions on GFX9";
          return false;
        }
      }
    }

    const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
    if (DstUnused && DstUnused->isImm() &&
        DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (!Dst.isReg() || !Dst.isTied()) {
        ErrInfo = "Dst register should have tied register";
        return false;
      }

      const MachineOperand &TiedMO =
          MI.getOperand(MI.findTiedOperandIdx(DstIdx));
      if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
        ErrInfo =
            "Dst register should be tied to implicit use of preserved register";
        return false;
      } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
                 Dst.getReg() != TiedMO.getReg()) {
        ErrInfo = "Dst register should use same physical register as preserved";
        return false;
      }
    }
  }

  // Verify MIMG
  if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
    // Ensure that the return type used is large enough for all the options
    // being used. TFE/LWE require an extra result register.
    const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
    if (DMask) {
      uint64_t DMaskImm = DMask->getImm();
      uint32_t RegCount =
          isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
      const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
      const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
      const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);

      // Adjust for packed 16 bit values
      if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
        RegCount >>= 1;

      // Adjust if using LWE or TFE
      if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
        RegCount += 1;

      const uint32_t DstIdx =
          AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
      const MachineOperand &Dst = MI.getOperand(DstIdx);
      if (Dst.isReg()) {
        const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
        uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
        if (RegCount > DstSize) {
          ErrInfo = "MIMG instruction returns too many registers for dst "
                    "register class";
          return false;
        }
      }
    }
  }

  // Verify VOP*. Ignore multiple sgpr operands on writelane.
  if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
      && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
    // Only look at the true operands. Only a real operand can use the constant
    // bus, and we don't want to check pseudo-operands like the source modifier
    // flags.
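    // A sketch of the accounting below, assuming a pre-GFX10 constant bus
    // limit of 1:
    //   V_ADD_F32_e64 %v0, %s0, %s1   ; rejected: two distinct SGPR reads
    //   V_ADD_F32_e64 %v0, %s0, %s0   ; accepted: overlapping SGPRs count once
    // Literals are counted both as constant bus uses and in LiteralCount.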
3796 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3797 3798 unsigned ConstantBusCount = 0; 3799 unsigned LiteralCount = 0; 3800 3801 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3802 ++ConstantBusCount; 3803 3804 SmallVector<Register, 2> SGPRsUsed; 3805 Register SGPRUsed = findImplicitSGPRRead(MI); 3806 if (SGPRUsed != AMDGPU::NoRegister) { 3807 ++ConstantBusCount; 3808 SGPRsUsed.push_back(SGPRUsed); 3809 } 3810 3811 for (int OpIdx : OpIndices) { 3812 if (OpIdx == -1) 3813 break; 3814 const MachineOperand &MO = MI.getOperand(OpIdx); 3815 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3816 if (MO.isReg()) { 3817 SGPRUsed = MO.getReg(); 3818 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 3819 return !RI.regsOverlap(SGPRUsed, SGPR); 3820 })) { 3821 ++ConstantBusCount; 3822 SGPRsUsed.push_back(SGPRUsed); 3823 } 3824 } else { 3825 ++ConstantBusCount; 3826 ++LiteralCount; 3827 } 3828 } 3829 } 3830 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3831 // v_writelane_b32 is an exception from constant bus restriction: 3832 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 3833 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 3834 Opcode != AMDGPU::V_WRITELANE_B32) { 3835 ErrInfo = "VOP* instruction violates constant bus restriction"; 3836 return false; 3837 } 3838 3839 if (isVOP3(MI) && LiteralCount) { 3840 if (!ST.hasVOP3Literal()) { 3841 ErrInfo = "VOP3 instruction uses literal"; 3842 return false; 3843 } 3844 if (LiteralCount > 1) { 3845 ErrInfo = "VOP3 instruction uses more than one literal"; 3846 return false; 3847 } 3848 } 3849 } 3850 3851 // Special case for writelane - this can break the multiple constant bus rule, 3852 // but still can't use more than one SGPR register 3853 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 3854 unsigned SGPRCount = 0; 3855 Register SGPRUsed = AMDGPU::NoRegister; 3856 3857 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 3858 if (OpIdx == -1) 3859 break; 3860 3861 const MachineOperand &MO = MI.getOperand(OpIdx); 3862 3863 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3864 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 3865 if (MO.getReg() != SGPRUsed) 3866 ++SGPRCount; 3867 SGPRUsed = MO.getReg(); 3868 } 3869 } 3870 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 3871 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 3872 return false; 3873 } 3874 } 3875 } 3876 3877 // Verify misc. restrictions on specific instructions. 
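  // For instance, the hardware division expansion requires v_div_scale_* to
  // repeat one of its sources (src0 must match src1 or src2), which is what
  // the first check below enforces.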
3878 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3879 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3880 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3881 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3882 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3883 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3884 if (!compareMachineOp(Src0, Src1) && 3885 !compareMachineOp(Src0, Src2)) { 3886 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3887 return false; 3888 } 3889 } 3890 } 3891 3892 if (isSOP2(MI) || isSOPC(MI)) { 3893 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3894 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3895 unsigned Immediates = 0; 3896 3897 if (!Src0.isReg() && 3898 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3899 Immediates++; 3900 if (!Src1.isReg() && 3901 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3902 Immediates++; 3903 3904 if (Immediates > 1) { 3905 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3906 return false; 3907 } 3908 } 3909 3910 if (isSOPK(MI)) { 3911 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3912 if (Desc.isBranch()) { 3913 if (!Op->isMBB()) { 3914 ErrInfo = "invalid branch target for SOPK instruction"; 3915 return false; 3916 } 3917 } else { 3918 uint64_t Imm = Op->getImm(); 3919 if (sopkIsZext(MI)) { 3920 if (!isUInt<16>(Imm)) { 3921 ErrInfo = "invalid immediate for SOPK instruction"; 3922 return false; 3923 } 3924 } else { 3925 if (!isInt<16>(Imm)) { 3926 ErrInfo = "invalid immediate for SOPK instruction"; 3927 return false; 3928 } 3929 } 3930 } 3931 } 3932 3933 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3934 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3935 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3936 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3937 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3938 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3939 3940 const unsigned StaticNumOps = Desc.getNumOperands() + 3941 Desc.getNumImplicitUses(); 3942 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3943 3944 // Allow additional implicit operands. This allows a fixup done by the post 3945 // RA scheduler where the main implicit operand is killed and implicit-defs 3946 // are added for sub-registers that remain live after this instruction. 3947 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3948 ErrInfo = "missing implicit register operands"; 3949 return false; 3950 } 3951 3952 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3953 if (IsDst) { 3954 if (!Dst->isUse()) { 3955 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3956 return false; 3957 } 3958 3959 unsigned UseOpIdx; 3960 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3961 UseOpIdx != StaticNumOps + 1) { 3962 ErrInfo = "movrel implicit operands should be tied"; 3963 return false; 3964 } 3965 } 3966 3967 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3968 const MachineOperand &ImpUse 3969 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3970 if (!ImpUse.isReg() || !ImpUse.isUse() || 3971 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3972 ErrInfo = "src0 should be subreg of implicit vector use"; 3973 return false; 3974 } 3975 } 3976 3977 // Make sure we aren't losing exec uses in the td files. This mostly requires 3978 // being careful when using let Uses to try to add other use registers. 
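  // (In TableGen terms: an instruction definition that overrides the implicit
  // uses, say with
  //   let Uses = [M0];
  // must remember to keep EXEC in the list as well, e.g.
  //   let Uses = [EXEC, M0];
  // The snippet is illustrative, not taken from the actual .td files.)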
3979 if (shouldReadExec(MI)) { 3980 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3981 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3982 return false; 3983 } 3984 } 3985 3986 if (isSMRD(MI)) { 3987 if (MI.mayStore()) { 3988 // The register offset form of scalar stores may only use m0 as the 3989 // soffset register. 3990 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3991 if (Soff && Soff->getReg() != AMDGPU::M0) { 3992 ErrInfo = "scalar stores must use m0 as offset register"; 3993 return false; 3994 } 3995 } 3996 } 3997 3998 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { 3999 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4000 if (Offset->getImm() != 0) { 4001 ErrInfo = "subtarget does not support offsets in flat instructions"; 4002 return false; 4003 } 4004 } 4005 4006 if (isMIMG(MI)) { 4007 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4008 if (DimOp) { 4009 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4010 AMDGPU::OpName::vaddr0); 4011 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4012 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4013 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4014 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4015 const AMDGPU::MIMGDimInfo *Dim = 4016 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4017 4018 if (!Dim) { 4019 ErrInfo = "dim is out of range"; 4020 return false; 4021 } 4022 4023 bool IsA16 = false; 4024 if (ST.hasR128A16()) { 4025 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4026 IsA16 = R128A16->getImm() != 0; 4027 } else if (ST.hasGFX10A16()) { 4028 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4029 IsA16 = A16->getImm() != 0; 4030 } 4031 4032 bool PackDerivatives = IsA16 || BaseOpcode->G16; 4033 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4034 4035 unsigned AddrWords = BaseOpcode->NumExtraArgs; 4036 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 4037 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 4038 if (IsA16) 4039 AddrWords += (AddrComponents + 1) / 2; 4040 else 4041 AddrWords += AddrComponents; 4042 4043 if (BaseOpcode->Gradients) { 4044 if (PackDerivatives) 4045 // There are two gradients per coordinate, we pack them separately. 
4046 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 4047 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2; 4048 else 4049 AddrWords += Dim->NumGradients; 4050 } 4051 4052 unsigned VAddrWords; 4053 if (IsNSA) { 4054 VAddrWords = SRsrcIdx - VAddr0Idx; 4055 } else { 4056 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4057 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4058 if (AddrWords > 8) 4059 AddrWords = 16; 4060 else if (AddrWords > 4) 4061 AddrWords = 8; 4062 else if (AddrWords == 4) 4063 AddrWords = 4; 4064 else if (AddrWords == 3) 4065 AddrWords = 3; 4066 } 4067 4068 if (VAddrWords != AddrWords) { 4069 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4070 << " but got " << VAddrWords << "\n"); 4071 ErrInfo = "bad vaddr size"; 4072 return false; 4073 } 4074 } 4075 } 4076 4077 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4078 if (DppCt) { 4079 using namespace AMDGPU::DPP; 4080 4081 unsigned DC = DppCt->getImm(); 4082 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4083 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4084 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4085 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4086 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4087 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4088 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4089 ErrInfo = "Invalid dpp_ctrl value"; 4090 return false; 4091 } 4092 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4093 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4094 ErrInfo = "Invalid dpp_ctrl value: " 4095 "wavefront shifts are not supported on GFX10+"; 4096 return false; 4097 } 4098 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4099 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4100 ErrInfo = "Invalid dpp_ctrl value: " 4101 "broadcasts are not supported on GFX10+"; 4102 return false; 4103 } 4104 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4105 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4106 ErrInfo = "Invalid dpp_ctrl value: " 4107 "row_share and row_xmask are not supported before GFX10"; 4108 return false; 4109 } 4110 } 4111 4112 return true; 4113 } 4114 4115 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4116 switch (MI.getOpcode()) { 4117 default: return AMDGPU::INSTRUCTION_LIST_END; 4118 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4119 case AMDGPU::COPY: return AMDGPU::COPY; 4120 case AMDGPU::PHI: return AMDGPU::PHI; 4121 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4122 case AMDGPU::WQM: return AMDGPU::WQM; 4123 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4124 case AMDGPU::WWM: return AMDGPU::WWM; 4125 case AMDGPU::S_MOV_B32: { 4126 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4127 return MI.getOperand(1).isReg() || 4128 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4129 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4130 } 4131 case AMDGPU::S_ADD_I32: 4132 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4133 case AMDGPU::S_ADDC_U32: 4134 return AMDGPU::V_ADDC_U32_e32; 4135 case AMDGPU::S_SUB_I32: 4136 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4137 // FIXME: These are not consistently handled, and selected when the carry is 4138 // used. 
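  // S_ADD_U32/S_SUB_U32 expose their carry-out through SCC, so they map to
  // the carry-writing V_*_CO_* forms below, unlike S_ADD_I32/S_SUB_I32 above,
  // which can use the no-carry V_ADD_U32/V_SUB_U32 encodings when available.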
4139 case AMDGPU::S_ADD_U32: 4140 return AMDGPU::V_ADD_CO_U32_e32; 4141 case AMDGPU::S_SUB_U32: 4142 return AMDGPU::V_SUB_CO_U32_e32; 4143 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4144 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 4145 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 4146 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 4147 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4148 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4149 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4150 case AMDGPU::S_XNOR_B32: 4151 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4152 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4153 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4154 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4155 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4156 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4157 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 4158 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4159 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 4160 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4161 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 4162 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 4163 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 4164 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 4165 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 4166 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4167 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4168 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4169 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4170 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 4171 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 4172 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 4173 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 4174 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 4175 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 4176 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 4177 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 4178 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 4179 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 4180 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 4181 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 4182 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 4183 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 4184 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4185 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4186 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4187 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4188 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4189 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4190 } 4191 llvm_unreachable( 4192 "Unexpected scalar opcode without corresponding vector one!"); 4193 } 4194 4195 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4196 unsigned OpNo) const { 4197 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4198 const MCInstrDesc &Desc = get(MI.getOpcode()); 4199 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4200 Desc.OpInfo[OpNo].RegClass == -1) { 4201 Register Reg = MI.getOperand(OpNo).getReg(); 4202 4203 if 
(Register::isVirtualRegister(Reg)) 4204 return MRI.getRegClass(Reg); 4205 return RI.getPhysRegClass(Reg); 4206 } 4207 4208 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4209 return RI.getRegClass(RCID); 4210 } 4211 4212 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4213 MachineBasicBlock::iterator I = MI; 4214 MachineBasicBlock *MBB = MI.getParent(); 4215 MachineOperand &MO = MI.getOperand(OpIdx); 4216 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4217 const SIRegisterInfo *TRI = 4218 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 4219 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4220 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4221 unsigned Size = TRI->getRegSizeInBits(*RC); 4222 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4223 if (MO.isReg()) 4224 Opcode = AMDGPU::COPY; 4225 else if (RI.isSGPRClass(RC)) 4226 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4227 4228 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4229 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 4230 VRC = &AMDGPU::VReg_64RegClass; 4231 else 4232 VRC = &AMDGPU::VGPR_32RegClass; 4233 4234 Register Reg = MRI.createVirtualRegister(VRC); 4235 DebugLoc DL = MBB->findDebugLoc(I); 4236 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4237 MO.ChangeToRegister(Reg, false); 4238 } 4239 4240 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4241 MachineRegisterInfo &MRI, 4242 MachineOperand &SuperReg, 4243 const TargetRegisterClass *SuperRC, 4244 unsigned SubIdx, 4245 const TargetRegisterClass *SubRC) 4246 const { 4247 MachineBasicBlock *MBB = MI->getParent(); 4248 DebugLoc DL = MI->getDebugLoc(); 4249 Register SubReg = MRI.createVirtualRegister(SubRC); 4250 4251 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4252 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4253 .addReg(SuperReg.getReg(), 0, SubIdx); 4254 return SubReg; 4255 } 4256 4257 // Just in case the super register is itself a sub-register, copy it to a new 4258 // value so we don't need to worry about merging its subreg index with the 4259 // SubIdx passed to this function. The register coalescer should be able to 4260 // eliminate this extra copy. 
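  // (Concretely: if SuperReg were %X.sub2_sub3 and SubIdx were sub1, we would
  // otherwise have to compose the two indices into sub3; copying %X.sub2_sub3
  // into a fresh SuperRC register first makes the plain SubIdx copy below
  // valid. The register names here are illustrative.)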
4261 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4262 4263 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4264 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4265 4266 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4267 .addReg(NewSuperReg, 0, SubIdx); 4268 4269 return SubReg; 4270 } 4271 4272 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4273 MachineBasicBlock::iterator MII, 4274 MachineRegisterInfo &MRI, 4275 MachineOperand &Op, 4276 const TargetRegisterClass *SuperRC, 4277 unsigned SubIdx, 4278 const TargetRegisterClass *SubRC) const { 4279 if (Op.isImm()) { 4280 if (SubIdx == AMDGPU::sub0) 4281 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4282 if (SubIdx == AMDGPU::sub1) 4283 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4284 4285 llvm_unreachable("Unhandled register index for immediate"); 4286 } 4287 4288 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4289 SubIdx, SubRC); 4290 return MachineOperand::CreateReg(SubReg, false); 4291 } 4292 4293 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4294 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4295 assert(Inst.getNumExplicitOperands() == 3); 4296 MachineOperand Op1 = Inst.getOperand(1); 4297 Inst.RemoveOperand(1); 4298 Inst.addOperand(Op1); 4299 } 4300 4301 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4302 const MCOperandInfo &OpInfo, 4303 const MachineOperand &MO) const { 4304 if (!MO.isReg()) 4305 return false; 4306 4307 Register Reg = MO.getReg(); 4308 const TargetRegisterClass *RC = Register::isVirtualRegister(Reg) 4309 ? MRI.getRegClass(Reg) 4310 : RI.getPhysRegClass(Reg); 4311 4312 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4313 if (MO.getSubReg()) { 4314 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4315 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4316 if (!SuperRC) 4317 return false; 4318 4319 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4320 if (!DRC) 4321 return false; 4322 } 4323 return RC->hasSuperClassEq(DRC); 4324 } 4325 4326 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4327 const MCOperandInfo &OpInfo, 4328 const MachineOperand &MO) const { 4329 if (MO.isReg()) 4330 return isLegalRegOperand(MRI, OpInfo, MO); 4331 4332 // Handle non-register types that are treated like immediates. 4333 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4334 return true; 4335 } 4336 4337 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4338 const MachineOperand *MO) const { 4339 const MachineFunction &MF = *MI.getParent()->getParent(); 4340 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4341 const MCInstrDesc &InstDesc = MI.getDesc(); 4342 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4343 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4344 const TargetRegisterClass *DefinedRC = 4345 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4346 if (!MO) 4347 MO = &MI.getOperand(OpIdx); 4348 4349 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4350 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4351 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4352 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4353 return false; 4354 4355 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4356 if (MO->isReg()) 4357 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4358 4359 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4360 if (i == OpIdx) 4361 continue; 4362 const MachineOperand &Op = MI.getOperand(i); 4363 if (Op.isReg()) { 4364 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4365 if (!SGPRsUsed.count(SGPR) && 4366 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4367 if (--ConstantBusLimit <= 0) 4368 return false; 4369 SGPRsUsed.insert(SGPR); 4370 } 4371 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4372 if (--ConstantBusLimit <= 0) 4373 return false; 4374 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4375 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4376 if (!VOP3LiteralLimit--) 4377 return false; 4378 if (--ConstantBusLimit <= 0) 4379 return false; 4380 } 4381 } 4382 } 4383 4384 if (MO->isReg()) { 4385 assert(DefinedRC); 4386 return isLegalRegOperand(MRI, OpInfo, *MO); 4387 } 4388 4389 // Handle non-register types that are treated like immediates. 4390 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4391 4392 if (!DefinedRC) { 4393 // This operand expects an immediate. 4394 return true; 4395 } 4396 4397 return isImmOperandLegal(MI, OpIdx, *MO); 4398 } 4399 4400 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4401 MachineInstr &MI) const { 4402 unsigned Opc = MI.getOpcode(); 4403 const MCInstrDesc &InstrDesc = get(Opc); 4404 4405 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4406 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4407 4408 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4409 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4410 4411 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4412 // we need to only have one constant bus use before GFX10. 4413 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4414 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4415 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4416 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4417 legalizeOpWithMove(MI, Src0Idx); 4418 4419 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4420 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4421 // src0/src1 with V_READFIRSTLANE. 4422 if (Opc == AMDGPU::V_WRITELANE_B32) { 4423 const DebugLoc &DL = MI.getDebugLoc(); 4424 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4425 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4426 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4427 .add(Src0); 4428 Src0.ChangeToRegister(Reg, false); 4429 } 4430 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4431 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4432 const DebugLoc &DL = MI.getDebugLoc(); 4433 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4434 .add(Src1); 4435 Src1.ChangeToRegister(Reg, false); 4436 } 4437 return; 4438 } 4439 4440 // No VOP2 instructions support AGPRs. 
if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg()))
    legalizeOpWithMove(MI, Src0Idx);

  if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg()))
    legalizeOpWithMove(MI, Src1Idx);

  // VOP2 instructions accept all operand types in src0, so we don't need to
  // check its legality. If src1 is already legal, we don't need to do
  // anything.
  if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1))
    return;

  // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for
  // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane
  // select is uniform.
  if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() &&
      RI.isVGPR(MRI, Src1.getReg())) {
    Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
    const DebugLoc &DL = MI.getDebugLoc();
    BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg)
        .add(Src1);
    Src1.ChangeToRegister(Reg, false);
    return;
  }

  // We do not use commuteInstruction here because it is too aggressive and
  // will commute whenever it is possible. We only want to commute here if it
  // improves legality. This can be called a fairly large number of times so
  // don't waste compile time pointlessly swapping and checking legality again.
  if (HasImplicitSGPR || !MI.isCommutable()) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  // If src0 can be used as src1, commuting will make the operands legal.
  // Otherwise we have to give up and insert a move.
  //
  // TODO: Other immediate-like operand kinds could be commuted if there was a
  // MachineOperand::ChangeTo* for them.
  if ((!Src1.isImm() && !Src1.isReg()) ||
      !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  int CommutedOpc = commuteOpcode(MI);
  if (CommutedOpc == -1) {
    legalizeOpWithMove(MI, Src1Idx);
    return;
  }

  MI.setDesc(get(CommutedOpc));

  Register Src0Reg = Src0.getReg();
  unsigned Src0SubReg = Src0.getSubReg();
  bool Src0Kill = Src0.isKill();

  if (Src1.isImm())
    Src0.ChangeToImmediate(Src1.getImm());
  else if (Src1.isReg()) {
    Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill());
    Src0.setSubReg(Src1.getSubReg());
  } else
    llvm_unreachable("Should only have register or immediate operands");

  Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill);
  Src1.setSubReg(Src0SubReg);
  fixImplicitOperands(MI);
}

// Legalize VOP3 operands. All operand types are supported for any operand
// but only one literal constant and only starting from GFX10.
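// For example (a sketch, not from a test): on GFX10, where the constant bus
// limit is 2 and one VOP3 literal is allowed,
//   V_FMA_F32 %v1, %s0, 1.5, %v2
// is acceptable (one SGPR read plus one literal), while on pre-GFX10 targets
// the same instruction would need the literal moved into a register via
// legalizeOpWithMove() below.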
4512 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4513 MachineInstr &MI) const { 4514 unsigned Opc = MI.getOpcode(); 4515 4516 int VOP3Idx[3] = { 4517 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4518 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4519 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4520 }; 4521 4522 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4523 Opc == AMDGPU::V_PERMLANEX16_B32) { 4524 // src1 and src2 must be scalar 4525 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4526 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4527 const DebugLoc &DL = MI.getDebugLoc(); 4528 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4529 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4530 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4531 .add(Src1); 4532 Src1.ChangeToRegister(Reg, false); 4533 } 4534 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4535 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4536 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4537 .add(Src2); 4538 Src2.ChangeToRegister(Reg, false); 4539 } 4540 } 4541 4542 // Find the one SGPR operand we are allowed to use. 4543 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4544 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4545 SmallDenseSet<unsigned> SGPRsUsed; 4546 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 4547 if (SGPRReg != AMDGPU::NoRegister) { 4548 SGPRsUsed.insert(SGPRReg); 4549 --ConstantBusLimit; 4550 } 4551 4552 for (unsigned i = 0; i < 3; ++i) { 4553 int Idx = VOP3Idx[i]; 4554 if (Idx == -1) 4555 break; 4556 MachineOperand &MO = MI.getOperand(Idx); 4557 4558 if (!MO.isReg()) { 4559 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4560 continue; 4561 4562 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4563 --LiteralLimit; 4564 --ConstantBusLimit; 4565 continue; 4566 } 4567 4568 --LiteralLimit; 4569 --ConstantBusLimit; 4570 legalizeOpWithMove(MI, Idx); 4571 continue; 4572 } 4573 4574 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4575 !isOperandLegal(MI, Idx, &MO)) { 4576 legalizeOpWithMove(MI, Idx); 4577 continue; 4578 } 4579 4580 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4581 continue; // VGPRs are legal 4582 4583 // We can use one SGPR in each VOP3 instruction prior to GFX10 4584 // and two starting from GFX10. 4585 if (SGPRsUsed.count(MO.getReg())) 4586 continue; 4587 if (ConstantBusLimit > 0) { 4588 SGPRsUsed.insert(MO.getReg()); 4589 --ConstantBusLimit; 4590 continue; 4591 } 4592 4593 // If we make it this far, then the operand is not legal and we must 4594 // legalize it. 
legalizeOpWithMove(MI, Idx);
  }
}

Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
                                         MachineRegisterInfo &MRI) const {
  const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
  const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
  Register DstReg = MRI.createVirtualRegister(SRC);
  unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;

  if (RI.hasAGPRs(VRC)) {
    VRC = RI.getEquivalentVGPRClass(VRC);
    Register NewSrcReg = MRI.createVirtualRegister(VRC);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(TargetOpcode::COPY), NewSrcReg)
        .addReg(SrcReg);
    SrcReg = NewSrcReg;
  }

  if (SubRegs == 1) {
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
        .addReg(SrcReg);
    return DstReg;
  }

  SmallVector<unsigned, 8> SRegs;
  for (unsigned i = 0; i < SubRegs; ++i) {
    Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
    BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
            get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
        .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
    SRegs.push_back(SGPR);
  }

  MachineInstrBuilder MIB =
      BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
              get(AMDGPU::REG_SEQUENCE), DstReg);
  for (unsigned i = 0; i < SubRegs; ++i) {
    MIB.addReg(SRegs[i]);
    MIB.addImm(RI.getSubRegFromChannel(i));
  }
  return DstReg;
}

void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
                                       MachineInstr &MI) const {

  // If the pointer is stored in VGPRs, then we need to move it to
  // SGPRs using v_readfirstlane. This is safe because we only select
  // loads with uniform pointers to SMRD instructions, so we know the
  // pointer value is uniform.
  MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
  if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
    unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
    SBase->setReg(SGPR);
  }
  MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
  if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
    unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
    SOff->setReg(SGPR);
  }
}

void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
                                         MachineBasicBlock::iterator I,
                                         const TargetRegisterClass *DstRC,
                                         MachineOperand &Op,
                                         MachineRegisterInfo &MRI,
                                         const DebugLoc &DL) const {
  Register OpReg = Op.getReg();
  unsigned OpSubReg = Op.getSubReg();

  const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
      RI.getRegClassForReg(MRI, OpReg), OpSubReg);

  // Check if operand is already the correct register class.
  if (DstRC == OpRC)
    return;

  Register DstReg = MRI.createVirtualRegister(DstRC);
  MachineInstr *Copy =
      BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);

  Op.setReg(DstReg);
  Op.setSubReg(0);

  MachineInstr *Def = MRI.getVRegDef(OpReg);
  if (!Def)
    return;

  // Try to eliminate the copy if it is copying an immediate value.
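  // e.g. if Def is "%a = V_MOV_B32_e32 7" and the copy just built is
  // "%b = COPY %a", FoldImmediate() can turn the copy into
  // "%b = V_MOV_B32_e32 7" (illustrative register names and value).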
if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass)
    FoldImmediate(*Copy, *Def, OpReg, &MRI);

  bool ImpDef = Def->isImplicitDef();
  while (!ImpDef && Def && Def->isCopy()) {
    if (Def->getOperand(1).getReg().isPhysical())
      break;
    Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg());
    ImpDef = Def && Def->isImplicitDef();
  }
  if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) &&
      !ImpDef)
    Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
}

// Emit the actual waterfall loop, executing the wrapped instruction for each
// unique value of \p Rsrc across all lanes. In the best case we execute 1
// iteration, in the worst case we execute once per lane (64 iterations on
// wave64).
static void
emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI,
                          MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB,
                          const DebugLoc &DL, MachineOperand &Rsrc) {
  MachineFunction &MF = *OrigBB.getParent();
  const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
  unsigned SaveExecOpc =
      ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64;
  unsigned XorTermOpc =
      ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term;
  unsigned AndOpc =
      ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64;
  const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  MachineBasicBlock::iterator I = LoopBB.begin();

  Register VRsrc = Rsrc.getReg();
  unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef());

  Register SaveExec = MRI.createVirtualRegister(BoolXExecRC);
  Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
  Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
  Register AndCond = MRI.createVirtualRegister(BoolXExecRC);
  Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  // Beginning of the loop, read the next Rsrc variant.
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2);
  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3)
      .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3);

  BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc)
      .addReg(SRsrcSub0)
      .addImm(AMDGPU::sub0)
      .addReg(SRsrcSub1)
      .addImm(AMDGPU::sub1)
      .addReg(SRsrcSub2)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcSub3)
      .addImm(AMDGPU::sub3);

  // Update Rsrc operand to use the SGPR Rsrc.
  Rsrc.setReg(SRsrc);
  Rsrc.setIsKill(true);

  // Identify all lanes with identical Rsrc operands in their VGPRs.
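  // The 128-bit descriptor equality test is assembled from two 64-bit
  // compares: a lane matches iff its sub0_sub1 and sub2_sub3 halves both
  // equal the readfirstlane'd value, hence the AND of CondReg0/CondReg1.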
4762 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) 4763 .addReg(SRsrc, 0, AMDGPU::sub0_sub1) 4764 .addReg(VRsrc, 0, AMDGPU::sub0_sub1); 4765 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) 4766 .addReg(SRsrc, 0, AMDGPU::sub2_sub3) 4767 .addReg(VRsrc, 0, AMDGPU::sub2_sub3); 4768 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond) 4769 .addReg(CondReg0) 4770 .addReg(CondReg1); 4771 4772 MRI.setSimpleHint(SaveExec, AndCond); 4773 4774 // Update EXEC to matching lanes, saving original to SaveExec. 4775 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 4776 .addReg(AndCond, RegState::Kill); 4777 4778 // The original instruction is here; we insert the terminators after it. 4779 I = LoopBB.end(); 4780 4781 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 4782 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 4783 .addReg(Exec) 4784 .addReg(SaveExec); 4785 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 4786 } 4787 4788 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 4789 // with SGPRs by iterating over all unique values across all lanes. 4790 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 4791 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 4792 MachineBasicBlock &MBB = *MI.getParent(); 4793 MachineFunction &MF = *MBB.getParent(); 4794 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4795 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4796 MachineRegisterInfo &MRI = MF.getRegInfo(); 4797 MachineBasicBlock::iterator I(&MI); 4798 const DebugLoc &DL = MI.getDebugLoc(); 4799 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4800 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 4801 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4802 4803 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4804 4805 // Save the EXEC mask 4806 BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 4807 4808 // Killed uses in the instruction we are waterfalling around will be 4809 // incorrect due to the added control-flow. 4810 for (auto &MO : MI.uses()) { 4811 if (MO.isReg() && MO.isUse()) { 4812 MRI.clearKillFlags(MO.getReg()); 4813 } 4814 } 4815 4816 // To insert the loop we need to split the block. Move everything after this 4817 // point to a new block, and insert a new empty block between the two. 4818 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4819 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4820 MachineFunction::iterator MBBI(MBB); 4821 ++MBBI; 4822 4823 MF.insert(MBBI, LoopBB); 4824 MF.insert(MBBI, RemainderBB); 4825 4826 LoopBB->addSuccessor(LoopBB); 4827 LoopBB->addSuccessor(RemainderBB); 4828 4829 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 4830 MachineBasicBlock::iterator J = I++; 4831 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4832 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 4833 LoopBB->splice(LoopBB->begin(), &MBB, J); 4834 4835 MBB.addSuccessor(LoopBB); 4836 4837 // Update dominators. We know that MBB immediately dominates LoopBB, that 4838 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4839 // dominates all of the successors transferred to it from MBB that MBB used 4840 // to properly dominate. 
if (MDT) {
    MDT->addNewBlock(LoopBB, &MBB);
    MDT->addNewBlock(RemainderBB, LoopBB);
    for (auto &Succ : RemainderBB->successors()) {
      if (MDT->properlyDominates(&MBB, Succ)) {
        MDT->changeImmediateDominator(Succ, RemainderBB);
      }
    }
  }

  emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);

  // Restore the EXEC mask
  MachineBasicBlock::iterator First = RemainderBB->begin();
  BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
}

// Extract pointer from Rsrc and return a zero-value Rsrc replacement.
static std::tuple<unsigned, unsigned>
extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Extract the ptr from the resource descriptor.
  unsigned RsrcPtr =
      TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
                             AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);

  // Create an empty resource descriptor
  Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
  Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
  uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();

  // Zero64 = 0
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
      .addImm(0);

  // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
      .addImm(RsrcDataFormat & 0xFFFFFFFF);

  // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
      .addImm(RsrcDataFormat >> 32);

  // NewSRsrc = {Zero64, SRsrcFormat}
  BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
      .addReg(Zero64)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(SRsrcFormatLo)
      .addImm(AMDGPU::sub2)
      .addReg(SRsrcFormatHi)
      .addImm(AMDGPU::sub3);

  return std::make_tuple(RsrcPtr, NewSRsrc);
}

void SIInstrInfo::legalizeOperands(MachineInstr &MI,
                                   MachineDominatorTree *MDT) const {
  MachineFunction &MF = *MI.getParent()->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Legalize VOP2
  if (isVOP2(MI) || isVOPC(MI)) {
    legalizeOperandsVOP2(MRI, MI);
    return;
  }

  // Legalize VOP3
  if (isVOP3(MI)) {
    legalizeOperandsVOP3(MRI, MI);
    return;
  }

  // Legalize SMRD
  if (isSMRD(MI)) {
    legalizeOperandsSMRD(MRI, MI);
    return;
  }

  // Legalize REG_SEQUENCE and PHI
  // The register class of the operands must be the same type as the register
  // class of the output.
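  // (For example, a PHI merging an SGPR input from one predecessor with a
  // VGPR input from another is resolved below in favor of the vector class,
  // since the reverse VGPR->SGPR copy is not generally legal: the scalar
  // inputs get copied into VGPRs in their predecessor blocks.)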
if (MI.getOpcode() == AMDGPU::PHI) {
    const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
    for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
      if (!MI.getOperand(i).isReg() ||
          !Register::isVirtualRegister(MI.getOperand(i).getReg()))
        continue;
      const TargetRegisterClass *OpRC =
          MRI.getRegClass(MI.getOperand(i).getReg());
      if (RI.hasVectorRegisters(OpRC)) {
        VRC = OpRC;
      } else {
        SRC = OpRC;
      }
    }

    // If any of the operands are VGPR registers, then they all must be;
    // otherwise we will create illegal VGPR->SGPR copies when legalizing
    // them.
    if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
      if (!VRC) {
        assert(SRC);
        if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
          VRC = &AMDGPU::VReg_1RegClass;
        } else
          VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                    ? RI.getEquivalentAGPRClass(SRC)
                    : RI.getEquivalentVGPRClass(SRC);
      } else {
        VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
                  ? RI.getEquivalentAGPRClass(VRC)
                  : RI.getEquivalentVGPRClass(VRC);
      }
      RC = VRC;
    } else {
      RC = SRC;
    }

    // Update all the operands so they have the same type.
    for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
      MachineOperand &Op = MI.getOperand(I);
      if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
        continue;

      // MI is a PHI instruction.
      MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
      MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();

      // Avoid creating no-op copies with the same src and dst reg class. These
      // confuse some of the machine passes.
      legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
    }
  }

  // REG_SEQUENCE doesn't really require operand legalization, but if one has a
  // VGPR dest type and SGPR sources, insert copies so all operands are
  // VGPRs. This seems to help operand folding / the register coalescer.
  if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
    MachineBasicBlock *MBB = MI.getParent();
    const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
    if (RI.hasVGPRs(DstRC)) {
      // Update all the operands so they are VGPR register classes. These may
      // not be the same register class because REG_SEQUENCE supports mixing
      // subregister index types, e.g.
      // subregister index types, e.g. sub0_sub1 + sub2 + sub3.
      for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
        MachineOperand &Op = MI.getOperand(I);
        if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
          continue;

        const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
        const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
        if (VRC == OpRC)
          continue;

        legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
        Op.setIsKill();
      }
    }

    return;
  }

  // Legalize INSERT_SUBREG
  // src0 must have the same register class as dst
  if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
    Register Dst = MI.getOperand(0).getReg();
    Register Src0 = MI.getOperand(1).getReg();
    const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
    const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
    if (DstRC != Src0RC) {
      MachineBasicBlock *MBB = MI.getParent();
      MachineOperand &Op = MI.getOperand(1);
      legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
    }
    return;
  }

  // Legalize SI_INIT_M0
  if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
    MachineOperand &Src = MI.getOperand(0);
    if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
      Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
    return;
  }

  // Legalize MIMG and MUBUF/MTBUF for shaders.
  //
  // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
  // scratch memory access. In both cases, the legalization never involves
  // conversion to the addr64 form.
  if (isMIMG(MI) ||
      (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
       (isMUBUF(MI) || isMTBUF(MI)))) {
    MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
    if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
      unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
      SRsrc->setReg(SGPR);
    }

    MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
    if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
      unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
      SSamp->setReg(SGPR);
    }
    return;
  }

  // Legalize MUBUF* instructions.
  int RsrcIdx =
      AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
  if (RsrcIdx != -1) {
    // We have an MUBUF instruction
    MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
    unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
    if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
                             RI.getRegClass(RsrcRC))) {
      // The operands are legal.
      // FIXME: We may need to legalize operands besides srsrc.
      return;
    }

    // Legalize a VGPR Rsrc.
    //
    // If the instruction is _ADDR64, we can avoid a waterfall by extracting
    // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
    // a zero-value SRsrc.
    //
    // If the instruction is _OFFSET (both idxen and offen disabled), and we
    // support ADDR64 instructions, we can convert to ADDR64 and do the same as
    // above.
    //
    // Otherwise we are on non-ADDR64 hardware, and/or we have
    // idxen/offen/bothen and we fall back to a waterfall loop.
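    // A rough sketch of the _ADDR64 case (illustrative register numbers):
    //   buffer_load_dword v0, v[1:2], s[4:7] addr64
    // with the descriptor actually living in VGPRs becomes, in effect:
    //   v[8:9]   = rsrc.base(sub0_sub1) + v[1:2]   ; 64-bit pointer add
    //   s[12:15] = { 0, 0, RSRC_DATA_FORMAT }      ; zero-base descriptor
    //   buffer_load_dword v0, v[8:9], s[12:15] addr64
    // so the only divergent input left is VAddr, which ADDR64 permits.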

    MachineBasicBlock &MBB = *MI.getParent();

    MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
    if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
      // This is already an ADDR64 instruction so we need to add the pointer
      // extracted from the resource descriptor to the current value of VAddr.
      Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

      const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
      Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
      Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
        .addDef(CondReg0)
        .addReg(RsrcPtr, 0, AMDGPU::sub0)
        .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
        .addImm(0);

      // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
        .addDef(CondReg1, RegState::Dead)
        .addReg(RsrcPtr, 0, AMDGPU::sub1)
        .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
        .addReg(CondReg0, RegState::Kill)
        .addImm(0);

      // NewVaddr = {NewVaddrLo, NewVaddrHi}
      BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
          .addReg(NewVAddrLo)
          .addImm(AMDGPU::sub0)
          .addReg(NewVAddrHi)
          .addImm(AMDGPU::sub1);

      VAddr->setReg(NewVAddr);
      Rsrc->setReg(NewSRsrc);
    } else if (!VAddr && ST.hasAddr64()) {
      // This instruction is the _OFFSET variant, so we need to convert it to
      // ADDR64.
      assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
             < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
             "FIXME: Need to emit flat atomics here");

      unsigned RsrcPtr, NewSRsrc;
      std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);

      Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
      MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
      MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
      MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
      unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());

      // Atomics with return have an additional tied operand and are
      // missing some of the special bits.
      MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
      MachineInstr *Addr64;

      if (!VDataIn) {
        // Regular buffer load / store.
        MachineInstrBuilder MIB =
            BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                .add(*VData)
                .addReg(NewVAddr)
                .addReg(NewSRsrc)
                .add(*SOffset)
                .add(*Offset);

        // Atomics do not have this operand.
        if (const MachineOperand *GLC =
                getNamedOperand(MI, AMDGPU::OpName::glc)) {
          MIB.addImm(GLC->getImm());
        }
        if (const MachineOperand *DLC =
                getNamedOperand(MI, AMDGPU::OpName::dlc)) {
          MIB.addImm(DLC->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));

        if (const MachineOperand *TFE =
                getNamedOperand(MI, AMDGPU::OpName::tfe)) {
          MIB.addImm(TFE->getImm());
        }

        MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));

        MIB.cloneMemRefs(MI);
        Addr64 = MIB;
      } else {
        // Atomics with return.
        Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
                     .add(*VData)
                     .add(*VDataIn)
                     .addReg(NewVAddr)
                     .addReg(NewSRsrc)
                     .add(*SOffset)
                     .add(*Offset)
                     .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
                     .cloneMemRefs(MI);
      }

      MI.removeFromParent();

      // NewVaddr = {RsrcPtr:sub0, RsrcPtr:sub1}
      BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
              NewVAddr)
          .addReg(RsrcPtr, 0, AMDGPU::sub0)
          .addImm(AMDGPU::sub0)
          .addReg(RsrcPtr, 0, AMDGPU::sub1)
          .addImm(AMDGPU::sub1);
    } else {
      // This is another variant; legalize Rsrc with waterfall loop from VGPRs
      // to SGPRs.
      loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
    }
  }
}

void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
                             MachineDominatorTree *MDT) const {
  SetVectorType Worklist;
  Worklist.insert(&TopInst);

  while (!Worklist.empty()) {
    MachineInstr &Inst = *Worklist.pop_back_val();
    MachineBasicBlock *MBB = Inst.getParent();
    MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

    unsigned Opcode = Inst.getOpcode();
    unsigned NewOpcode = getVALUOp(Inst);

    // Handle some special cases
    switch (Opcode) {
    default:
      break;
    case AMDGPU::S_ADD_U64_PSEUDO:
    case AMDGPU::S_SUB_U64_PSEUDO:
      splitScalar64BitAddSub(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;
    case AMDGPU::S_ADD_I32:
    case AMDGPU::S_SUB_I32:
      // FIXME: The u32 versions currently selected use the carry.
      if (moveScalarAddSub(Worklist, Inst, MDT))
        continue;

      // Default handling
      break;
    case AMDGPU::S_AND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_OR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NAND_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOR_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XNOR_B64:
      if (ST.hasDLInsts())
        splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT);
      else
        splitScalar64BitXnor(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ANDN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ORN2_B64:
      splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOT_B64:
      splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BCNT1_I32_B64:
      splitScalar64BitBCNT(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_BFE_I64:
      splitScalar64BitBFE(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_LSHL_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B32:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B32_e64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHL_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHLREV_B64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_ASHR_I64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_ASHRREV_I64;
        swapOperands(Inst);
      }
      break;
    case AMDGPU::S_LSHR_B64:
      if (ST.hasOnlyRevVALUShifts()) {
        NewOpcode = AMDGPU::V_LSHRREV_B64;
        swapOperands(Inst);
      }
      break;

    case AMDGPU::S_ABS_I32:
      lowerScalarAbs(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_CBRANCH_SCC0:
    case AMDGPU::S_CBRANCH_SCC1:
      // Clear unused bits of vcc
      if (ST.isWave32())
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32),
                AMDGPU::VCC_LO)
            .addReg(AMDGPU::EXEC_LO)
            .addReg(AMDGPU::VCC_LO);
      else
        BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64),
                AMDGPU::VCC)
            .addReg(AMDGPU::EXEC)
            .addReg(AMDGPU::VCC);
      break;

    case AMDGPU::S_BFE_U64:
    case AMDGPU::S_BFM_B64:
      llvm_unreachable("Moving this op to VALU not implemented");

    case AMDGPU::S_PACK_LL_B32_B16:
    case AMDGPU::S_PACK_LH_B32_B16:
    case AMDGPU::S_PACK_HH_B32_B16:
      movePackToVALU(Worklist, MRI, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_XNOR_B32:
      lowerScalarXnor(Worklist, Inst);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NAND_B32:
      splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_NOR_B32:
      splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ANDN2_B32:
      splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
      Inst.eraseFromParent();
      continue;

    case AMDGPU::S_ORN2_B32:
      splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
      Inst.eraseFromParent();
      continue;

    // TODO: remove as soon as everything is ready
    // to replace VGPR to SGPR copy with V_READFIRSTLANEs.
    // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO
    // can only be selected from the uniform SDNode.
    case AMDGPU::S_ADD_CO_PSEUDO:
    case AMDGPU::S_SUB_CO_PSEUDO: {
      unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO)
                         ? AMDGPU::V_ADDC_U32_e64
                         : AMDGPU::V_SUBB_U32_e64;
      const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

      Register CarryInReg = Inst.getOperand(4).getReg();
      if (!MRI.constrainRegClass(CarryInReg, CarryRC)) {
        Register NewCarryReg = MRI.createVirtualRegister(CarryRC);
        BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg)
            .addReg(CarryInReg);
        // Use the constrained copy as the carry-in from here on.
        CarryInReg = NewCarryReg;
      }

      Register CarryOutReg = Inst.getOperand(1).getReg();

      Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass(
          MRI.getRegClass(Inst.getOperand(0).getReg())));
      MachineInstr *CarryOp =
          BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg)
              .addReg(CarryOutReg, RegState::Define)
              .add(Inst.getOperand(2))
              .add(Inst.getOperand(3))
              .addReg(CarryInReg)
              .addImm(0);
      legalizeOperands(*CarryOp);
      MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg);
      addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist);
      Inst.eraseFromParent();
    }
      continue;
    case AMDGPU::S_UADDO_PSEUDO:
    case AMDGPU::S_USUBO_PSEUDO: {
      const DebugLoc &DL = Inst.getDebugLoc();
      MachineOperand &Dest0 = Inst.getOperand(0);
      MachineOperand &Dest1 = Inst.getOperand(1);
      MachineOperand &Src0 = Inst.getOperand(2);
      MachineOperand &Src1 = Inst.getOperand(3);

      unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO)
                         ? AMDGPU::V_ADD_CO_U32_e64
                         : AMDGPU::V_SUB_CO_U32_e64;
      const TargetRegisterClass *NewRC =
          RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
      Register DestReg = MRI.createVirtualRegister(NewRC);
      MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
                                   .addReg(Dest1.getReg(), RegState::Define)
                                   .add(Src0)
                                   .add(Src1)
                                   .addImm(0); // clamp bit

      legalizeOperands(*NewInstr, MDT);

      MRI.replaceRegWith(Dest0.getReg(), DestReg);
      addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
                                   Worklist);
      Inst.eraseFromParent();
    }
      continue;

    case AMDGPU::S_CSELECT_B32:
    case AMDGPU::S_CSELECT_B64:
      lowerSelect(Worklist, Inst, MDT);
      Inst.eraseFromParent();
      continue;
    }

    if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
      // We cannot move this instruction to the VALU, so we should try to
      // legalize its operands instead.
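      // (A hedged note: this covers opcodes for which getVALUOp has no VALU
      // equivalent. legalizeOperands may still rewrite their operands, e.g.
      // by inserting copies or a waterfall loop, so the instruction becomes
      // encodable even though it stays on its original unit.)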
      legalizeOperands(Inst, MDT);
      continue;
    }

    // Use the new VALU Opcode.
    const MCInstrDesc &NewDesc = get(NewOpcode);
    Inst.setDesc(NewDesc);

    // Remove any references to SCC. Vector instructions can't read from it,
    // and we're just about to add the implicit use / defs of VCC, and we
    // don't want both.
    for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
      MachineOperand &Op = Inst.getOperand(i);
      if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
        // Only propagate through live-def of SCC.
        if (Op.isDef() && !Op.isDead())
          addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
        Inst.RemoveOperand(i);
      }
    }

    if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
      // We are converting these to a BFE, so we need to add the missing
      // operands for the size and offset.
      unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
      Inst.addOperand(MachineOperand::CreateImm(0));
      Inst.addOperand(MachineOperand::CreateImm(Size));

    } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
      // The VALU version adds the second operand to the result, so insert an
      // extra 0 operand.
      Inst.addOperand(MachineOperand::CreateImm(0));
    }

    Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
    fixImplicitOperands(Inst);

    if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
      const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
      // If we need to move this to VGPRs, we need to unpack the second operand
      // back into the 2 separate ones for bit offset and width.
      assert(OffsetWidthOp.isImm() &&
             "Scalar BFE is only implemented for constant width and offset");
      uint32_t Imm = OffsetWidthOp.getImm();

      uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
      uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
      Inst.RemoveOperand(2);                      // Remove old immediate.
      Inst.addOperand(MachineOperand::CreateImm(Offset));
      Inst.addOperand(MachineOperand::CreateImm(BitWidth));
    }

    bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
    unsigned NewDstReg = AMDGPU::NoRegister;
    if (HasDst) {
      Register DstReg = Inst.getOperand(0).getReg();
      if (Register::isPhysicalRegister(DstReg))
        continue;

      // Update the destination register class.
      const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
      if (!NewDstRC)
        continue;

      if (Inst.isCopy() &&
          Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
          NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
        // Instead of creating a copy where src and dst are the same register
        // class, we just replace all uses of dst with src. These kinds of
        // copies interfere with the heuristics MachineSink uses to decide
        // whether or not to split a critical edge, since the pass assumes
        // that copies will end up as machine instructions and not be
        // eliminated.
        addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
        MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
        MRI.clearKillFlags(Inst.getOperand(1).getReg());
        Inst.getOperand(0).setReg(DstReg);

        // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
        // these are deleted later, but at -O0 it would leave a suspicious
        // looking illegal copy of an undef register.
        for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
          Inst.RemoveOperand(I);
        Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
        continue;
      }

      NewDstReg = MRI.createVirtualRegister(NewDstRC);
      MRI.replaceRegWith(DstReg, NewDstReg);
    }

    // Legalize the operands
    legalizeOperands(Inst, MDT);

    if (HasDst)
      addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
  }
}

// Add/sub require special handling to deal with carry outs.
bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
                                   MachineDominatorTree *MDT) const {
  if (ST.hasAddNoCarry()) {
    // Assume there is no user of scc since we don't select this in that case.
    // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
    // is used.

    MachineBasicBlock &MBB = *Inst.getParent();
    MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

    Register OldDstReg = Inst.getOperand(0).getReg();
    Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    unsigned Opc = Inst.getOpcode();
    assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);

    unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
      AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;

    assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
    Inst.RemoveOperand(3);

    Inst.setDesc(get(NewOpc));
    Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
    Inst.addImplicitDefUseOperands(*MBB.getParent());
    MRI.replaceRegWith(OldDstReg, ResultReg);
    legalizeOperands(Inst, MDT);

    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return true;
  }

  return false;
}

void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst,
                              MachineDominatorTree *MDT) const {

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  MachineOperand &Cond = Inst.getOperand(3);

  Register SCCSource = Cond.getReg();
  // Find SCC def, and if that is a copy (SCC = COPY reg) then use reg instead.
  if (!Cond.isUndef()) {
    for (MachineInstr &CandI :
         make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)),
                    Inst.getParent()->rend())) {
      if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) !=
          -1) {
        if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) {
          SCCSource = CandI.getOperand(1).getReg();
        }
        break;
      }
    }
  }

  // If this is a trivial select where the condition is effectively not SCC
  // (SCCSource is a source of copy to SCC), then the select is semantically
  // equivalent to copying SCCSource. Hence, there is no need to create
  // V_CNDMASK, we can just use that and bail out.
  if ((SCCSource != AMDGPU::SCC) && Src0.isImm() && (Src0.getImm() == -1) &&
      Src1.isImm() && (Src1.getImm() == 0)) {
    MRI.replaceRegWith(Dest.getReg(), SCCSource);
    return;
  }

  const TargetRegisterClass *TC = ST.getWavefrontSize() == 64
                                      ? &AMDGPU::SReg_64_XEXECRegClass
                                      : &AMDGPU::SReg_32_XM0_XEXECRegClass;
  Register CopySCC = MRI.createVirtualRegister(TC);

  if (SCCSource == AMDGPU::SCC) {
    // Insert a trivial select instead of creating a copy, because a copy from
    // SCC would semantically mean just copying a single bit, but we may need
    // the result to be a vector condition mask that needs preserving.
    unsigned Opcode = (ST.getWavefrontSize() == 64) ? AMDGPU::S_CSELECT_B64
                                                    : AMDGPU::S_CSELECT_B32;
    auto NewSelect =
        BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0);
    NewSelect->getOperand(3).setIsUndef(Cond.isUndef());
  } else {
    BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC).addReg(SCCSource);
  }

  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  auto UpdatedInst =
      BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg)
          .addImm(0)
          .add(Src1) // False
          .addImm(0)
          .add(Src0) // True
          .addReg(CopySCC);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  legalizeOperands(*UpdatedInst, MDT);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
                                 MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  DebugLoc DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  unsigned SubOp = ST.hasAddNoCarry() ?
    AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;

  BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
    .addImm(0)
    .addReg(Src.getReg());

  BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
    .addReg(Src.getReg())
    .addReg(TmpReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
                                  MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  if (ST.hasDLInsts()) {
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
    legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
      .add(Src0)
      .add(Src1);

    MRI.replaceRegWith(Dest.getReg(), NewDest);
    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  } else {
    // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
    // invert either source and then perform the XOR. If either source is a
    // scalar register, then we can leave the inversion on the scalar unit to
    // achieve a better distribution of scalar and vector instructions.
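    // For instance (an illustrative lowering), with s0 scalar and v1 vector:
    //   s_xnor_b32 %d, %s0, %v1
    // becomes the pair
    //   s_not_b32 %t, %s0        ; the inversion stays on the scalar unit
    //   s_xor_b32 %d, %t, %v1    ; re-queued and later moved to V_XOR_B32
    // and only the final xor has to migrate to the VALU.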
    bool Src0IsSGPR = Src0.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
    bool Src1IsSGPR = Src1.isReg() &&
                      RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
    MachineInstr *Xor;
    Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

    // Build a pair of scalar instructions and add them to the work list.
    // The next iteration over the work list will lower these to the vector
    // unit as necessary.
    if (Src0IsSGPR) {
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
                .addReg(Temp)
                .add(Src1);
    } else if (Src1IsSGPR) {
      BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1);
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest)
                .add(Src0)
                .addReg(Temp);
    } else {
      Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp)
                .add(Src0)
                .add(Src1);
      MachineInstr *Not =
          BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp);
      Worklist.insert(Not);
    }

    MRI.replaceRegWith(Dest.getReg(), NewDest);

    Worklist.insert(Xor);

    addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
  }
}

void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist,
                                      MachineInstr &Inst,
                                      unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm)
    .add(Src0)
    .add(Src1);

  MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest)
    .addReg(Interm);

  Worklist.insert(&Op);
  Worklist.insert(&Not);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
}

void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist,
                                     MachineInstr &Inst,
                                     unsigned Opcode) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);

  Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);

  MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm)
    .add(Src1);

  MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest)
    .add(Src0)
    .addReg(Interm);

  Worklist.insert(&Not);
  Worklist.insert(&Op);

  MRI.replaceRegWith(Dest.getReg(), NewDest);
  addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitUnaryOp(
    SetVectorType &Worklist, MachineInstr &Inst,
    unsigned Opcode) const {
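  // Sketch of the transform this performs (names illustrative): a 64-bit op
  // such as s_not_b64 %d, %x is rewritten as two 32-bit halves plus a rebuild
  // of the 64-bit result:
  //   %lo = <op32> %x.sub0
  //   %hi = <op32> %x.sub1
  //   %d  = REG_SEQUENCE %lo, sub0, %hi, sub1
  // The halves are queued on the worklist and moved to the VALU later.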
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1);

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // We don't need to legalizeOperands here because for a single operand, src0
  // will support any kind of input.

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist,
                                         MachineInstr &Inst,
                                         MachineDominatorTree *MDT) const {
  bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO);

  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);

  Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
  Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  Register CarryReg = MRI.createVirtualRegister(CarryRC);
  Register DeadCarryReg = MRI.createVirtualRegister(CarryRC);

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();
  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg());
  const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg());
  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);

  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  unsigned LoOpc = IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
  MachineInstr *LoHalf =
    BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
      .addReg(CarryReg, RegState::Define)
      .add(SrcReg0Sub0)
      .add(SrcReg1Sub0)
      .addImm(0); // clamp bit

  unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
  MachineInstr *HiHalf =
    BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
      .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
      .add(SrcReg0Sub1)
      .add(SrcReg1Sub1)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0); // clamp bit

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  // Try to legalize the operands in case we need to swap the order to keep it
  // valid.
  legalizeOperands(*LoHalf, MDT);
  legalizeOperands(*HiHalf, MDT);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
                                           MachineInstr &Inst, unsigned Opcode,
                                           MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  DebugLoc DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const MCInstrDesc &InstDesc = get(Opcode);
  const TargetRegisterClass *Src0RC = Src0.isReg() ?
    MRI.getRegClass(Src0.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
  const TargetRegisterClass *Src1RC = Src1.isReg() ?
    MRI.getRegClass(Src1.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);

  MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub0, Src0SubRC);
  MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub0, Src1SubRC);
  MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
                                                       AMDGPU::sub1, Src0SubRC);
  MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
                                                       AMDGPU::sub1, Src1SubRC);

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
  const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
  const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);

  Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
                              .add(SrcReg0Sub0)
                              .add(SrcReg1Sub0);

  Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
  MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
                              .add(SrcReg0Sub1)
                              .add(SrcReg1Sub1);

  Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
    .addReg(DestSub0)
    .addImm(AMDGPU::sub0)
    .addReg(DestSub1)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), FullDestReg);

  Worklist.insert(&LoHalf);
  Worklist.insert(&HiHalf);

  // Move all users of this moved value.
  addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
                                       MachineInstr &Inst,
                                       MachineDominatorTree *MDT) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineBasicBlock::iterator MII = Inst;

  const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());

  Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  MachineOperand* Op0;
  MachineOperand* Op1;

  if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
    Op0 = &Src0;
    Op1 = &Src1;
  } else {
    Op0 = &Src1;
    Op1 = &Src0;
  }

  BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
    .add(*Op0);

  Register NewDest = MRI.createVirtualRegister(DestRC);

  MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
    .addReg(Interm)
    .add(*Op1);

  MRI.replaceRegWith(Dest.getReg(), NewDest);

  Worklist.insert(&Xor);
}

void SIInstrInfo::splitScalar64BitBCNT(
    SetVectorType &Worklist, MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();

  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  MachineOperand &Src = Inst.getOperand(1);

  const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
  const TargetRegisterClass *SrcRC = Src.isReg() ?
    MRI.getRegClass(Src.getReg()) :
    &AMDGPU::SGPR_32RegClass;

  Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

  const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);

  MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub0, SrcSubRC);
  MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
                                                      AMDGPU::sub1, SrcSubRC);

  BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);

  BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);

  // We don't need to legalize operands here. src0 for either instruction can
  // be an SGPR, and the second input is unused or determined here.
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
                                      MachineInstr &Inst) const {
  MachineBasicBlock &MBB = *Inst.getParent();
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  MachineBasicBlock::iterator MII = Inst;
  const DebugLoc &DL = Inst.getDebugLoc();

  MachineOperand &Dest = Inst.getOperand(0);
  uint32_t Imm = Inst.getOperand(2).getImm();
  uint32_t Offset = Imm & 0x3f;               // Extract bits [5:0].
  uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].

  (void) Offset;

  // Only sext_inreg cases handled.
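  // For reference (an informal example of the encoding checked below): the
  // S_BFE immediate packs the field offset in bits [5:0] and the field width
  // in bits [22:16], so
  //   Imm = 0x100000  ->  Offset = 0, BitWidth = 16
  // i.e. a 16-bit sign-extension from the low half of the source.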
  assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
         Offset == 0 && "Not implemented");

  if (BitWidth < 32) {
    Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
        .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
        .addImm(0)
        .addImm(BitWidth);

    BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
      .addImm(31)
      .addReg(MidRegLo);

    BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
      .addReg(MidRegLo)
      .addImm(AMDGPU::sub0)
      .addReg(MidRegHi)
      .addImm(AMDGPU::sub1);

    MRI.replaceRegWith(Dest.getReg(), ResultReg);
    addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
    return;
  }

  MachineOperand &Src = Inst.getOperand(1);
  Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);

  BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
    .addImm(31)
    .addReg(Src.getReg(), 0, AMDGPU::sub0);

  BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
    .addReg(Src.getReg(), 0, AMDGPU::sub0)
    .addImm(AMDGPU::sub0)
    .addReg(TmpReg)
    .addImm(AMDGPU::sub1);

  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addUsersToMoveToVALUWorklist(
    Register DstReg,
    MachineRegisterInfo &MRI,
    SetVectorType &Worklist) const {
  for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
         E = MRI.use_end(); I != E;) {
    MachineInstr &UseMI = *I->getParent();

    unsigned OpNo = 0;

    switch (UseMI.getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::WQM:
    case AMDGPU::SOFT_WQM:
    case AMDGPU::WWM:
    case AMDGPU::REG_SEQUENCE:
    case AMDGPU::PHI:
    case AMDGPU::INSERT_SUBREG:
      break;
    default:
      OpNo = I.getOperandNo();
      break;
    }

    if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) {
      Worklist.insert(&UseMI);

      do {
        ++I;
      } while (I != E && I->getParent() == &UseMI);
    } else {
      ++I;
    }
  }
}

void SIInstrInfo::movePackToVALU(SetVectorType &Worklist,
                                 MachineRegisterInfo &MRI,
                                 MachineInstr &Inst) const {
  Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  MachineBasicBlock *MBB = Inst.getParent();
  MachineOperand &Src0 = Inst.getOperand(1);
  MachineOperand &Src1 = Inst.getOperand(2);
  const DebugLoc &DL = Inst.getDebugLoc();

  switch (Inst.getOpcode()) {
  case AMDGPU::S_PACK_LL_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // FIXME: Can do a lot better if we know the high bits of src0 or src1 are
    // 0.
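    // Roughly (an illustrative expansion), s_pack_ll_b32_b16 %d, %x, %y
    // becomes:
    //   v_mov_b32     %imm, 0xffff
    //   v_and_b32     %tmp, %imm, %x       ; keep x[15:0]
    //   v_lshl_or_b32 %d, %y, 16, %tmp     ; d = (y << 16) | tmp
    // The shift does not mask %y first; its low 16 bits simply land in
    // d[31:16].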
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg)
      .addReg(ImmReg, RegState::Kill)
      .add(Src0);

    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg)
      .add(Src1)
      .addImm(16)
      .addReg(TmpReg, RegState::Kill);
    break;
  }
  case AMDGPU::S_PACK_LH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg)
      .addReg(ImmReg, RegState::Kill)
      .add(Src0)
      .add(Src1);
    break;
  }
  case AMDGPU::S_PACK_HH_B32_B16: {
    Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
      .addImm(16)
      .add(Src0);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg)
      .addImm(0xffff0000);
    BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg)
      .add(Src1)
      .addReg(ImmReg, RegState::Kill)
      .addReg(TmpReg, RegState::Kill);
    break;
  }
  default:
    llvm_unreachable("unhandled s_pack_* instruction");
  }

  MachineOperand &Dest = Inst.getOperand(0);
  MRI.replaceRegWith(Dest.getReg(), ResultReg);
  addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
}

void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op,
                                               MachineInstr &SCCDefInst,
                                               SetVectorType &Worklist) const {
  bool SCCUsedImplicitly = false;

  // Ensure that def inst defines SCC, which is still live.
  assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() &&
         !Op.isDead() && Op.getParent() == &SCCDefInst);
  SmallVector<MachineInstr *, 4> CopyToDelete;
  // This assumes that all the users of SCC are in the same block
  // as the SCC def.
  for (MachineInstr &MI : // Skip the def inst itself.
       make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)),
                  SCCDefInst.getParent()->end())) {
    // Check if SCC is used first.
    if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) {
      if (MI.isCopy()) {
        MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
        Register DestReg = MI.getOperand(0).getReg();

        for (auto &User : MRI.use_nodbg_instructions(DestReg)) {
          if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) ||
              (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) {
            User.getOperand(4).setReg(RI.getVCC());
            Worklist.insert(&User);
          } else if (User.getOpcode() == AMDGPU::V_CNDMASK_B32_e64) {
            User.getOperand(5).setReg(RI.getVCC());
            // No need to add to Worklist.
          }
        }
        CopyToDelete.push_back(&MI);
      } else {
        if (MI.getOpcode() == AMDGPU::S_CSELECT_B32 ||
            MI.getOpcode() == AMDGPU::S_CSELECT_B64) {
          // This is an implicit use of SCC that the SCC users are expected
          // to handle.
          // We cannot preserve the edge to the user so add the explicit
          // copy: SCC = COPY VCC.
          // The copy will be cleaned up during the processing of the user
          // in lowerSelect.
          SCCUsedImplicitly = true;
        }

        Worklist.insert(&MI);
      }
    }
    // Exit if we find another SCC def.
    if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
      break;
  }
  for (auto &Copy : CopyToDelete)
    Copy->eraseFromParent();

  if (SCCUsedImplicitly) {
    BuildMI(*SCCDefInst.getParent(), std::next(SCCDefInst.getIterator()),
            SCCDefInst.getDebugLoc(), get(AMDGPU::COPY), AMDGPU::SCC)
        .addReg(RI.getVCC());
  }
}

const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
  const MachineInstr &Inst) const {
  const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);

  switch (Inst.getOpcode()) {
  // For target instructions, getOpRegClass just returns the virtual register
  // class associated with the operand, so we need to find an equivalent VGPR
  // register class in order to move the instruction to the VALU.
  case AMDGPU::COPY:
  case AMDGPU::PHI:
  case AMDGPU::REG_SEQUENCE:
  case AMDGPU::INSERT_SUBREG:
  case AMDGPU::WQM:
  case AMDGPU::SOFT_WQM:
  case AMDGPU::WWM: {
    const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
    if (RI.hasAGPRs(SrcRC)) {
      if (RI.hasAGPRs(NewDstRC))
        return nullptr;

      switch (Inst.getOpcode()) {
      case AMDGPU::PHI:
      case AMDGPU::REG_SEQUENCE:
      case AMDGPU::INSERT_SUBREG:
        NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
        break;
      default:
        NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      }

      if (!NewDstRC)
        return nullptr;
    } else {
      if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
        return nullptr;

      NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
      if (!NewDstRC)
        return nullptr;
    }

    return NewDstRC;
  }
  default:
    return NewDstRC;
  }
}

// Find the one SGPR operand we are allowed to use.
Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
                                   int OpIndices[3]) const {
  const MCInstrDesc &Desc = MI.getDesc();

  // Find the one SGPR operand we are allowed to use.
  //
  // First we need to consider the instruction's operand requirements before
  // legalizing. Some operands are required to be SGPRs, such as implicit uses
  // of VCC, but we are still bound by the constant bus requirement to only use
  // one.
  //
  // If the operand's class is an SGPR, we can never move it.

  Register SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  Register UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    Register Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR.
  // If an SGPR is equal to one of the others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
    return (22ULL << 44) | // IMG_FORMAT_32_FLOAT
           (1ULL << 56) |  // RESOURCE_LEVEL = 1
           (3ULL << 60);   // OOB_SELECT = 3
  }

  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    // Set ATC = 1. GFX9 doesn't have this bit.
    if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (1ULL << 56);

    // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
    // BTW, it disables TC L2 and therefore decreases performance.
    if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size

  // GFX9 doesn't have ELEMENT_SIZE.
  if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
    Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
  }

  // IndexStride = 64 / 32 depending on the wavefront size (encoded as 3 / 2).
  uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
  Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
      ST.getGeneration() <= AMDGPUSubtarget::GFX9)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyDef(int Opc) const {
  return get(Opc).mayLoad() &&
         (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
}

unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
                                    int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;

  assert(!MI.memoperands_empty() &&
         (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
}

unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
                                        int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
  assert(Addr && Addr->isFI());
  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
}

unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (!MI.mayLoad())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (!MI.mayStore())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }

  return Size;
}

unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
  unsigned DescSize = Desc.getSize();

  // If we have a definitive size, we can use it. Otherwise we need to inspect
  // the operands to know the size.
  if (isFixedSize(MI))
    return DescSize;

  // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
  if (isVALU(MI) || isSALU(MI)) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    if (Src0Idx == -1)
      return DescSize; // No operands.

    if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);

    int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
    if (Src1Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);
    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (Src2Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
      return isVOP3(MI) ? 12 : (DescSize + 4);

    return DescSize;
  }

  // Check whether we have extra NSA words.
  if (isMIMG(MI)) {
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx < 0)
      return 8;

    int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
  }

  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleSize(MI);
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(),
                              &MF->getSubtarget());
  }
  default:
    return DescSize;
  }
}

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}

void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
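  // In outline (an informal sketch of the rewrite below), a divergent loop
  //   bb.loop:
  //     ...
  //     SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %bb.loop
  // becomes
  //   bb.loop:
  //     %phi = PHI %zero, <preheader>, %break, %bb.loop
  //     ...
  //     %break = SI_IF_BREAK %phi, %cond
  //     SI_LOOP %break, %bb.loop
  // so the accumulated exit mask, not the scalar branch, controls the back
  // edge.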
void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
    Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC());
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC());
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}
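// A sketch of the loop rewrite (names made up): a latch ending in
//   SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %bb.header
// is rewritten so the header PHI carries the accumulated break mask and the
// latch re-enters the loop through SI_LOOP:
//
//   bb.header (LoopEntry):
//     %dst = PHI %zero, %bb.preheader, %backedge, %bb.latch
//   bb.latch (LoopEnd):
//     %backedge = SI_IF_BREAK %dst, %cond
//     SI_LOOP %backedge, %bb.header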
ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (PostRASchedulerList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" },
    { MO_ABS32_LO, "amdgpu-abs32-lo" },
    { MO_ABS32_HI, "amdgpu-abs32-hi" },
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           Register DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC());
  MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC());

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
           .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator I,
                                               const DebugLoc &DL,
                                               Register DestReg,
                                               RegScavenger &RS) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg);

  // If available, prefer to use vcc.
  Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC)
                             ? Register(RI.getVCC())
                             : RS.scavengeRegister(RI.getBoolRC(), I, 0, false);

  // TODO: Users need to deal with this.
  if (!UnusedCarry.isValid())
    return MachineInstrBuilder();

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg)
           .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}
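// Typical caller pattern (a sketch modeled on frame-index elimination; the
// source operands appended after the call are the caller's responsibility):
//
//   TII->getAddNoCarry(MBB, I, DL, ResultReg)
//       .addImm(Offset)
//       .addReg(BaseReg)
//       .addImm(0); // clamp bit, needed for the _e64 forms
//
// Callers of the RegScavenger variant must additionally be prepared for an
// empty MachineInstrBuilder when no carry-out register could be scavenged.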
bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();

  if (!ST.isWave32())
    return;

  for (auto &Op : MI.implicit_operands()) {
    if (Op.isReg() && Op.getReg() == AMDGPU::VCC)
      Op.setReg(AMDGPU::VCC_LO);
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass);
}

unsigned SIInstrInfo::getNumFlatOffsetBits(unsigned AddrSpace,
                                           bool Signed) const {
  if (!ST.hasFlatInstOffsets())
    return 0;

  if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
    return 0;

  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10)
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace,
                                    bool Signed) const {
  // TODO: Should 0 be special cased?
  if (!ST.hasFlatInstOffsets())
    return false;

  if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
    return false;

  if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
    return (Signed && isInt<12>(Offset)) ||
           (!Signed && isUInt<11>(Offset));
  }

  return (Signed && isInt<13>(Offset)) ||
         (!Signed && isUInt<12>(Offset));
}
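// Worked examples (derived from the checks above): with signed offsets on a
// non-buggy address space, GFX9 accepts isInt<13> values such as -4096 and
// 4095 but rejects 4096, while GFX10 only accepts isInt<12> values in
// [-2048, 2047]. Unsigned offsets are one bit narrower in both cases.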
// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5,
  GFX10 = 6,
  SDWA10 = 7
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  case AMDGPUSubtarget::GFX10:
    return SIEncodingFamily::GFX10;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
  switch (MCOp) {
  // These opcodes use indirect register addressing, so they need special
  // handling by codegen (currently missing). It is therefore too risky to
  // allow these opcodes to be selected by the DPP combiner or the SDWA
  // peephole pass.
  case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
  case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
    return true;
  default:
    return false;
  }
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() == AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has the UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
    switch (ST.getGeneration()) {
    default:
      Gen = SIEncodingFamily::SDWA;
      break;
    case AMDGPUSubtarget::GFX9:
      Gen = SIEncodingFamily::SDWA9;
      break;
    case AMDGPUSubtarget::GFX10:
      Gen = SIEncodingFamily::SDWA10;
      break;
    }
  }

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  if (isAsmOnlyOpcode(MCOp))
    return -1;

  return MCOp;
}
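// Illustrative mapping (opcode names assumed from the generated table): on a
// GFX10 subtarget, a pseudo such as AMDGPU::V_ADD_F32_e32 resolves through
// SIEncodingFamily::GFX10 to its _gfx10 MC opcode, while a pseudo with no
// encoding in the selected family comes back as (uint16_t)-1 and is reported
// to the caller as -1.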
static
TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
                             getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // inserted the subreg we're looking for
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // the subreg in the rest of the reg
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // subreg of subreg isn't supported
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!Register::isVirtualRegister(P.Reg))
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}
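// Example of the walk above (MIR sketch, register numbers made up):
//
//   %0:vgpr_32 = V_MOV_B32_e32 ...
//   %1:vgpr_32 = V_MOV_B32_e32 ...
//   %2:vreg_64 = REG_SEQUENCE %0, %subreg.sub0, %1, %subreg.sub1
//
// getVRegSubRegDef({%2, sub1}, MRI) follows the REG_SEQUENCE operand pair
// for sub1 and returns the V_MOV_B32_e32 defining %1.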
bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan at the use.
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseInstScan = 10;
  int NumUseInst = 0;

  for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) {
    // Don't bother searching between blocks, although it is possible this
    // block doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUseInst > MaxUseInstScan)
      return true;
  }

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->readsRegister(VReg))
      if (--NumUseInst == 0)
        return false;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }
}
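// Illustrative caller pattern (a sketch): a fold that wants to propagate a
// VALU def to a use must prove the exec mask cannot change in between,
// because the def only wrote the lanes that were active at the def point:
//
//   if (execMayBeModifiedBeforeUse(MRI, Reg, *DefMI, *UseMI))
//     return false; // conservatively keep the intervening copy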
MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying
  // to a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  // A similar issue also exists with spilling and reloading $exec registers.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
        (DstReg.isVirtual() != SrcReg.isVirtual())) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
      const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
      if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
        return nullptr;
      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
        return nullptr;
      }
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}
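// Worked example for the bundle case above: a BUNDLE whose two members have
// model latencies 4 and 1 reports max(4, 1) + 2 - 1 = 5 cycles, i.e. the
// longest member latency plus one cycle per extra bundled instruction.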