//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests for
// long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p OpName, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this check.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
244 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 245 return false; 246 247 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 248 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 249 return true; 250 } 251 252 return false; 253 } 254 255 static bool isStride64(unsigned Opc) { 256 switch (Opc) { 257 case AMDGPU::DS_READ2ST64_B32: 258 case AMDGPU::DS_READ2ST64_B64: 259 case AMDGPU::DS_WRITE2ST64_B32: 260 case AMDGPU::DS_WRITE2ST64_B64: 261 return true; 262 default: 263 return false; 264 } 265 } 266 267 bool SIInstrInfo::getMemOperandsWithOffset( 268 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 269 int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) 270 const { 271 if (!LdSt.mayLoadOrStore()) 272 return false; 273 274 unsigned Opc = LdSt.getOpcode(); 275 OffsetIsScalable = false; 276 const MachineOperand *BaseOp, *OffsetOp; 277 278 if (isDS(LdSt)) { 279 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 280 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 281 if (OffsetOp) { 282 // Normal, single offset LDS instruction. 283 if (!BaseOp) { 284 // DS_CONSUME/DS_APPEND use M0 for the base address. 285 // TODO: find the implicit use operand for M0 and use that as BaseOp? 286 return false; 287 } 288 BaseOps.push_back(BaseOp); 289 Offset = OffsetOp->getImm(); 290 } else { 291 // The 2 offset instructions use offset0 and offset1 instead. We can treat 292 // these as a load with a single offset if the 2 offsets are consecutive. 293 // We will use this for some partially aligned loads. 294 const MachineOperand *Offset0Op = 295 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 296 const MachineOperand *Offset1Op = 297 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 298 299 unsigned Offset0 = Offset0Op->getImm(); 300 unsigned Offset1 = Offset1Op->getImm(); 301 if (Offset0 + 1 != Offset1) 302 return false; 303 304 // Each of these offsets is in element sized units, so we need to convert 305 // to bytes of the individual reads. 306 307 unsigned EltSize; 308 if (LdSt.mayLoad()) 309 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 310 else { 311 assert(LdSt.mayStore()); 312 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 313 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 314 } 315 316 if (isStride64(Opc)) 317 EltSize *= 64; 318 319 BaseOps.push_back(BaseOp); 320 Offset = EltSize * Offset0; 321 } 322 return true; 323 } 324 325 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 326 const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset); 327 if (SOffset && SOffset->isReg()) { 328 // We can only handle this if it's a stack access, as any other resource 329 // would require reporting multiple base registers. 330 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 331 if (AddrReg && !AddrReg->isFI()) 332 return false; 333 334 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 335 const SIMachineFunctionInfo *MFI 336 = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>(); 337 if (RSrc->getReg() != MFI->getScratchRSrcReg()) 338 return false; 339 340 const MachineOperand *OffsetImm = 341 getNamedOperand(LdSt, AMDGPU::OpName::offset); 342 BaseOps.push_back(RSrc); 343 BaseOps.push_back(SOffset); 344 Offset = OffsetImm->getImm(); 345 return true; 346 } 347 348 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 349 if (!BaseOp) // e.g. 
BUFFER_WBINVL1_VOL 350 return false; 351 BaseOps.push_back(BaseOp); 352 353 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 354 if (BaseOp) 355 BaseOps.push_back(BaseOp); 356 357 const MachineOperand *OffsetImm = 358 getNamedOperand(LdSt, AMDGPU::OpName::offset); 359 Offset = OffsetImm->getImm(); 360 if (SOffset) // soffset can be an inline immediate. 361 Offset += SOffset->getImm(); 362 return true; 363 } 364 365 if (isSMRD(LdSt)) { 366 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 367 if (!BaseOp) // e.g. S_MEMTIME 368 return false; 369 BaseOps.push_back(BaseOp); 370 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 371 Offset = OffsetOp ? OffsetOp->getImm() : 0; 372 return true; 373 } 374 375 if (isFLAT(LdSt)) { 376 // Instructions have either vaddr or saddr or both. 377 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 378 if (BaseOp) 379 BaseOps.push_back(BaseOp); 380 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr); 381 if (BaseOp) 382 BaseOps.push_back(BaseOp); 383 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm(); 384 return true; 385 } 386 387 return false; 388 } 389 390 static bool 391 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 392 ArrayRef<const MachineOperand *> BaseOps2) { 393 if (BaseOps1.size() != BaseOps2.size()) 394 return false; 395 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) 396 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 397 return false; 398 return true; 399 } 400 401 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, 402 ArrayRef<const MachineOperand *> BaseOps1, 403 const MachineInstr &MI2, 404 ArrayRef<const MachineOperand *> BaseOps2) { 405 if (memOpsHaveSameBaseOperands(BaseOps1, BaseOps2)) 406 return true; 407 408 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand()) 409 return false; 410 411 auto MO1 = *MI1.memoperands_begin(); 412 auto MO2 = *MI2.memoperands_begin(); 413 if (MO1->getAddrSpace() != MO2->getAddrSpace()) 414 return false; 415 416 auto Base1 = MO1->getValue(); 417 auto Base2 = MO2->getValue(); 418 if (!Base1 || !Base2) 419 return false; 420 const MachineFunction &MF = *MI1.getParent()->getParent(); 421 const DataLayout &DL = MF.getFunction().getParent()->getDataLayout(); 422 Base1 = GetUnderlyingObject(Base1, DL); 423 Base2 = GetUnderlyingObject(Base2, DL); 424 425 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2)) 426 return false; 427 428 return Base1 == Base2; 429 } 430 431 bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, 432 ArrayRef<const MachineOperand *> BaseOps2, 433 unsigned NumLoads) const { 434 assert(!BaseOps1.empty() && !BaseOps2.empty()); 435 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent(); 436 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent(); 437 438 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2)) 439 return false; 440 441 const MachineOperand *FirstDst = nullptr; 442 const MachineOperand *SecondDst = nullptr; 443 444 if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) || 445 (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) || 446 (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) { 447 const unsigned MaxGlobalLoadCluster = 7; 448 if (NumLoads > MaxGlobalLoadCluster) 449 return false; 450 451 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata); 452 if (!FirstDst) 453 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst); 454 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata); 455 if (!SecondDst) 456 SecondDst = getNamedOperand(SecondLdSt, 
                                 AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();

  const Register Reg = FirstDst->getReg();

  const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg)
                                         ? MRI.getRegClass(Reg)
                                         : RI.getPhysRegClass(Reg);

  // FIXME: NumLoads should not be subtracted 1. This is to match behavior
  // of clusterNeighboringMemOps which was previously passing cluster length
  // less 1. LoadClusterThreshold should be tuned instead.
  return ((NumLoads - 1) * (RI.getRegSizeInBits(*DstRC) / 8)) <=
         LoadClusterThreshold;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 batches of 16 stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16 bit and 32 bit
  // registers until all patterns are fixed.
538 if (Fix16BitCopies && 539 ((RI.getRegSizeInBits(*RC) == 16) ^ 540 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) { 541 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg; 542 MCRegister Super = RI.get32BitRegister(RegToFix); 543 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix); 544 RegToFix = Super; 545 546 if (DestReg == SrcReg) { 547 // Insert empty bundle since ExpandPostRA expects an instruction here. 548 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE)); 549 return; 550 } 551 552 RC = RI.getPhysRegClass(DestReg); 553 } 554 555 if (RC == &AMDGPU::VGPR_32RegClass) { 556 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 557 AMDGPU::SReg_32RegClass.contains(SrcReg) || 558 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 559 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ? 560 AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32; 561 BuildMI(MBB, MI, DL, get(Opc), DestReg) 562 .addReg(SrcReg, getKillRegState(KillSrc)); 563 return; 564 } 565 566 if (RC == &AMDGPU::SReg_32_XM0RegClass || 567 RC == &AMDGPU::SReg_32RegClass) { 568 if (SrcReg == AMDGPU::SCC) { 569 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg) 570 .addImm(1) 571 .addImm(0); 572 return; 573 } 574 575 if (DestReg == AMDGPU::VCC_LO) { 576 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { 577 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) 578 .addReg(SrcReg, getKillRegState(KillSrc)); 579 } else { 580 // FIXME: Hack until VReg_1 removed. 581 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 582 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 583 .addImm(0) 584 .addReg(SrcReg, getKillRegState(KillSrc)); 585 } 586 587 return; 588 } 589 590 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 591 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 592 return; 593 } 594 595 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 596 .addReg(SrcReg, getKillRegState(KillSrc)); 597 return; 598 } 599 600 if (RC == &AMDGPU::SReg_64RegClass) { 601 if (DestReg == AMDGPU::VCC) { 602 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 603 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 604 .addReg(SrcReg, getKillRegState(KillSrc)); 605 } else { 606 // FIXME: Hack until VReg_1 removed. 607 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 608 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 609 .addImm(0) 610 .addReg(SrcReg, getKillRegState(KillSrc)); 611 } 612 613 return; 614 } 615 616 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 617 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 618 return; 619 } 620 621 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 622 .addReg(SrcReg, getKillRegState(KillSrc)); 623 return; 624 } 625 626 if (DestReg == AMDGPU::SCC) { 627 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 628 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 629 .addReg(SrcReg, getKillRegState(KillSrc)) 630 .addImm(0); 631 return; 632 } 633 634 if (RC == &AMDGPU::AGPR_32RegClass) { 635 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 636 AMDGPU::SReg_32RegClass.contains(SrcReg) || 637 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 638 if (!AMDGPU::VGPR_32RegClass.contains(SrcReg)) { 639 // First try to find defining accvgpr_write to avoid temporary registers. 
      for (auto Def = MI, E = MBB.begin(); Def != E; ) {
        --Def;
        if (!Def->definesRegister(SrcReg, &RI))
          continue;
        if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32)
          break;

        MachineOperand &DefOp = Def->getOperand(1);
        assert(DefOp.isReg() || DefOp.isImm());

        if (DefOp.isReg()) {
          // Check that the register source operand is not clobbered before MI.
          // Immediate operands are always safe to propagate.
          bool SafeToPropagate = true;
          for (auto I = Def; I != MI && SafeToPropagate; ++I)
            if (I->modifiesRegister(DefOp.getReg(), &RI))
              SafeToPropagate = false;

          if (!SafeToPropagate)
            break;

          DefOp.setIsKill(false);
        }

        BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
          .add(DefOp);
        return;
      }

      RegScavenger RS;
      RS.enterBasicBlock(MBB);
      RS.forward(MI);

      // Ideally we want to have three registers for a long reg_sequence copy
      // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
      unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                                 *MBB.getParent());

      // Registers in the sequence are allocated contiguously so we can just
      // use register number to pick one of three round-robin temps.
      unsigned RegNo = DestReg % 3;
      Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
      if (!Tmp)
        report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
      RS.setRegUsed(Tmp);
      // Only loop through if there are any free registers left, otherwise
      // the scavenger may report a fatal error without an emergency spill slot
      // or spill with the slot.
      while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
        unsigned Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
        if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
          break;
        Tmp = Tmp2;
        RS.setRegUsed(Tmp);
      }
      copyPhysReg(MBB, MI, DL, Tmp, SrcReg, KillSrc);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
        .addReg(Tmp, RegState::Kill);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RI.getRegSizeInBits(*RC) == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
739 "Cannot use hi16 subreg with an AGPR!"); 740 } 741 742 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc); 743 return; 744 } 745 746 if (IsSGPRSrc && !ST.hasSDWAScalar()) { 747 if (!DstLow || !SrcLow) { 748 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 749 "Cannot use hi16 subreg on VI!"); 750 } 751 752 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg) 753 .addReg(NewSrcReg, getKillRegState(KillSrc)); 754 return; 755 } 756 757 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg) 758 .addImm(0) // src0_modifiers 759 .addReg(NewSrcReg) 760 .addImm(0) // clamp 761 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0 762 : AMDGPU::SDWA::SdwaSel::WORD_1) 763 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) 764 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0 765 : AMDGPU::SDWA::SdwaSel::WORD_1) 766 .addReg(NewDestReg, RegState::Implicit | RegState::Undef); 767 // First implicit operand is $exec. 768 MIB->tieOperands(0, MIB->getNumOperands() - 1); 769 return; 770 } 771 772 unsigned EltSize = 4; 773 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 774 if (RI.isSGPRClass(RC)) { 775 // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32. 776 if (!(RI.getRegSizeInBits(*RC) % 64)) { 777 Opcode = AMDGPU::S_MOV_B64; 778 EltSize = 8; 779 } else { 780 Opcode = AMDGPU::S_MOV_B32; 781 EltSize = 4; 782 } 783 784 if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) { 785 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 786 return; 787 } 788 } else if (RI.hasAGPRs(RC)) { 789 Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ? 790 AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY; 791 } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) { 792 Opcode = AMDGPU::V_ACCVGPR_READ_B32; 793 } 794 795 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 796 bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 797 798 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 799 unsigned SubIdx; 800 if (Forward) 801 SubIdx = SubIndices[Idx]; 802 else 803 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 804 805 if (Opcode == TargetOpcode::COPY) { 806 copyPhysReg(MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 807 RI.getSubReg(SrcReg, SubIdx), KillSrc); 808 continue; 809 } 810 811 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 812 get(Opcode), RI.getSubReg(DestReg, SubIdx)); 813 814 Builder.addReg(RI.getSubReg(SrcReg, SubIdx)); 815 816 if (Idx == 0) 817 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 818 819 bool UseKill = KillSrc && Idx == SubIndices.size() - 1; 820 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 821 } 822 } 823 824 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 825 int NewOpc; 826 827 // Try to map original to commuted opcode 828 NewOpc = AMDGPU::getCommuteRev(Opcode); 829 if (NewOpc != -1) 830 // Check if the commuted (REV) opcode exists on the target. 831 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 832 833 // Try to map commuted to original opcode 834 NewOpc = AMDGPU::getCommuteOrig(Opcode); 835 if (NewOpc != -1) 836 // Check if the original (non-REV) opcode exists on the target. 837 return pseudoToMCOpcode(NewOpc) != -1 ? 
NewOpc : -1; 838 839 return Opcode; 840 } 841 842 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 843 MachineBasicBlock::iterator MI, 844 const DebugLoc &DL, unsigned DestReg, 845 int64_t Value) const { 846 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 847 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 848 if (RegClass == &AMDGPU::SReg_32RegClass || 849 RegClass == &AMDGPU::SGPR_32RegClass || 850 RegClass == &AMDGPU::SReg_32_XM0RegClass || 851 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 852 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 853 .addImm(Value); 854 return; 855 } 856 857 if (RegClass == &AMDGPU::SReg_64RegClass || 858 RegClass == &AMDGPU::SGPR_64RegClass || 859 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 860 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 861 .addImm(Value); 862 return; 863 } 864 865 if (RegClass == &AMDGPU::VGPR_32RegClass) { 866 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 867 .addImm(Value); 868 return; 869 } 870 if (RegClass == &AMDGPU::VReg_64RegClass) { 871 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 872 .addImm(Value); 873 return; 874 } 875 876 unsigned EltSize = 4; 877 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 878 if (RI.isSGPRClass(RegClass)) { 879 if (RI.getRegSizeInBits(*RegClass) > 32) { 880 Opcode = AMDGPU::S_MOV_B64; 881 EltSize = 8; 882 } else { 883 Opcode = AMDGPU::S_MOV_B32; 884 EltSize = 4; 885 } 886 } 887 888 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 889 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 890 int64_t IdxValue = Idx == 0 ? Value : 0; 891 892 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 893 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 894 Builder.addImm(IdxValue); 895 } 896 } 897 898 const TargetRegisterClass * 899 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 900 return &AMDGPU::VGPR_32RegClass; 901 } 902 903 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 904 MachineBasicBlock::iterator I, 905 const DebugLoc &DL, Register DstReg, 906 ArrayRef<MachineOperand> Cond, 907 Register TrueReg, 908 Register FalseReg) const { 909 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 910 MachineFunction *MF = MBB.getParent(); 911 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 912 const TargetRegisterClass *BoolXExecRC = 913 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 914 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 915 "Not a VGPR32 reg"); 916 917 if (Cond.size() == 1) { 918 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 919 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 920 .add(Cond[0]); 921 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 922 .addImm(0) 923 .addReg(FalseReg) 924 .addImm(0) 925 .addReg(TrueReg) 926 .addReg(SReg); 927 } else if (Cond.size() == 2) { 928 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 929 switch (Cond[0].getImm()) { 930 case SIInstrInfo::SCC_TRUE: { 931 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 932 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 933 : AMDGPU::S_CSELECT_B64), SReg) 934 .addImm(1) 935 .addImm(0); 936 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 937 .addImm(0) 938 .addReg(FalseReg) 939 .addImm(0) 940 .addReg(TrueReg) 941 .addReg(SReg); 942 break; 943 } 944 case SIInstrInfo::SCC_FALSE: { 945 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 946 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 947 : AMDGPU::S_CSELECT_B64), SReg) 948 .addImm(0) 949 .addImm(1); 950 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 951 .addImm(0) 952 .addReg(FalseReg) 953 .addImm(0) 954 .addReg(TrueReg) 955 .addReg(SReg); 956 break; 957 } 958 case SIInstrInfo::VCCNZ: { 959 MachineOperand RegOp = Cond[1]; 960 RegOp.setImplicit(false); 961 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 962 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 963 .add(RegOp); 964 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 965 .addImm(0) 966 .addReg(FalseReg) 967 .addImm(0) 968 .addReg(TrueReg) 969 .addReg(SReg); 970 break; 971 } 972 case SIInstrInfo::VCCZ: { 973 MachineOperand RegOp = Cond[1]; 974 RegOp.setImplicit(false); 975 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 976 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 977 .add(RegOp); 978 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 979 .addImm(0) 980 .addReg(TrueReg) 981 .addImm(0) 982 .addReg(FalseReg) 983 .addReg(SReg); 984 break; 985 } 986 case SIInstrInfo::EXECNZ: { 987 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 988 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 989 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 990 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 991 .addImm(0); 992 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 993 : AMDGPU::S_CSELECT_B64), SReg) 994 .addImm(1) 995 .addImm(0); 996 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 997 .addImm(0) 998 .addReg(FalseReg) 999 .addImm(0) 1000 .addReg(TrueReg) 1001 .addReg(SReg); 1002 break; 1003 } 1004 case SIInstrInfo::EXECZ: { 1005 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1006 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1007 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1008 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1009 .addImm(0); 1010 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1011 : AMDGPU::S_CSELECT_B64), SReg) 1012 .addImm(0) 1013 .addImm(1); 1014 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1015 .addImm(0) 1016 .addReg(FalseReg) 1017 .addImm(0) 1018 .addReg(TrueReg) 1019 .addReg(SReg); 1020 llvm_unreachable("Unhandled branch predicate EXECZ"); 1021 break; 1022 } 1023 default: 1024 llvm_unreachable("invalid branch predicate"); 1025 } 1026 } else { 1027 llvm_unreachable("Can only handle Cond size 1 or 2"); 1028 } 1029 } 1030 1031 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 1032 MachineBasicBlock::iterator I, 1033 const DebugLoc &DL, 1034 Register SrcReg, int Value) const { 1035 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1036 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1037 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 1038 .addImm(Value) 1039 .addReg(SrcReg); 1040 1041 return Reg; 1042 } 1043 1044 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB, 1045 MachineBasicBlock::iterator I, 1046 const DebugLoc &DL, 1047 Register SrcReg, int Value) const { 1048 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1049 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1050 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 1051 .addImm(Value) 1052 .addReg(SrcReg); 1053 1054 return Reg; 1055 } 1056 1057 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 1058 1059 if (RI.hasAGPRs(DstRC)) 1060 return AMDGPU::COPY; 1061 if (RI.getRegSizeInBits(*DstRC) == 32) { 1062 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1063 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 1064 return AMDGPU::S_MOV_B64; 1065 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 1066 return AMDGPU::V_MOV_B64_PSEUDO; 1067 } 1068 return AMDGPU::COPY; 1069 } 1070 1071 static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) { 1072 switch (VecSize) { 1073 case 32: // 4 bytes 1074 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1; 1075 case 64: // 8 bytes 1076 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2; 1077 case 96: // 12 bytes 1078 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3; 1079 case 128: // 16 bytes 1080 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4; 1081 case 160: // 20 bytes 1082 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5; 1083 case 256: // 32 bytes 1084 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8; 1085 case 512: // 64 bytes 1086 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16; 1087 case 1024: // 128 bytes 1088 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32; 1089 default: 1090 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1091 } 1092 } 1093 1094 static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) { 1095 switch (VecSize) { 1096 case 32: // 4 bytes 1097 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1; 1098 case 64: // 8 bytes 1099 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2; 1100 case 96: // 12 bytes 1101 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3; 1102 case 128: // 16 bytes 1103 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4; 1104 case 160: // 20 bytes 1105 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5; 1106 case 256: // 32 bytes 1107 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8; 1108 case 512: // 64 bytes 1109 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16; 1110 case 1024: // 128 bytes 1111 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32; 1112 default: 1113 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1114 } 1115 } 1116 1117 static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) { 1118 switch (VecSize) { 1119 case 64: // 8 bytes 1120 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1; 1121 case 128: // 16 bytes 1122 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2; 1123 case 256: // 32 bytes 1124 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4; 1125 case 512: // 64 bytes 1126 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8; 1127 case 1024: // 128 bytes 1128 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16; 1129 default: 1130 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1131 } 1132 } 1133 1134 const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo( 1135 unsigned VecSize, unsigned EltSize, bool IsSGPR) const { 1136 if (IsSGPR) { 1137 switch (EltSize) { 1138 case 32: 1139 return get(getIndirectSGPRWritePseudo32(VecSize)); 1140 case 64: 1141 return get(getIndirectSGPRWritePseudo64(VecSize)); 1142 default: 1143 llvm_unreachable("invalid reg indexing elt size"); 1144 } 1145 } 1146 1147 assert(EltSize == 32 && "invalid reg indexing elt size"); 1148 return get(getIndirectVGPRWritePseudoOpc(VecSize)); 1149 } 1150 1151 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1152 switch (Size) { 1153 case 4: 1154 return AMDGPU::SI_SPILL_S32_SAVE; 1155 case 8: 1156 return AMDGPU::SI_SPILL_S64_SAVE; 1157 case 12: 1158 return AMDGPU::SI_SPILL_S96_SAVE; 1159 case 16: 1160 return AMDGPU::SI_SPILL_S128_SAVE; 1161 case 20: 1162 return AMDGPU::SI_SPILL_S160_SAVE; 1163 case 32: 1164 return AMDGPU::SI_SPILL_S256_SAVE; 1165 case 64: 1166 return AMDGPU::SI_SPILL_S512_SAVE; 1167 case 128: 1168 return AMDGPU::SI_SPILL_S1024_SAVE; 1169 
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end up
    // needing them, and need to ensure that the reserved registers are
    // correctly handled.
    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ?
getAGPRSpillSaveOpcode(SpillSize) 1262 : getVGPRSpillSaveOpcode(SpillSize); 1263 MFI->setHasSpilledVGPRs(); 1264 1265 auto MIB = BuildMI(MBB, MI, DL, get(Opcode)); 1266 if (RI.hasAGPRs(RC)) { 1267 MachineRegisterInfo &MRI = MF->getRegInfo(); 1268 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1269 MIB.addReg(Tmp, RegState::Define); 1270 } 1271 MIB.addReg(SrcReg, getKillRegState(isKill)) // data 1272 .addFrameIndex(FrameIndex) // addr 1273 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1274 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1275 .addImm(0) // offset 1276 .addMemOperand(MMO); 1277 } 1278 1279 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1280 switch (Size) { 1281 case 4: 1282 return AMDGPU::SI_SPILL_S32_RESTORE; 1283 case 8: 1284 return AMDGPU::SI_SPILL_S64_RESTORE; 1285 case 12: 1286 return AMDGPU::SI_SPILL_S96_RESTORE; 1287 case 16: 1288 return AMDGPU::SI_SPILL_S128_RESTORE; 1289 case 20: 1290 return AMDGPU::SI_SPILL_S160_RESTORE; 1291 case 32: 1292 return AMDGPU::SI_SPILL_S256_RESTORE; 1293 case 64: 1294 return AMDGPU::SI_SPILL_S512_RESTORE; 1295 case 128: 1296 return AMDGPU::SI_SPILL_S1024_RESTORE; 1297 default: 1298 llvm_unreachable("unknown register size"); 1299 } 1300 } 1301 1302 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1303 switch (Size) { 1304 case 4: 1305 return AMDGPU::SI_SPILL_V32_RESTORE; 1306 case 8: 1307 return AMDGPU::SI_SPILL_V64_RESTORE; 1308 case 12: 1309 return AMDGPU::SI_SPILL_V96_RESTORE; 1310 case 16: 1311 return AMDGPU::SI_SPILL_V128_RESTORE; 1312 case 20: 1313 return AMDGPU::SI_SPILL_V160_RESTORE; 1314 case 32: 1315 return AMDGPU::SI_SPILL_V256_RESTORE; 1316 case 64: 1317 return AMDGPU::SI_SPILL_V512_RESTORE; 1318 case 128: 1319 return AMDGPU::SI_SPILL_V1024_RESTORE; 1320 default: 1321 llvm_unreachable("unknown register size"); 1322 } 1323 } 1324 1325 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1326 switch (Size) { 1327 case 4: 1328 return AMDGPU::SI_SPILL_A32_RESTORE; 1329 case 8: 1330 return AMDGPU::SI_SPILL_A64_RESTORE; 1331 case 16: 1332 return AMDGPU::SI_SPILL_A128_RESTORE; 1333 case 64: 1334 return AMDGPU::SI_SPILL_A512_RESTORE; 1335 case 128: 1336 return AMDGPU::SI_SPILL_A1024_RESTORE; 1337 default: 1338 llvm_unreachable("unknown register size"); 1339 } 1340 } 1341 1342 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1343 MachineBasicBlock::iterator MI, 1344 Register DestReg, int FrameIndex, 1345 const TargetRegisterClass *RC, 1346 const TargetRegisterInfo *TRI) const { 1347 MachineFunction *MF = MBB.getParent(); 1348 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1349 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1350 const DebugLoc &DL = MBB.findDebugLoc(MI); 1351 unsigned SpillSize = TRI->getSpillSize(*RC); 1352 1353 MachinePointerInfo PtrInfo 1354 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1355 1356 MachineMemOperand *MMO = MF->getMachineMemOperand( 1357 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1358 FrameInfo.getObjectAlign(FrameIndex)); 1359 1360 if (RI.isSGPRClass(RC)) { 1361 MFI->setHasSpilledSGPRs(); 1362 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1363 1364 // FIXME: Maybe this should not include a memoperand because it will be 1365 // lowered to non-memory instructions. 
1366 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); 1367 if (DestReg.isVirtual() && SpillSize == 4) { 1368 MachineRegisterInfo &MRI = MF->getRegInfo(); 1369 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass); 1370 } 1371 1372 if (RI.spillSGPRToVGPR()) 1373 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1374 BuildMI(MBB, MI, DL, OpDesc, DestReg) 1375 .addFrameIndex(FrameIndex) // addr 1376 .addMemOperand(MMO) 1377 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) 1378 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1379 return; 1380 } 1381 1382 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize) 1383 : getVGPRSpillRestoreOpcode(SpillSize); 1384 auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg); 1385 if (RI.hasAGPRs(RC)) { 1386 MachineRegisterInfo &MRI = MF->getRegInfo(); 1387 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1388 MIB.addReg(Tmp, RegState::Define); 1389 } 1390 MIB.addFrameIndex(FrameIndex) // vaddr 1391 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1392 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1393 .addImm(0) // offset 1394 .addMemOperand(MMO); 1395 } 1396 1397 /// \param @Offset Offset in bytes of the FrameIndex being spilled 1398 unsigned SIInstrInfo::calculateLDSSpillAddress( 1399 MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg, 1400 unsigned FrameOffset, unsigned Size) const { 1401 MachineFunction *MF = MBB.getParent(); 1402 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1403 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 1404 const DebugLoc &DL = MBB.findDebugLoc(MI); 1405 unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize(); 1406 unsigned WavefrontSize = ST.getWavefrontSize(); 1407 1408 Register TIDReg = MFI->getTIDReg(); 1409 if (!MFI->hasCalculatedTID()) { 1410 MachineBasicBlock &Entry = MBB.getParent()->front(); 1411 MachineBasicBlock::iterator Insert = Entry.front(); 1412 const DebugLoc &DL = Insert->getDebugLoc(); 1413 1414 TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass, 1415 *MF); 1416 if (TIDReg == AMDGPU::NoRegister) 1417 return TIDReg; 1418 1419 if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) && 1420 WorkGroupSize > WavefrontSize) { 1421 Register TIDIGXReg = 1422 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 1423 Register TIDIGYReg = 1424 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 1425 Register TIDIGZReg = 1426 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 1427 Register InputPtrReg = 1428 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1429 for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) { 1430 if (!Entry.isLiveIn(Reg)) 1431 Entry.addLiveIn(Reg); 1432 } 1433 1434 RS->enterBasicBlock(Entry); 1435 // FIXME: Can we scavenge an SReg_64 and access the subregs? 
      Register STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      Register STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
        .addReg(InputPtrReg)
        .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
        .addReg(STmp1)
        .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
        .addReg(STmp1)
        .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
        .addReg(STmp0)
        .addReg(TIDIGYReg)
        .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
        .addReg(TIDReg)
        .addReg(TIDIGZReg)
        .addImm(0); // clamp bit
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
        .addImm(-1)
        .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
      .addImm(2)
      .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
    .addImm(LDSOffset)
    .addReg(TIDReg)
    .addImm(0); // clamp bit

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
      .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?
1535 1536 case AMDGPU::S_NOP: 1537 return MI.getOperand(0).getImm() + 1; 1538 } 1539 } 1540 1541 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1542 MachineBasicBlock &MBB = *MI.getParent(); 1543 DebugLoc DL = MBB.findDebugLoc(MI); 1544 switch (MI.getOpcode()) { 1545 default: return TargetInstrInfo::expandPostRAPseudo(MI); 1546 case AMDGPU::S_MOV_B64_term: 1547 // This is only a terminator to get the correct spill code placement during 1548 // register allocation. 1549 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1550 break; 1551 1552 case AMDGPU::S_MOV_B32_term: 1553 // This is only a terminator to get the correct spill code placement during 1554 // register allocation. 1555 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1556 break; 1557 1558 case AMDGPU::S_XOR_B64_term: 1559 // This is only a terminator to get the correct spill code placement during 1560 // register allocation. 1561 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1562 break; 1563 1564 case AMDGPU::S_XOR_B32_term: 1565 // This is only a terminator to get the correct spill code placement during 1566 // register allocation. 1567 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1568 break; 1569 1570 case AMDGPU::S_OR_B32_term: 1571 // This is only a terminator to get the correct spill code placement during 1572 // register allocation. 1573 MI.setDesc(get(AMDGPU::S_OR_B32)); 1574 break; 1575 1576 case AMDGPU::S_ANDN2_B64_term: 1577 // This is only a terminator to get the correct spill code placement during 1578 // register allocation. 1579 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1580 break; 1581 1582 case AMDGPU::S_ANDN2_B32_term: 1583 // This is only a terminator to get the correct spill code placement during 1584 // register allocation. 1585 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1586 break; 1587 1588 case AMDGPU::V_MOV_B64_PSEUDO: { 1589 Register Dst = MI.getOperand(0).getReg(); 1590 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1591 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1592 1593 const MachineOperand &SrcOp = MI.getOperand(1); 1594 // FIXME: Will this work for 64-bit floating point immediates? 1595 assert(!SrcOp.isFPImm()); 1596 if (SrcOp.isImm()) { 1597 APInt Imm(64, SrcOp.getImm()); 1598 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1599 .addImm(Imm.getLoBits(32).getZExtValue()) 1600 .addReg(Dst, RegState::Implicit | RegState::Define); 1601 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1602 .addImm(Imm.getHiBits(32).getZExtValue()) 1603 .addReg(Dst, RegState::Implicit | RegState::Define); 1604 } else { 1605 assert(SrcOp.isReg()); 1606 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1607 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1608 .addReg(Dst, RegState::Implicit | RegState::Define); 1609 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1610 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1611 .addReg(Dst, RegState::Implicit | RegState::Define); 1612 } 1613 MI.eraseFromParent(); 1614 break; 1615 } 1616 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1617 expandMovDPP64(MI); 1618 break; 1619 } 1620 case AMDGPU::V_SET_INACTIVE_B32: { 1621 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1622 unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1623 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1624 .addReg(Exec); 1625 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1626 .add(MI.getOperand(2)); 1627 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1628 .addReg(Exec); 1629 MI.eraseFromParent(); 1630 break; 1631 } 1632 case AMDGPU::V_SET_INACTIVE_B64: { 1633 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1634 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1635 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1636 .addReg(Exec); 1637 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1638 MI.getOperand(0).getReg()) 1639 .add(MI.getOperand(2)); 1640 expandPostRAPseudo(*Copy); 1641 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1642 .addReg(Exec); 1643 MI.eraseFromParent(); 1644 break; 1645 } 1646 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1647 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1648 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1649 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1650 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1651 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1652 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1653 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1654 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1655 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1656 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1657 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1658 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1659 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1660 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1661 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1662 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1663 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1664 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1665 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1666 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1667 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1668 1669 unsigned Opc; 1670 if (RI.hasVGPRs(EltRC)) { 1671 Opc = ST.useVGPRIndexMode() ? 1672 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1673 } else { 1674 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1675 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1676 } 1677 1678 const MCInstrDesc &OpDesc = get(Opc); 1679 Register VecReg = MI.getOperand(0).getReg(); 1680 bool IsUndef = MI.getOperand(1).isUndef(); 1681 unsigned SubReg = MI.getOperand(3).getImm(); 1682 assert(VecReg == MI.getOperand(1).getReg()); 1683 1684 MachineInstrBuilder MIB = 1685 BuildMI(MBB, MI, DL, OpDesc) 1686 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1687 .add(MI.getOperand(2)) 1688 .addReg(VecReg, RegState::ImplicitDefine) 1689 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1690 1691 const int ImpDefIdx = 1692 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1693 const int ImpUseIdx = ImpDefIdx + 1; 1694 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1695 MI.eraseFromParent(); 1696 break; 1697 } 1698 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1699 MachineFunction &MF = *MBB.getParent(); 1700 Register Reg = MI.getOperand(0).getReg(); 1701 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1702 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1703 1704 // Create a bundle so these instructions won't be re-ordered by the 1705 // post-RA scheduler. 1706 MIBundleBuilder Bundler(MBB, MI); 1707 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1708 1709 // Add 32-bit offset from this instruction to the start of the 1710 // constant data. 
1711 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1712 .addReg(RegLo) 1713 .add(MI.getOperand(1))); 1714 1715 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1716 .addReg(RegHi); 1717 MIB.add(MI.getOperand(2)); 1718 1719 Bundler.append(MIB); 1720 finalizeBundle(MBB, Bundler.begin()); 1721 1722 MI.eraseFromParent(); 1723 break; 1724 } 1725 case AMDGPU::ENTER_WWM: { 1726 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1727 // WWM is entered. 1728 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1729 : AMDGPU::S_OR_SAVEEXEC_B64)); 1730 break; 1731 } 1732 case AMDGPU::EXIT_WWM: { 1733 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1734 // WWM is exited. 1735 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1736 break; 1737 } 1738 } 1739 return true; 1740 } 1741 1742 std::pair<MachineInstr*, MachineInstr*> 1743 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1744 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1745 1746 MachineBasicBlock &MBB = *MI.getParent(); 1747 DebugLoc DL = MBB.findDebugLoc(MI); 1748 MachineFunction *MF = MBB.getParent(); 1749 MachineRegisterInfo &MRI = MF->getRegInfo(); 1750 Register Dst = MI.getOperand(0).getReg(); 1751 unsigned Part = 0; 1752 MachineInstr *Split[2]; 1753 1754 1755 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1756 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1757 if (Dst.isPhysical()) { 1758 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1759 } else { 1760 assert(MRI.isSSA()); 1761 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1762 MovDPP.addDef(Tmp); 1763 } 1764 1765 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1766 const MachineOperand &SrcOp = MI.getOperand(I); 1767 assert(!SrcOp.isFPImm()); 1768 if (SrcOp.isImm()) { 1769 APInt Imm(64, SrcOp.getImm()); 1770 Imm.ashrInPlace(Part * 32); 1771 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1772 } else { 1773 assert(SrcOp.isReg()); 1774 Register Src = SrcOp.getReg(); 1775 if (Src.isPhysical()) 1776 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1777 else 1778 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 1779 } 1780 } 1781 1782 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1783 MovDPP.addImm(MI.getOperand(I).getImm()); 1784 1785 Split[Part] = MovDPP; 1786 ++Part; 1787 } 1788 1789 if (Dst.isVirtual()) 1790 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1791 .addReg(Split[0]->getOperand(0).getReg()) 1792 .addImm(AMDGPU::sub0) 1793 .addReg(Split[1]->getOperand(0).getReg()) 1794 .addImm(AMDGPU::sub1); 1795 1796 MI.eraseFromParent(); 1797 return std::make_pair(Split[0], Split[1]); 1798 } 1799 1800 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1801 MachineOperand &Src0, 1802 unsigned Src0OpName, 1803 MachineOperand &Src1, 1804 unsigned Src1OpName) const { 1805 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1806 if (!Src0Mods) 1807 return false; 1808 1809 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1810 assert(Src1Mods && 1811 "All commutable instructions have both src0 and src1 modifiers"); 1812 1813 int Src0ModsVal = Src0Mods->getImm(); 1814 int Src1ModsVal = Src1Mods->getImm(); 1815 1816 Src1Mods->setImm(Src0ModsVal); 1817 Src0Mods->setImm(Src1ModsVal); 1818 return true; 1819 } 1820 1821 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1822 MachineOperand &RegOp, 1823 MachineOperand &NonRegOp) { 1824 Register Reg = RegOp.getReg(); 1825 unsigned SubReg = RegOp.getSubReg(); 1826 bool IsKill = RegOp.isKill(); 1827 bool IsDead = RegOp.isDead(); 1828 bool IsUndef = RegOp.isUndef(); 1829 bool IsDebug = RegOp.isDebug(); 1830 1831 if (NonRegOp.isImm()) 1832 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1833 else if (NonRegOp.isFI()) 1834 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1835 else 1836 return nullptr; 1837 1838 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1839 NonRegOp.setSubReg(SubReg); 1840 1841 return &MI; 1842 } 1843 1844 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1845 unsigned Src0Idx, 1846 unsigned Src1Idx) const { 1847 assert(!NewMI && "this should never be used"); 1848 1849 unsigned Opc = MI.getOpcode(); 1850 int CommutedOpcode = commuteOpcode(Opc); 1851 if (CommutedOpcode == -1) 1852 return nullptr; 1853 1854 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1855 static_cast<int>(Src0Idx) && 1856 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1857 static_cast<int>(Src1Idx) && 1858 "inconsistency with findCommutedOpIndices"); 1859 1860 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1861 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1862 1863 MachineInstr *CommutedMI = nullptr; 1864 if (Src0.isReg() && Src1.isReg()) { 1865 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1866 // Be sure to copy the source modifiers to the right place. 1867 CommutedMI 1868 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1869 } 1870 1871 } else if (Src0.isReg() && !Src1.isReg()) { 1872 // src0 should always be able to support any operand type, so no need to 1873 // check operand legality. 1874 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1875 } else if (!Src0.isReg() && Src1.isReg()) { 1876 if (isOperandLegal(MI, Src1Idx, &Src0)) 1877 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1878 } else { 1879 // FIXME: Found two non registers to commute. This does happen. 
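    // Give up and keep the original operand order; this can legitimately
    // happen, e.g. when both sources have already been folded to immediates.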
1880 return nullptr; 1881 } 1882 1883 if (CommutedMI) { 1884 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1885 Src1, AMDGPU::OpName::src1_modifiers); 1886 1887 CommutedMI->setDesc(get(CommutedOpcode)); 1888 } 1889 1890 return CommutedMI; 1891 } 1892 1893 // This needs to be implemented because the source modifiers may be inserted 1894 // between the true commutable operands, and the base 1895 // TargetInstrInfo::commuteInstruction uses it. 1896 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1897 unsigned &SrcOpIdx0, 1898 unsigned &SrcOpIdx1) const { 1899 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1900 } 1901 1902 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1903 unsigned &SrcOpIdx1) const { 1904 if (!Desc.isCommutable()) 1905 return false; 1906 1907 unsigned Opc = Desc.getOpcode(); 1908 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1909 if (Src0Idx == -1) 1910 return false; 1911 1912 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1913 if (Src1Idx == -1) 1914 return false; 1915 1916 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1917 } 1918 1919 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1920 int64_t BrOffset) const { 1921 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1922 // block is unanalyzable. 1923 assert(BranchOp != AMDGPU::S_SETPC_B64); 1924 1925 // Convert to dwords. 1926 BrOffset /= 4; 1927 1928 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1929 // from the next instruction. 1930 BrOffset -= 1; 1931 1932 return isIntN(BranchOffsetBits, BrOffset); 1933 } 1934 1935 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1936 const MachineInstr &MI) const { 1937 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1938 // This would be a difficult analysis to perform, but can always be legal so 1939 // there's no need to analyze it. 1940 return nullptr; 1941 } 1942 1943 return MI.getOperand(0).getMBB(); 1944 } 1945 1946 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1947 MachineBasicBlock &DestBB, 1948 const DebugLoc &DL, 1949 int64_t BrOffset, 1950 RegScavenger *RS) const { 1951 assert(RS && "RegScavenger required for long branching"); 1952 assert(MBB.empty() && 1953 "new block should be inserted for expanding unconditional branch"); 1954 assert(MBB.pred_size() == 1); 1955 1956 MachineFunction *MF = MBB.getParent(); 1957 MachineRegisterInfo &MRI = MF->getRegInfo(); 1958 1959 // FIXME: Virtual register workaround for RegScavenger not working with empty 1960 // blocks. 1961 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1962 1963 auto I = MBB.end(); 1964 1965 // We need to compute the offset relative to the instruction immediately after 1966 // s_getpc_b64. Insert pc arithmetic code before last terminator. 1967 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 1968 1969 // TODO: Handle > 32-bit block address. 1970 if (BrOffset >= 0) { 1971 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 1972 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1973 .addReg(PCReg, 0, AMDGPU::sub0) 1974 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 1975 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 1976 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1977 .addReg(PCReg, 0, AMDGPU::sub1) 1978 .addImm(0); 1979 } else { 1980 // Backwards branch. 
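    // The destination precedes this block, so the offset is subtracted
    // instead; s_subb_u32 propagates the borrow out of the low 32 bits.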
1981 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32)) 1982 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1983 .addReg(PCReg, 0, AMDGPU::sub0) 1984 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD); 1985 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32)) 1986 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1987 .addReg(PCReg, 0, AMDGPU::sub1) 1988 .addImm(0); 1989 } 1990 1991 // Insert the indirect branch after the other terminator. 1992 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 1993 .addReg(PCReg); 1994 1995 // FIXME: If spilling is necessary, this will fail because this scavenger has 1996 // no emergency stack slots. It is non-trivial to spill in this situation, 1997 // because the restore code needs to be specially placed after the 1998 // jump. BranchRelaxation then needs to be made aware of the newly inserted 1999 // block. 2000 // 2001 // If a spill is needed for the pc register pair, we need to insert a spill 2002 // restore block right before the destination block, and insert a short branch 2003 // into the old destination block's fallthrough predecessor. 2004 // e.g.: 2005 // 2006 // s_cbranch_scc0 skip_long_branch: 2007 // 2008 // long_branch_bb: 2009 // spill s[8:9] 2010 // s_getpc_b64 s[8:9] 2011 // s_add_u32 s8, s8, restore_bb 2012 // s_addc_u32 s9, s9, 0 2013 // s_setpc_b64 s[8:9] 2014 // 2015 // skip_long_branch: 2016 // foo; 2017 // 2018 // ..... 2019 // 2020 // dest_bb_fallthrough_predecessor: 2021 // bar; 2022 // s_branch dest_bb 2023 // 2024 // restore_bb: 2025 // restore s[8:9] 2026 // fallthrough dest_bb 2027 /// 2028 // dest_bb: 2029 // buzz; 2030 2031 RS->enterBasicBlockEnd(MBB); 2032 unsigned Scav = RS->scavengeRegisterBackwards( 2033 AMDGPU::SReg_64RegClass, 2034 MachineBasicBlock::iterator(GetPC), false, 0); 2035 MRI.replaceRegWith(PCReg, Scav); 2036 MRI.clearVirtRegs(); 2037 RS->setRegUsed(Scav); 2038 2039 return 4 + 8 + 4 + 4; 2040 } 2041 2042 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2043 switch (Cond) { 2044 case SIInstrInfo::SCC_TRUE: 2045 return AMDGPU::S_CBRANCH_SCC1; 2046 case SIInstrInfo::SCC_FALSE: 2047 return AMDGPU::S_CBRANCH_SCC0; 2048 case SIInstrInfo::VCCNZ: 2049 return AMDGPU::S_CBRANCH_VCCNZ; 2050 case SIInstrInfo::VCCZ: 2051 return AMDGPU::S_CBRANCH_VCCZ; 2052 case SIInstrInfo::EXECNZ: 2053 return AMDGPU::S_CBRANCH_EXECNZ; 2054 case SIInstrInfo::EXECZ: 2055 return AMDGPU::S_CBRANCH_EXECZ; 2056 default: 2057 llvm_unreachable("invalid branch predicate"); 2058 } 2059 } 2060 2061 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2062 switch (Opcode) { 2063 case AMDGPU::S_CBRANCH_SCC0: 2064 return SCC_FALSE; 2065 case AMDGPU::S_CBRANCH_SCC1: 2066 return SCC_TRUE; 2067 case AMDGPU::S_CBRANCH_VCCNZ: 2068 return VCCNZ; 2069 case AMDGPU::S_CBRANCH_VCCZ: 2070 return VCCZ; 2071 case AMDGPU::S_CBRANCH_EXECNZ: 2072 return EXECNZ; 2073 case AMDGPU::S_CBRANCH_EXECZ: 2074 return EXECZ; 2075 default: 2076 return INVALID_BR; 2077 } 2078 } 2079 2080 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2081 MachineBasicBlock::iterator I, 2082 MachineBasicBlock *&TBB, 2083 MachineBasicBlock *&FBB, 2084 SmallVectorImpl<MachineOperand> &Cond, 2085 bool AllowModify) const { 2086 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2087 // Unconditional Branch 2088 TBB = I->getOperand(0).getMBB(); 2089 return false; 2090 } 2091 2092 MachineBasicBlock *CondBB = nullptr; 2093 2094 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2095 CondBB = I->getOperand(1).getMBB(); 2096 Cond.push_back(I->getOperand(0)); 2097 } else 
{ 2098 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2099 if (Pred == INVALID_BR) 2100 return true; 2101 2102 CondBB = I->getOperand(0).getMBB(); 2103 Cond.push_back(MachineOperand::CreateImm(Pred)); 2104 Cond.push_back(I->getOperand(1)); // Save the branch register. 2105 } 2106 ++I; 2107 2108 if (I == MBB.end()) { 2109 // Conditional branch followed by fall-through. 2110 TBB = CondBB; 2111 return false; 2112 } 2113 2114 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2115 TBB = CondBB; 2116 FBB = I->getOperand(0).getMBB(); 2117 return false; 2118 } 2119 2120 return true; 2121 } 2122 2123 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2124 MachineBasicBlock *&FBB, 2125 SmallVectorImpl<MachineOperand> &Cond, 2126 bool AllowModify) const { 2127 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2128 auto E = MBB.end(); 2129 if (I == E) 2130 return false; 2131 2132 // Skip over the instructions that are artificially terminators for special 2133 // exec management. 2134 while (I != E && !I->isBranch() && !I->isReturn() && 2135 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2136 switch (I->getOpcode()) { 2137 case AMDGPU::SI_MASK_BRANCH: 2138 case AMDGPU::S_MOV_B64_term: 2139 case AMDGPU::S_XOR_B64_term: 2140 case AMDGPU::S_ANDN2_B64_term: 2141 case AMDGPU::S_MOV_B32_term: 2142 case AMDGPU::S_XOR_B32_term: 2143 case AMDGPU::S_OR_B32_term: 2144 case AMDGPU::S_ANDN2_B32_term: 2145 break; 2146 case AMDGPU::SI_IF: 2147 case AMDGPU::SI_ELSE: 2148 case AMDGPU::SI_KILL_I1_TERMINATOR: 2149 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2150 // FIXME: It's messy that these need to be considered here at all. 2151 return true; 2152 default: 2153 llvm_unreachable("unexpected non-branch terminator inst"); 2154 } 2155 2156 ++I; 2157 } 2158 2159 if (I == E) 2160 return false; 2161 2162 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2163 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2164 2165 ++I; 2166 2167 // TODO: Should be able to treat as fallthrough? 2168 if (I == MBB.end()) 2169 return true; 2170 2171 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2172 return true; 2173 2174 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2175 2176 // Specifically handle the case where the conditional branch is to the same 2177 // destination as the mask branch. e.g. 2178 // 2179 // si_mask_branch BB8 2180 // s_cbranch_execz BB8 2181 // s_cbranch BB9 2182 // 2183 // This is required to understand divergent loops which may need the branches 2184 // to be relaxed. 2185 if (TBB != MaskBrDest || Cond.empty()) 2186 return true; 2187 2188 auto Pred = Cond[0].getImm(); 2189 return (Pred != EXECZ && Pred != EXECNZ); 2190 } 2191 2192 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2193 int *BytesRemoved) const { 2194 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2195 2196 unsigned Count = 0; 2197 unsigned RemovedSize = 0; 2198 while (I != MBB.end()) { 2199 MachineBasicBlock::iterator Next = std::next(I); 2200 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2201 I = Next; 2202 continue; 2203 } 2204 2205 RemovedSize += getInstSizeInBytes(*I); 2206 I->eraseFromParent(); 2207 ++Count; 2208 I = Next; 2209 } 2210 2211 if (BytesRemoved) 2212 *BytesRemoved = RemovedSize; 2213 2214 return Count; 2215 } 2216 2217 // Copy the flags onto the implicit condition register operand. 
2218 static void preserveCondRegFlags(MachineOperand &CondReg,
2219                                  const MachineOperand &OrigCond) {
2220   CondReg.setIsUndef(OrigCond.isUndef());
2221   CondReg.setIsKill(OrigCond.isKill());
2222 }
2223
2224 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2225                                    MachineBasicBlock *TBB,
2226                                    MachineBasicBlock *FBB,
2227                                    ArrayRef<MachineOperand> Cond,
2228                                    const DebugLoc &DL,
2229                                    int *BytesAdded) const {
2230   if (!FBB && Cond.empty()) {
2231     BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2232       .addMBB(TBB);
2233     if (BytesAdded)
2234       *BytesAdded = 4;
2235     return 1;
2236   }
2237
2238   if (Cond.size() == 1 && Cond[0].isReg()) {
2239     BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2240       .add(Cond[0])
2241       .addMBB(TBB);
2242     return 1;
2243   }
2244
2245   assert(TBB && Cond[0].isImm());
2246
2247   unsigned Opcode
2248     = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2249
2250   if (!FBB) {
2251
2252     MachineInstr *CondBr =
2253       BuildMI(&MBB, DL, get(Opcode))
2254         .addMBB(TBB);
2255
2256     // Copy the flags onto the implicit condition register operand.
2257     preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2258
2259     if (BytesAdded)
2260       *BytesAdded = 4;
2261     return 1;
2262   }
2263
2264   assert(TBB && FBB);
2265
2266   MachineInstr *CondBr =
2267     BuildMI(&MBB, DL, get(Opcode))
2268       .addMBB(TBB);
2269   BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2270     .addMBB(FBB);
2271
2272   MachineOperand &CondReg = CondBr->getOperand(1);
2273   CondReg.setIsUndef(Cond[1].isUndef());
2274   CondReg.setIsKill(Cond[1].isKill());
2275
2276   if (BytesAdded)
2277     *BytesAdded = 8;
2278
2279   return 2;
2280 }
2281
2282 bool SIInstrInfo::reverseBranchCondition(
2283   SmallVectorImpl<MachineOperand> &Cond) const {
2284   if (Cond.size() != 2) {
2285     return true;
2286   }
2287
2288   if (Cond[0].isImm()) {
2289     Cond[0].setImm(-Cond[0].getImm());
2290     return false;
2291   }
2292
2293   return true;
2294 }
2295
2296 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2297                                   ArrayRef<MachineOperand> Cond,
2298                                   Register DstReg, Register TrueReg,
2299                                   Register FalseReg, int &CondCycles,
2300                                   int &TrueCycles, int &FalseCycles) const {
2301   switch (Cond[0].getImm()) {
2302   case VCCNZ:
2303   case VCCZ: {
2304     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2305     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2306     assert(MRI.getRegClass(FalseReg) == RC);
2307
2308     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2309     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2310
2311     // Limit to equal cost for branch vs. N v_cndmask_b32s.
2312     return RI.hasVGPRs(RC) && NumInsts <= 6;
2313   }
2314   case SCC_TRUE:
2315   case SCC_FALSE: {
2316     // FIXME: We could insert for VGPRs if we could replace the original compare
2317     // with a vector one.
2318     const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2319     const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2320     assert(MRI.getRegClass(FalseReg) == RC);
2321
2322     int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2323
2324     // Widths that are a multiple of 64 bits (8 bytes) can use s_cselect_b64.
2325     if (NumInsts % 2 == 0)
2326       NumInsts /= 2;
2327
2328     CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
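    // Only SGPR register classes can be expanded with s_cselect here; VGPR
    // destinations are handled by the VCCNZ/VCCZ cases above.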
2329 return RI.isSGPRClass(RC); 2330 } 2331 default: 2332 return false; 2333 } 2334 } 2335 2336 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2337 MachineBasicBlock::iterator I, const DebugLoc &DL, 2338 Register DstReg, ArrayRef<MachineOperand> Cond, 2339 Register TrueReg, Register FalseReg) const { 2340 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2341 if (Pred == VCCZ || Pred == SCC_FALSE) { 2342 Pred = static_cast<BranchPredicate>(-Pred); 2343 std::swap(TrueReg, FalseReg); 2344 } 2345 2346 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2347 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2348 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2349 2350 if (DstSize == 32) { 2351 unsigned SelOp = Pred == SCC_TRUE ? 2352 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; 2353 2354 // Instruction's operands are backwards from what is expected. 2355 MachineInstr *Select = 2356 BuildMI(MBB, I, DL, get(SelOp), DstReg) 2357 .addReg(FalseReg) 2358 .addReg(TrueReg); 2359 2360 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2361 return; 2362 } 2363 2364 if (DstSize == 64 && Pred == SCC_TRUE) { 2365 MachineInstr *Select = 2366 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2367 .addReg(FalseReg) 2368 .addReg(TrueReg); 2369 2370 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2371 return; 2372 } 2373 2374 static const int16_t Sub0_15[] = { 2375 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2376 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2377 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2378 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2379 }; 2380 2381 static const int16_t Sub0_15_64[] = { 2382 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2383 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2384 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2385 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2386 }; 2387 2388 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2389 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2390 const int16_t *SubIndices = Sub0_15; 2391 int NElts = DstSize / 32; 2392 2393 // 64-bit select is only available for SALU. 2394 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2395 if (Pred == SCC_TRUE) { 2396 if (NElts % 2) { 2397 SelOp = AMDGPU::S_CSELECT_B32; 2398 EltRC = &AMDGPU::SGPR_32RegClass; 2399 } else { 2400 SelOp = AMDGPU::S_CSELECT_B64; 2401 EltRC = &AMDGPU::SGPR_64RegClass; 2402 SubIndices = Sub0_15_64; 2403 NElts /= 2; 2404 } 2405 } 2406 2407 MachineInstrBuilder MIB = BuildMI( 2408 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2409 2410 I = MIB->getIterator(); 2411 2412 SmallVector<Register, 8> Regs; 2413 for (int Idx = 0; Idx != NElts; ++Idx) { 2414 Register DstElt = MRI.createVirtualRegister(EltRC); 2415 Regs.push_back(DstElt); 2416 2417 unsigned SubIdx = SubIndices[Idx]; 2418 2419 MachineInstr *Select = 2420 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2421 .addReg(FalseReg, 0, SubIdx) 2422 .addReg(TrueReg, 0, SubIdx); 2423 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2424 fixImplicitOperands(*Select); 2425 2426 MIB.addReg(DstElt) 2427 .addImm(SubIdx); 2428 } 2429 } 2430 2431 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2432 switch (MI.getOpcode()) { 2433 case AMDGPU::V_MOV_B32_e32: 2434 case AMDGPU::V_MOV_B32_e64: 2435 case AMDGPU::V_MOV_B64_PSEUDO: { 2436 // If there are additional implicit register operands, this may be used for 2437 // register indexing so the source register operand isn't simply copied. 
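    // Compare the actual operand count against the statically declared one
    // (explicit operands plus implicit uses); any extra operands suggest
    // indexed register access rather than a plain copy.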
2438     unsigned NumOps = MI.getDesc().getNumOperands() +
2439       MI.getDesc().getNumImplicitUses();
2440
2441     return MI.getNumOperands() == NumOps;
2442   }
2443   case AMDGPU::S_MOV_B32:
2444   case AMDGPU::S_MOV_B64:
2445   case AMDGPU::COPY:
2446   case AMDGPU::V_ACCVGPR_WRITE_B32:
2447   case AMDGPU::V_ACCVGPR_READ_B32:
2448     return true;
2449   default:
2450     return false;
2451   }
2452 }
2453
2454 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2455     unsigned Kind) const {
2456   switch (Kind) {
2457   case PseudoSourceValue::Stack:
2458   case PseudoSourceValue::FixedStack:
2459     return AMDGPUAS::PRIVATE_ADDRESS;
2460   case PseudoSourceValue::ConstantPool:
2461   case PseudoSourceValue::GOT:
2462   case PseudoSourceValue::JumpTable:
2463   case PseudoSourceValue::GlobalValueCallEntry:
2464   case PseudoSourceValue::ExternalSymbolCallEntry:
2465   case PseudoSourceValue::TargetCustom:
2466     return AMDGPUAS::CONSTANT_ADDRESS;
2467   }
2468   return AMDGPUAS::FLAT_ADDRESS;
2469 }
2470
2471 static void removeModOperands(MachineInstr &MI) {
2472   unsigned Opc = MI.getOpcode();
2473   int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2474                                               AMDGPU::OpName::src0_modifiers);
2475   int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2476                                               AMDGPU::OpName::src1_modifiers);
2477   int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2478                                               AMDGPU::OpName::src2_modifiers);
2479
2480   MI.RemoveOperand(Src2ModIdx);
2481   MI.RemoveOperand(Src1ModIdx);
2482   MI.RemoveOperand(Src0ModIdx);
2483 }
2484
2485 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2486                                 Register Reg, MachineRegisterInfo *MRI) const {
2487   if (!MRI->hasOneNonDBGUse(Reg))
2488     return false;
2489
2490   switch (DefMI.getOpcode()) {
2491   default:
2492     return false;
2493   case AMDGPU::S_MOV_B64:
2494     // TODO: We could fold 64-bit immediates, but this gets complicated
2495     // when there are sub-registers.
2496     return false;
2497
2498   case AMDGPU::V_MOV_B32_e32:
2499   case AMDGPU::S_MOV_B32:
2500   case AMDGPU::V_ACCVGPR_WRITE_B32:
2501     break;
2502   }
2503
2504   const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2505   assert(ImmOp);
2506   // FIXME: We could handle FrameIndex values here.
2507   if (!ImmOp->isImm())
2508     return false;
2509
2510   unsigned Opc = UseMI.getOpcode();
2511   if (Opc == AMDGPU::COPY) {
2512     bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
2513     unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2514     if (RI.isAGPR(*MRI, UseMI.getOperand(0).getReg())) {
2515       if (!isInlineConstant(*ImmOp, AMDGPU::OPERAND_REG_INLINE_AC_INT32))
2516         return false;
2517       NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32;
2518     }
2519     UseMI.setDesc(get(NewOpc));
2520     UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
2521     UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2522     return true;
2523   }
2524
2525   if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2526       Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 ||
2527       Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2528       Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) {
2529     // Don't fold if we are using source or output modifiers. The new VOP2
2530     // instructions don't have them.
2531     if (hasAnyModifiersSet(UseMI))
2532       return false;
2533
2534     // If this is a free constant, there's no reason to do this.
2535     // TODO: We could fold this here instead of letting SIFoldOperands do it
2536     // later.
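    // Below, the foldable literal is moved into the multiplied operand
    // (v_madmk/v_fmamk) when the def feeds src0, or into the added operand
    // (v_madak/v_fmaak) when it feeds src2.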
2537 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2538 2539 // Any src operand can be used for the legality check. 2540 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2541 return false; 2542 2543 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2544 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64; 2545 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2546 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64; 2547 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2548 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2549 2550 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2551 // We should only expect these to be on src0 due to canonicalizations. 2552 if (Src0->isReg() && Src0->getReg() == Reg) { 2553 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2554 return false; 2555 2556 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2557 return false; 2558 2559 unsigned NewOpc = 2560 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2561 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2562 if (pseudoToMCOpcode(NewOpc) == -1) 2563 return false; 2564 2565 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2566 2567 const int64_t Imm = ImmOp->getImm(); 2568 2569 // FIXME: This would be a lot easier if we could return a new instruction 2570 // instead of having to modify in place. 2571 2572 // Remove these first since they are at the end. 2573 UseMI.RemoveOperand( 2574 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2575 UseMI.RemoveOperand( 2576 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2577 2578 Register Src1Reg = Src1->getReg(); 2579 unsigned Src1SubReg = Src1->getSubReg(); 2580 Src0->setReg(Src1Reg); 2581 Src0->setSubReg(Src1SubReg); 2582 Src0->setIsKill(Src1->isKill()); 2583 2584 if (Opc == AMDGPU::V_MAC_F32_e64 || 2585 Opc == AMDGPU::V_MAC_F16_e64 || 2586 Opc == AMDGPU::V_FMAC_F32_e64 || 2587 Opc == AMDGPU::V_FMAC_F16_e64) 2588 UseMI.untieRegOperand( 2589 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2590 2591 Src1->ChangeToImmediate(Imm); 2592 2593 removeModOperands(UseMI); 2594 UseMI.setDesc(get(NewOpc)); 2595 2596 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2597 if (DeleteDef) 2598 DefMI.eraseFromParent(); 2599 2600 return true; 2601 } 2602 2603 // Added part is the constant: Use v_madak_{f16, f32}. 2604 if (Src2->isReg() && Src2->getReg() == Reg) { 2605 // Not allowed to use constant bus for another operand. 2606 // We can however allow an inline immediate as src0. 2607 bool Src0Inlined = false; 2608 if (Src0->isReg()) { 2609 // Try to inline constant if possible. 2610 // If the Def moves immediate and the use is single 2611 // We are saving VGPR here. 
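      // Once the literal is inlined, the single-use move-immediate def
      // becomes dead and its register is freed.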
2612 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2613 if (Def && Def->isMoveImmediate() && 2614 isInlineConstant(Def->getOperand(1)) && 2615 MRI->hasOneUse(Src0->getReg())) { 2616 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2617 Src0Inlined = true; 2618 } else if ((Register::isPhysicalRegister(Src0->getReg()) && 2619 (ST.getConstantBusLimit(Opc) <= 1 && 2620 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2621 (Register::isVirtualRegister(Src0->getReg()) && 2622 (ST.getConstantBusLimit(Opc) <= 1 && 2623 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2624 return false; 2625 // VGPR is okay as Src0 - fallthrough 2626 } 2627 2628 if (Src1->isReg() && !Src0Inlined ) { 2629 // We have one slot for inlinable constant so far - try to fill it 2630 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2631 if (Def && Def->isMoveImmediate() && 2632 isInlineConstant(Def->getOperand(1)) && 2633 MRI->hasOneUse(Src1->getReg()) && 2634 commuteInstruction(UseMI)) { 2635 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2636 } else if ((Register::isPhysicalRegister(Src1->getReg()) && 2637 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2638 (Register::isVirtualRegister(Src1->getReg()) && 2639 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2640 return false; 2641 // VGPR is okay as Src1 - fallthrough 2642 } 2643 2644 unsigned NewOpc = 2645 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2646 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2647 if (pseudoToMCOpcode(NewOpc) == -1) 2648 return false; 2649 2650 const int64_t Imm = ImmOp->getImm(); 2651 2652 // FIXME: This would be a lot easier if we could return a new instruction 2653 // instead of having to modify in place. 2654 2655 // Remove these first since they are at the end. 2656 UseMI.RemoveOperand( 2657 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2658 UseMI.RemoveOperand( 2659 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2660 2661 if (Opc == AMDGPU::V_MAC_F32_e64 || 2662 Opc == AMDGPU::V_MAC_F16_e64 || 2663 Opc == AMDGPU::V_FMAC_F32_e64 || 2664 Opc == AMDGPU::V_FMAC_F16_e64) 2665 UseMI.untieRegOperand( 2666 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2667 2668 // ChangingToImmediate adds Src2 back to the instruction. 2669 Src2->ChangeToImmediate(Imm); 2670 2671 // These come before src2. 2672 removeModOperands(UseMI); 2673 UseMI.setDesc(get(NewOpc)); 2674 // It might happen that UseMI was commuted 2675 // and we now have SGPR as SRC1. If so 2 inlined 2676 // constant and SGPR are illegal. 2677 legalizeOperands(UseMI); 2678 2679 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2680 if (DeleteDef) 2681 DefMI.eraseFromParent(); 2682 2683 return true; 2684 } 2685 } 2686 2687 return false; 2688 } 2689 2690 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2691 int WidthB, int OffsetB) { 2692 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2693 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2694 int LowWidth = (LowOffset == OffsetA) ? 
WidthA : WidthB; 2695 return LowOffset + LowWidth <= HighOffset; 2696 } 2697 2698 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 2699 const MachineInstr &MIb) const { 2700 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 2701 int64_t Offset0, Offset1; 2702 bool Offset0IsScalable, Offset1IsScalable; 2703 if (!getMemOperandsWithOffset(MIa, BaseOps0, Offset0, Offset0IsScalable, &RI) || 2704 !getMemOperandsWithOffset(MIb, BaseOps1, Offset1, Offset1IsScalable, &RI)) 2705 return false; 2706 2707 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 2708 return false; 2709 2710 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2711 // FIXME: Handle ds_read2 / ds_write2. 2712 return false; 2713 } 2714 unsigned Width0 = MIa.memoperands().front()->getSize(); 2715 unsigned Width1 = MIb.memoperands().front()->getSize(); 2716 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 2717 } 2718 2719 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 2720 const MachineInstr &MIb) const { 2721 assert(MIa.mayLoadOrStore() && 2722 "MIa must load from or modify a memory location"); 2723 assert(MIb.mayLoadOrStore() && 2724 "MIb must load from or modify a memory location"); 2725 2726 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2727 return false; 2728 2729 // XXX - Can we relax this between address spaces? 2730 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2731 return false; 2732 2733 // TODO: Should we check the address space from the MachineMemOperand? That 2734 // would allow us to distinguish objects we know don't alias based on the 2735 // underlying address space, even if it was lowered to a different one, 2736 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2737 // buffer. 
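  // The checks below are pairwise by instruction kind: accesses known to use
  // disjoint memory segments (e.g. DS/LDS vs. buffer or scalar memory) cannot
  // alias, while two accesses of the same kind fall back to an offset overlap
  // check on their base operands.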
2738 if (isDS(MIa)) { 2739 if (isDS(MIb)) 2740 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2741 2742 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 2743 } 2744 2745 if (isMUBUF(MIa) || isMTBUF(MIa)) { 2746 if (isMUBUF(MIb) || isMTBUF(MIb)) 2747 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2748 2749 return !isFLAT(MIb) && !isSMRD(MIb); 2750 } 2751 2752 if (isSMRD(MIa)) { 2753 if (isSMRD(MIb)) 2754 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2755 2756 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 2757 } 2758 2759 if (isFLAT(MIa)) { 2760 if (isFLAT(MIb)) 2761 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2762 2763 return false; 2764 } 2765 2766 return false; 2767 } 2768 2769 static int64_t getFoldableImm(const MachineOperand* MO) { 2770 if (!MO->isReg()) 2771 return false; 2772 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 2773 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2774 auto Def = MRI.getUniqueVRegDef(MO->getReg()); 2775 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && 2776 Def->getOperand(1).isImm()) 2777 return Def->getOperand(1).getImm(); 2778 return AMDGPU::NoRegister; 2779 } 2780 2781 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2782 MachineInstr &MI, 2783 LiveVariables *LV) const { 2784 unsigned Opc = MI.getOpcode(); 2785 bool IsF16 = false; 2786 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 2787 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 2788 2789 switch (Opc) { 2790 default: 2791 return nullptr; 2792 case AMDGPU::V_MAC_F16_e64: 2793 case AMDGPU::V_FMAC_F16_e64: 2794 IsF16 = true; 2795 LLVM_FALLTHROUGH; 2796 case AMDGPU::V_MAC_F32_e64: 2797 case AMDGPU::V_FMAC_F32_e64: 2798 break; 2799 case AMDGPU::V_MAC_F16_e32: 2800 case AMDGPU::V_FMAC_F16_e32: 2801 IsF16 = true; 2802 LLVM_FALLTHROUGH; 2803 case AMDGPU::V_MAC_F32_e32: 2804 case AMDGPU::V_FMAC_F32_e32: { 2805 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2806 AMDGPU::OpName::src0); 2807 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2808 if (!Src0->isReg() && !Src0->isImm()) 2809 return nullptr; 2810 2811 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2812 return nullptr; 2813 2814 break; 2815 } 2816 } 2817 2818 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2819 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2820 const MachineOperand *Src0Mods = 2821 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2822 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2823 const MachineOperand *Src1Mods = 2824 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2825 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2826 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2827 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2828 2829 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && 2830 // If we have an SGPR input, we will violate the constant bus restriction. 2831 (ST.getConstantBusLimit(Opc) > 1 || 2832 !Src0->isReg() || 2833 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2834 if (auto Imm = getFoldableImm(Src2)) { 2835 unsigned NewOpc = 2836 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 2837 : (IsF16 ? 
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2838 if (pseudoToMCOpcode(NewOpc) != -1) 2839 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2840 .add(*Dst) 2841 .add(*Src0) 2842 .add(*Src1) 2843 .addImm(Imm); 2844 } 2845 unsigned NewOpc = 2846 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2847 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2848 if (auto Imm = getFoldableImm(Src1)) { 2849 if (pseudoToMCOpcode(NewOpc) != -1) 2850 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2851 .add(*Dst) 2852 .add(*Src0) 2853 .addImm(Imm) 2854 .add(*Src2); 2855 } 2856 if (auto Imm = getFoldableImm(Src0)) { 2857 if (pseudoToMCOpcode(NewOpc) != -1 && 2858 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, 2859 AMDGPU::OpName::src0), Src1)) 2860 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2861 .add(*Dst) 2862 .add(*Src1) 2863 .addImm(Imm) 2864 .add(*Src2); 2865 } 2866 } 2867 2868 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2869 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2870 if (pseudoToMCOpcode(NewOpc) == -1) 2871 return nullptr; 2872 2873 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2874 .add(*Dst) 2875 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2876 .add(*Src0) 2877 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2878 .add(*Src1) 2879 .addImm(0) // Src mods 2880 .add(*Src2) 2881 .addImm(Clamp ? Clamp->getImm() : 0) 2882 .addImm(Omod ? Omod->getImm() : 0); 2883 } 2884 2885 // It's not generally safe to move VALU instructions across these since it will 2886 // start using the register as a base index rather than directly. 2887 // XXX - Why isn't hasSideEffects sufficient for these? 2888 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2889 switch (MI.getOpcode()) { 2890 case AMDGPU::S_SET_GPR_IDX_ON: 2891 case AMDGPU::S_SET_GPR_IDX_MODE: 2892 case AMDGPU::S_SET_GPR_IDX_OFF: 2893 return true; 2894 default: 2895 return false; 2896 } 2897 } 2898 2899 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2900 const MachineBasicBlock *MBB, 2901 const MachineFunction &MF) const { 2902 // XXX - Do we want the SP check in the base implementation? 2903 2904 // Target-independent instructions do not have an implicit-use of EXEC, even 2905 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2906 // boundaries prevents incorrect movements of such instructions. 2907 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2908 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2909 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2910 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2911 MI.getOpcode() == AMDGPU::S_DENORM_MODE || 2912 changesVGPRIndexingMode(MI); 2913 } 2914 2915 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 2916 return Opcode == AMDGPU::DS_ORDERED_COUNT || 2917 Opcode == AMDGPU::DS_GWS_INIT || 2918 Opcode == AMDGPU::DS_GWS_SEMA_V || 2919 Opcode == AMDGPU::DS_GWS_SEMA_BR || 2920 Opcode == AMDGPU::DS_GWS_SEMA_P || 2921 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 2922 Opcode == AMDGPU::DS_GWS_BARRIER; 2923 } 2924 2925 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 2926 unsigned Opcode = MI.getOpcode(); 2927 2928 if (MI.mayStore() && isSMRD(MI)) 2929 return true; // scalar store or atomic 2930 2931 // This will terminate the function when other lanes may need to continue. 
2932 if (MI.isReturn()) 2933 return true; 2934 2935 // These instructions cause shader I/O that may cause hardware lockups 2936 // when executed with an empty EXEC mask. 2937 // 2938 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 2939 // EXEC = 0, but checking for that case here seems not worth it 2940 // given the typical code patterns. 2941 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 2942 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 2943 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 2944 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 2945 return true; 2946 2947 if (MI.isCall() || MI.isInlineAsm()) 2948 return true; // conservative assumption 2949 2950 // These are like SALU instructions in terms of effects, so it's questionable 2951 // whether we should return true for those. 2952 // 2953 // However, executing them with EXEC = 0 causes them to operate on undefined 2954 // data, which we avoid by returning true here. 2955 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 2956 return true; 2957 2958 return false; 2959 } 2960 2961 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 2962 const MachineInstr &MI) const { 2963 if (MI.isMetaInstruction()) 2964 return false; 2965 2966 // This won't read exec if this is an SGPR->SGPR copy. 2967 if (MI.isCopyLike()) { 2968 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 2969 return true; 2970 2971 // Make sure this isn't copying exec as a normal operand 2972 return MI.readsRegister(AMDGPU::EXEC, &RI); 2973 } 2974 2975 // Make a conservative assumption about the callee. 2976 if (MI.isCall()) 2977 return true; 2978 2979 // Be conservative with any unhandled generic opcodes. 2980 if (!isTargetSpecificOpcode(MI.getOpcode())) 2981 return true; 2982 2983 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 2984 } 2985 2986 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2987 switch (Imm.getBitWidth()) { 2988 case 1: // This likely will be a condition code mask. 2989 return true; 2990 2991 case 32: 2992 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2993 ST.hasInv2PiInlineImm()); 2994 case 64: 2995 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2996 ST.hasInv2PiInlineImm()); 2997 case 16: 2998 return ST.has16BitInsts() && 2999 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3000 ST.hasInv2PiInlineImm()); 3001 default: 3002 llvm_unreachable("invalid bitwidth"); 3003 } 3004 } 3005 3006 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3007 uint8_t OperandType) const { 3008 if (!MO.isImm() || 3009 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3010 OperandType > AMDGPU::OPERAND_SRC_LAST) 3011 return false; 3012 3013 // MachineOperand provides no way to tell the true operand size, since it only 3014 // records a 64-bit value. We need to know the size to determine if a 32-bit 3015 // floating point immediate bit pattern is legal for an integer immediate. It 3016 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 
3017 3018 int64_t Imm = MO.getImm(); 3019 switch (OperandType) { 3020 case AMDGPU::OPERAND_REG_IMM_INT32: 3021 case AMDGPU::OPERAND_REG_IMM_FP32: 3022 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3023 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3024 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3025 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3026 int32_t Trunc = static_cast<int32_t>(Imm); 3027 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3028 } 3029 case AMDGPU::OPERAND_REG_IMM_INT64: 3030 case AMDGPU::OPERAND_REG_IMM_FP64: 3031 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3032 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3033 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3034 ST.hasInv2PiInlineImm()); 3035 case AMDGPU::OPERAND_REG_IMM_INT16: 3036 case AMDGPU::OPERAND_REG_IMM_FP16: 3037 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3038 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3039 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3040 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3041 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3042 // A few special case instructions have 16-bit operands on subtargets 3043 // where 16-bit instructions are not legal. 3044 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 3045 // constants in these cases 3046 int16_t Trunc = static_cast<int16_t>(Imm); 3047 return ST.has16BitInsts() && 3048 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3049 } 3050 3051 return false; 3052 } 3053 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3054 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3055 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3056 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3057 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3058 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3059 uint32_t Trunc = static_cast<uint32_t>(Imm); 3060 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3061 } 3062 default: 3063 llvm_unreachable("invalid bitwidth"); 3064 } 3065 } 3066 3067 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3068 const MCOperandInfo &OpInfo) const { 3069 switch (MO.getType()) { 3070 case MachineOperand::MO_Register: 3071 return false; 3072 case MachineOperand::MO_Immediate: 3073 return !isInlineConstant(MO, OpInfo); 3074 case MachineOperand::MO_FrameIndex: 3075 case MachineOperand::MO_MachineBasicBlock: 3076 case MachineOperand::MO_ExternalSymbol: 3077 case MachineOperand::MO_GlobalAddress: 3078 case MachineOperand::MO_MCSymbol: 3079 return true; 3080 default: 3081 llvm_unreachable("unexpected operand type"); 3082 } 3083 } 3084 3085 static bool compareMachineOp(const MachineOperand &Op0, 3086 const MachineOperand &Op1) { 3087 if (Op0.getType() != Op1.getType()) 3088 return false; 3089 3090 switch (Op0.getType()) { 3091 case MachineOperand::MO_Register: 3092 return Op0.getReg() == Op1.getReg(); 3093 case MachineOperand::MO_Immediate: 3094 return Op0.getImm() == Op1.getImm(); 3095 default: 3096 llvm_unreachable("Didn't expect to be comparing these operand types"); 3097 } 3098 } 3099 3100 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3101 const MachineOperand &MO) const { 3102 const MCInstrDesc &InstDesc = MI.getDesc(); 3103 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3104 3105 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3106 3107 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3108 return true; 3109 3110 if (OpInfo.RegClass < 0) 3111 return false; 3112 3113 const MachineFunction *MF = MI.getParent()->getParent(); 3114 const GCNSubtarget &ST = 
MF->getSubtarget<GCNSubtarget>();
3115
3116   if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3117     if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3118         OpNo == (unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3119                                                      AMDGPU::OpName::src2))
3120       return false;
3121     return RI.opCanUseInlineConstant(OpInfo.OperandType);
3122   }
3123
3124   if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3125     return false;
3126
3127   if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3128     return true;
3129
3130   return ST.hasVOP3Literal();
3131 }
3132
3133 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3134   int Op32 = AMDGPU::getVOPe32(Opcode);
3135   if (Op32 == -1)
3136     return false;
3137
3138   return pseudoToMCOpcode(Op32) != -1;
3139 }
3140
3141 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3142   // The src0_modifiers operand is present on all instructions
3143   // that have modifiers.
3144
3145   return AMDGPU::getNamedOperandIdx(Opcode,
3146                                     AMDGPU::OpName::src0_modifiers) != -1;
3147 }
3148
3149 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3150                                   unsigned OpName) const {
3151   const MachineOperand *Mods = getNamedOperand(MI, OpName);
3152   return Mods && Mods->getImm();
3153 }
3154
3155 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3156   return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3157          hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3158          hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3159          hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3160          hasModifiersSet(MI, AMDGPU::OpName::omod);
3161 }
3162
3163 bool SIInstrInfo::canShrink(const MachineInstr &MI,
3164                             const MachineRegisterInfo &MRI) const {
3165   const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3166   // Can't shrink instruction with three operands.
3167   // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
3168   // a special case for it. It can only be shrunk if the third operand
3169   // is vcc, and src0_modifiers and src1_modifiers are not set.
3170   // We should handle this the same way we handle vopc, by adding
3171   // a register allocation hint pre-regalloc and then doing the shrinking
3172   // post-regalloc.
3173   if (Src2) {
3174     switch (MI.getOpcode()) {
3175       default: return false;
3176
3177       case AMDGPU::V_ADDC_U32_e64:
3178       case AMDGPU::V_SUBB_U32_e64:
3179       case AMDGPU::V_SUBBREV_U32_e64: {
3180         const MachineOperand *Src1
3181           = getNamedOperand(MI, AMDGPU::OpName::src1);
3182         if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3183           return false;
3184         // Additional verification is needed for sdst/src2.
3185         return true;
3186       }
3187       case AMDGPU::V_MAC_F32_e64:
3188       case AMDGPU::V_MAC_F16_e64:
3189       case AMDGPU::V_FMAC_F32_e64:
3190       case AMDGPU::V_FMAC_F16_e64:
3191         if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3192             hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3193           return false;
3194         break;
3195
3196       case AMDGPU::V_CNDMASK_B32_e64:
3197         break;
3198     }
3199   }
3200
3201   const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3202   if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3203                hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3204     return false;
3205
3206   // We don't need to check src0, all input types are legal, so just make sure
3207   // src0 isn't using any modifiers.
3208   if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3209     return false;
3210
3211   // Can it be shrunk to a valid 32 bit opcode?
3212 if (!hasVALU32BitEncoding(MI.getOpcode())) 3213 return false; 3214 3215 // Check output modifiers 3216 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3217 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3218 } 3219 3220 // Set VCC operand with all flags from \p Orig, except for setting it as 3221 // implicit. 3222 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3223 const MachineOperand &Orig) { 3224 3225 for (MachineOperand &Use : MI.implicit_operands()) { 3226 if (Use.isUse() && Use.getReg() == AMDGPU::VCC) { 3227 Use.setIsUndef(Orig.isUndef()); 3228 Use.setIsKill(Orig.isKill()); 3229 return; 3230 } 3231 } 3232 } 3233 3234 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3235 unsigned Op32) const { 3236 MachineBasicBlock *MBB = MI.getParent();; 3237 MachineInstrBuilder Inst32 = 3238 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)); 3239 3240 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3241 // For VOPC instructions, this is replaced by an implicit def of vcc. 3242 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3243 if (Op32DstIdx != -1) { 3244 // dst 3245 Inst32.add(MI.getOperand(0)); 3246 } else { 3247 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3248 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3249 "Unexpected case"); 3250 } 3251 3252 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3253 3254 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3255 if (Src1) 3256 Inst32.add(*Src1); 3257 3258 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3259 3260 if (Src2) { 3261 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3262 if (Op32Src2Idx != -1) { 3263 Inst32.add(*Src2); 3264 } else { 3265 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3266 // replaced with an implicit read of vcc. This was already added 3267 // during the initial BuildMI, so find it to preserve the flags. 3268 copyFlagsToImplicitVCC(*Inst32, *Src2); 3269 } 3270 } 3271 3272 return Inst32; 3273 } 3274 3275 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3276 const MachineOperand &MO, 3277 const MCOperandInfo &OpInfo) const { 3278 // Literal constants use the constant bus. 3279 //if (isLiteralConstantLike(MO, OpInfo)) 3280 // return true; 3281 if (MO.isImm()) 3282 return !isInlineConstant(MO, OpInfo); 3283 3284 if (!MO.isReg()) 3285 return true; // Misc other operands like FrameIndex 3286 3287 if (!MO.isUse()) 3288 return false; 3289 3290 if (Register::isVirtualRegister(MO.getReg())) 3291 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3292 3293 // Null is free 3294 if (MO.getReg() == AMDGPU::SGPR_NULL) 3295 return false; 3296 3297 // SGPRs use the constant bus 3298 if (MO.isImplicit()) { 3299 return MO.getReg() == AMDGPU::M0 || 3300 MO.getReg() == AMDGPU::VCC || 3301 MO.getReg() == AMDGPU::VCC_LO; 3302 } else { 3303 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3304 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3305 } 3306 } 3307 3308 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3309 for (const MachineOperand &MO : MI.implicit_operands()) { 3310 // We only care about reads. 
3311 if (MO.isDef()) 3312 continue; 3313 3314 switch (MO.getReg()) { 3315 case AMDGPU::VCC: 3316 case AMDGPU::VCC_LO: 3317 case AMDGPU::VCC_HI: 3318 case AMDGPU::M0: 3319 case AMDGPU::FLAT_SCR: 3320 return MO.getReg(); 3321 3322 default: 3323 break; 3324 } 3325 } 3326 3327 return AMDGPU::NoRegister; 3328 } 3329 3330 static bool shouldReadExec(const MachineInstr &MI) { 3331 if (SIInstrInfo::isVALU(MI)) { 3332 switch (MI.getOpcode()) { 3333 case AMDGPU::V_READLANE_B32: 3334 case AMDGPU::V_READLANE_B32_gfx6_gfx7: 3335 case AMDGPU::V_READLANE_B32_gfx10: 3336 case AMDGPU::V_READLANE_B32_vi: 3337 case AMDGPU::V_WRITELANE_B32: 3338 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: 3339 case AMDGPU::V_WRITELANE_B32_gfx10: 3340 case AMDGPU::V_WRITELANE_B32_vi: 3341 return false; 3342 } 3343 3344 return true; 3345 } 3346 3347 if (MI.isPreISelOpcode() || 3348 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3349 SIInstrInfo::isSALU(MI) || 3350 SIInstrInfo::isSMRD(MI)) 3351 return false; 3352 3353 return true; 3354 } 3355 3356 static bool isSubRegOf(const SIRegisterInfo &TRI, 3357 const MachineOperand &SuperVec, 3358 const MachineOperand &SubReg) { 3359 if (Register::isPhysicalRegister(SubReg.getReg())) 3360 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3361 3362 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3363 SubReg.getReg() == SuperVec.getReg(); 3364 } 3365 3366 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3367 StringRef &ErrInfo) const { 3368 uint16_t Opcode = MI.getOpcode(); 3369 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3370 return true; 3371 3372 const MachineFunction *MF = MI.getParent()->getParent(); 3373 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3374 3375 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3376 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3377 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3378 3379 // Make sure the number of operands is correct. 3380 const MCInstrDesc &Desc = get(Opcode); 3381 if (!Desc.isVariadic() && 3382 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3383 ErrInfo = "Instruction has wrong number of operands."; 3384 return false; 3385 } 3386 3387 if (MI.isInlineAsm()) { 3388 // Verify register classes for inlineasm constraints. 3389 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3390 I != E; ++I) { 3391 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3392 if (!RC) 3393 continue; 3394 3395 const MachineOperand &Op = MI.getOperand(I); 3396 if (!Op.isReg()) 3397 continue; 3398 3399 Register Reg = Op.getReg(); 3400 if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) { 3401 ErrInfo = "inlineasm operand has incorrect register class."; 3402 return false; 3403 } 3404 } 3405 3406 return true; 3407 } 3408 3409 // Make sure the register classes are correct. 3410 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3411 if (MI.getOperand(i).isFPImm()) { 3412 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3413 "all fp values to integers."; 3414 return false; 3415 } 3416 3417 int RegClass = Desc.OpInfo[i].RegClass; 3418 3419 switch (Desc.OpInfo[i].OperandType) { 3420 case MCOI::OPERAND_REGISTER: 3421 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3422 ErrInfo = "Illegal immediate value for operand."; 3423 return false; 3424 } 3425 break; 3426 case AMDGPU::OPERAND_REG_IMM_INT32: 3427 case AMDGPU::OPERAND_REG_IMM_FP32: 3428 break; 3429 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3430 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3431 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3432 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3433 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3434 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3435 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3436 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3437 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3438 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3439 const MachineOperand &MO = MI.getOperand(i); 3440 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3441 ErrInfo = "Illegal immediate value for operand."; 3442 return false; 3443 } 3444 break; 3445 } 3446 case MCOI::OPERAND_IMMEDIATE: 3447 case AMDGPU::OPERAND_KIMM32: 3448 // Check if this operand is an immediate. 3449 // FrameIndex operands will be replaced by immediates, so they are 3450 // allowed. 3451 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3452 ErrInfo = "Expected immediate, but got non-immediate"; 3453 return false; 3454 } 3455 LLVM_FALLTHROUGH; 3456 default: 3457 continue; 3458 } 3459 3460 if (!MI.getOperand(i).isReg()) 3461 continue; 3462 3463 if (RegClass != -1) { 3464 Register Reg = MI.getOperand(i).getReg(); 3465 if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg)) 3466 continue; 3467 3468 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3469 if (!RC->contains(Reg)) { 3470 ErrInfo = "Operand has incorrect register class."; 3471 return false; 3472 } 3473 } 3474 } 3475 3476 // Verify SDWA 3477 if (isSDWA(MI)) { 3478 if (!ST.hasSDWA()) { 3479 ErrInfo = "SDWA is not supported on this target"; 3480 return false; 3481 } 3482 3483 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3484 3485 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3486 3487 for (int OpIdx: OpIndicies) { 3488 if (OpIdx == -1) 3489 continue; 3490 const MachineOperand &MO = MI.getOperand(OpIdx); 3491 3492 if (!ST.hasSDWAScalar()) { 3493 // Only VGPRS on VI 3494 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3495 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3496 return false; 3497 } 3498 } else { 3499 // No immediates on GFX9 3500 if (!MO.isReg()) { 3501 ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9"; 3502 return false; 3503 } 3504 } 3505 } 3506 3507 if (!ST.hasSDWAOmod()) { 3508 // No omod allowed on VI 3509 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3510 if (OMod != nullptr && 3511 (!OMod->isImm() || OMod->getImm() != 0)) { 3512 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3513 return false; 3514 } 3515 } 3516 3517 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3518 if (isVOPC(BasicOpcode)) { 3519 if (!ST.hasSDWASdst() && DstIdx != -1) { 3520 // Only vcc allowed as dst on VI for VOPC 3521 const MachineOperand &Dst = MI.getOperand(DstIdx); 3522 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3523 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3524 return 
false;
3525 }
3526 } else if (!ST.hasSDWAOutModsVOPC()) {
3527 // No clamp allowed on GFX9 for VOPC
3528 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3529 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
3530 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on GFX9";
3531 return false;
3532 }
3533
3534 // No omod allowed on GFX9 for VOPC
3535 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3536 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
3537 ErrInfo = "OMod not allowed in VOPC SDWA instructions on GFX9";
3538 return false;
3539 }
3540 }
3541 }
3542
3543 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
3544 if (DstUnused && DstUnused->isImm() &&
3545 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
3546 const MachineOperand &Dst = MI.getOperand(DstIdx);
3547 if (!Dst.isReg() || !Dst.isTied()) {
3548 ErrInfo = "Dst register should have tied register";
3549 return false;
3550 }
3551
3552 const MachineOperand &TiedMO =
3553 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
3554 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
3555 ErrInfo =
3556 "Dst register should be tied to implicit use of preserved register";
3557 return false;
3558 } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
3559 Dst.getReg() != TiedMO.getReg()) {
3560 ErrInfo = "Dst register should use same physical register as preserved";
3561 return false;
3562 }
3563 }
3564 }
3565
3566 // Verify MIMG
3567 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
3568 // Ensure that the return type used is large enough for all the options
3569 // being used. TFE/LWE require an extra result register.
3570 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
3571 if (DMask) {
3572 uint64_t DMaskImm = DMask->getImm();
3573 uint32_t RegCount =
3574 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
3575 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
3576 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
3577 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
3578
3579 // Adjust for packed 16 bit values
3580 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
3581 RegCount >>= 1;
3582
3583 // Adjust if using LWE or TFE
3584 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
3585 RegCount += 1;
3586
3587 const uint32_t DstIdx =
3588 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
3589 const MachineOperand &Dst = MI.getOperand(DstIdx);
3590 if (Dst.isReg()) {
3591 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
3592 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
3593 if (RegCount > DstSize) {
3594 ErrInfo = "MIMG instruction returns too many registers for dst "
3595 "register class";
3596 return false;
3597 }
3598 }
3599 }
3600 }
3601
3602 // Verify VOP*. Ignore multiple sgpr operands on writelane.
3603 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
3604 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
3605 // Only look at the true operands. Only a real operand can use the constant
3606 // bus, and we don't want to check pseudo-operands like the source modifier
3607 // flags.
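// Each literal operand and each distinct SGPR counts as one constant bus
// use; registers that overlap, such as vcc and vcc_lo, are only counted
// once. The total is checked against the subtarget limit below.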
3608 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3609 3610 unsigned ConstantBusCount = 0; 3611 unsigned LiteralCount = 0; 3612 3613 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3614 ++ConstantBusCount; 3615 3616 SmallVector<Register, 2> SGPRsUsed; 3617 Register SGPRUsed = findImplicitSGPRRead(MI); 3618 if (SGPRUsed != AMDGPU::NoRegister) { 3619 ++ConstantBusCount; 3620 SGPRsUsed.push_back(SGPRUsed); 3621 } 3622 3623 for (int OpIdx : OpIndices) { 3624 if (OpIdx == -1) 3625 break; 3626 const MachineOperand &MO = MI.getOperand(OpIdx); 3627 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3628 if (MO.isReg()) { 3629 SGPRUsed = MO.getReg(); 3630 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 3631 return !RI.regsOverlap(SGPRUsed, SGPR); 3632 })) { 3633 ++ConstantBusCount; 3634 SGPRsUsed.push_back(SGPRUsed); 3635 } 3636 } else { 3637 ++ConstantBusCount; 3638 ++LiteralCount; 3639 } 3640 } 3641 } 3642 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3643 // v_writelane_b32 is an exception from constant bus restriction: 3644 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 3645 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 3646 Opcode != AMDGPU::V_WRITELANE_B32) { 3647 ErrInfo = "VOP* instruction violates constant bus restriction"; 3648 return false; 3649 } 3650 3651 if (isVOP3(MI) && LiteralCount) { 3652 if (LiteralCount && !ST.hasVOP3Literal()) { 3653 ErrInfo = "VOP3 instruction uses literal"; 3654 return false; 3655 } 3656 if (LiteralCount > 1) { 3657 ErrInfo = "VOP3 instruction uses more than one literal"; 3658 return false; 3659 } 3660 } 3661 } 3662 3663 // Special case for writelane - this can break the multiple constant bus rule, 3664 // but still can't use more than one SGPR register 3665 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 3666 unsigned SGPRCount = 0; 3667 Register SGPRUsed = AMDGPU::NoRegister; 3668 3669 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 3670 if (OpIdx == -1) 3671 break; 3672 3673 const MachineOperand &MO = MI.getOperand(OpIdx); 3674 3675 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3676 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 3677 if (MO.getReg() != SGPRUsed) 3678 ++SGPRCount; 3679 SGPRUsed = MO.getReg(); 3680 } 3681 } 3682 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 3683 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 3684 return false; 3685 } 3686 } 3687 } 3688 3689 // Verify misc. restrictions on specific instructions. 
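// v_div_scale requires src0 to match one of the other two sources; src0
// effectively selects which operand the scale is applied to.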
3690 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3691 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3692 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3693 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3694 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3695 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3696 if (!compareMachineOp(Src0, Src1) && 3697 !compareMachineOp(Src0, Src2)) { 3698 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3699 return false; 3700 } 3701 } 3702 } 3703 3704 if (isSOP2(MI) || isSOPC(MI)) { 3705 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3706 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3707 unsigned Immediates = 0; 3708 3709 if (!Src0.isReg() && 3710 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3711 Immediates++; 3712 if (!Src1.isReg() && 3713 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3714 Immediates++; 3715 3716 if (Immediates > 1) { 3717 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3718 return false; 3719 } 3720 } 3721 3722 if (isSOPK(MI)) { 3723 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3724 if (Desc.isBranch()) { 3725 if (!Op->isMBB()) { 3726 ErrInfo = "invalid branch target for SOPK instruction"; 3727 return false; 3728 } 3729 } else { 3730 uint64_t Imm = Op->getImm(); 3731 if (sopkIsZext(MI)) { 3732 if (!isUInt<16>(Imm)) { 3733 ErrInfo = "invalid immediate for SOPK instruction"; 3734 return false; 3735 } 3736 } else { 3737 if (!isInt<16>(Imm)) { 3738 ErrInfo = "invalid immediate for SOPK instruction"; 3739 return false; 3740 } 3741 } 3742 } 3743 } 3744 3745 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3746 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3747 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3748 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3749 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3750 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3751 3752 const unsigned StaticNumOps = Desc.getNumOperands() + 3753 Desc.getNumImplicitUses(); 3754 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3755 3756 // Allow additional implicit operands. This allows a fixup done by the post 3757 // RA scheduler where the main implicit operand is killed and implicit-defs 3758 // are added for sub-registers that remain live after this instruction. 3759 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3760 ErrInfo = "missing implicit register operands"; 3761 return false; 3762 } 3763 3764 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3765 if (IsDst) { 3766 if (!Dst->isUse()) { 3767 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3768 return false; 3769 } 3770 3771 unsigned UseOpIdx; 3772 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3773 UseOpIdx != StaticNumOps + 1) { 3774 ErrInfo = "movrel implicit operands should be tied"; 3775 return false; 3776 } 3777 } 3778 3779 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3780 const MachineOperand &ImpUse 3781 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3782 if (!ImpUse.isReg() || !ImpUse.isUse() || 3783 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3784 ErrInfo = "src0 should be subreg of implicit vector use"; 3785 return false; 3786 } 3787 } 3788 3789 // Make sure we aren't losing exec uses in the td files. This mostly requires 3790 // being careful when using let Uses to try to add other use registers. 
3791 if (shouldReadExec(MI)) { 3792 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3793 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3794 return false; 3795 } 3796 } 3797 3798 if (isSMRD(MI)) { 3799 if (MI.mayStore()) { 3800 // The register offset form of scalar stores may only use m0 as the 3801 // soffset register. 3802 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3803 if (Soff && Soff->getReg() != AMDGPU::M0) { 3804 ErrInfo = "scalar stores must use m0 as offset register"; 3805 return false; 3806 } 3807 } 3808 } 3809 3810 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { 3811 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3812 if (Offset->getImm() != 0) { 3813 ErrInfo = "subtarget does not support offsets in flat instructions"; 3814 return false; 3815 } 3816 } 3817 3818 if (isMIMG(MI)) { 3819 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 3820 if (DimOp) { 3821 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 3822 AMDGPU::OpName::vaddr0); 3823 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 3824 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 3825 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 3826 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 3827 const AMDGPU::MIMGDimInfo *Dim = 3828 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 3829 3830 if (!Dim) { 3831 ErrInfo = "dim is out of range"; 3832 return false; 3833 } 3834 3835 bool IsA16 = false; 3836 if (ST.hasR128A16()) { 3837 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 3838 IsA16 = R128A16->getImm() != 0; 3839 } else if (ST.hasGFX10A16()) { 3840 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 3841 IsA16 = A16->getImm() != 0; 3842 } 3843 3844 bool PackDerivatives = IsA16; // Either A16 or G16 3845 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 3846 3847 unsigned AddrWords = BaseOpcode->NumExtraArgs; 3848 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 3849 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 3850 if (IsA16) 3851 AddrWords += (AddrComponents + 1) / 2; 3852 else 3853 AddrWords += AddrComponents; 3854 3855 if (BaseOpcode->Gradients) { 3856 if (PackDerivatives) 3857 // There are two gradients per coordinate, we pack them separately. 
3858 // For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv) 3859 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2; 3860 else 3861 AddrWords += Dim->NumGradients; 3862 } 3863 3864 unsigned VAddrWords; 3865 if (IsNSA) { 3866 VAddrWords = SRsrcIdx - VAddr0Idx; 3867 } else { 3868 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 3869 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 3870 if (AddrWords > 8) 3871 AddrWords = 16; 3872 else if (AddrWords > 4) 3873 AddrWords = 8; 3874 else if (AddrWords == 4) 3875 AddrWords = 4; 3876 else if (AddrWords == 3) 3877 AddrWords = 3; 3878 } 3879 3880 if (VAddrWords != AddrWords) { 3881 ErrInfo = "bad vaddr size"; 3882 return false; 3883 } 3884 } 3885 } 3886 3887 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 3888 if (DppCt) { 3889 using namespace AMDGPU::DPP; 3890 3891 unsigned DC = DppCt->getImm(); 3892 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 3893 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 3894 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 3895 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 3896 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 3897 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 3898 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 3899 ErrInfo = "Invalid dpp_ctrl value"; 3900 return false; 3901 } 3902 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 3903 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 3904 ErrInfo = "Invalid dpp_ctrl value: " 3905 "wavefront shifts are not supported on GFX10+"; 3906 return false; 3907 } 3908 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 3909 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 3910 ErrInfo = "Invalid dpp_ctrl value: " 3911 "broadcasts are not supported on GFX10+"; 3912 return false; 3913 } 3914 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 3915 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 3916 ErrInfo = "Invalid dpp_ctrl value: " 3917 "row_share and row_xmask are not supported before GFX10"; 3918 return false; 3919 } 3920 } 3921 3922 return true; 3923 } 3924 3925 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 3926 switch (MI.getOpcode()) { 3927 default: return AMDGPU::INSTRUCTION_LIST_END; 3928 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 3929 case AMDGPU::COPY: return AMDGPU::COPY; 3930 case AMDGPU::PHI: return AMDGPU::PHI; 3931 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 3932 case AMDGPU::WQM: return AMDGPU::WQM; 3933 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 3934 case AMDGPU::WWM: return AMDGPU::WWM; 3935 case AMDGPU::S_MOV_B32: { 3936 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3937 return MI.getOperand(1).isReg() || 3938 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 3939 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 3940 } 3941 case AMDGPU::S_ADD_I32: 3942 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32; 3943 case AMDGPU::S_ADDC_U32: 3944 return AMDGPU::V_ADDC_U32_e32; 3945 case AMDGPU::S_SUB_I32: 3946 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32; 3947 // FIXME: These are not consistently handled, and selected when the carry is 3948 // used. 
3949 case AMDGPU::S_ADD_U32: 3950 return AMDGPU::V_ADD_I32_e32; 3951 case AMDGPU::S_SUB_U32: 3952 return AMDGPU::V_SUB_I32_e32; 3953 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 3954 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 3955 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 3956 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 3957 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 3958 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 3959 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 3960 case AMDGPU::S_XNOR_B32: 3961 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 3962 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 3963 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 3964 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 3965 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 3966 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 3967 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 3968 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 3969 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 3970 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 3971 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 3972 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 3973 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 3974 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 3975 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 3976 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 3977 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 3978 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 3979 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 3980 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 3981 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 3982 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 3983 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 3984 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 3985 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 3986 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 3987 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 3988 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 3989 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 3990 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 3991 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 3992 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 3993 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 3994 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 3995 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 3996 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 3997 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 3998 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 3999 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4000 } 4001 llvm_unreachable( 4002 "Unexpected scalar opcode without corresponding vector one!"); 4003 } 4004 4005 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4006 unsigned OpNo) const { 4007 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4008 const MCInstrDesc &Desc = get(MI.getOpcode()); 4009 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4010 Desc.OpInfo[OpNo].RegClass == -1) { 4011 Register Reg = MI.getOperand(OpNo).getReg(); 4012 4013 if 
(Register::isVirtualRegister(Reg)) 4014 return MRI.getRegClass(Reg); 4015 return RI.getPhysRegClass(Reg); 4016 } 4017 4018 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4019 return RI.getRegClass(RCID); 4020 } 4021 4022 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4023 MachineBasicBlock::iterator I = MI; 4024 MachineBasicBlock *MBB = MI.getParent(); 4025 MachineOperand &MO = MI.getOperand(OpIdx); 4026 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4027 const SIRegisterInfo *TRI = 4028 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 4029 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4030 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4031 unsigned Size = TRI->getRegSizeInBits(*RC); 4032 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4033 if (MO.isReg()) 4034 Opcode = AMDGPU::COPY; 4035 else if (RI.isSGPRClass(RC)) 4036 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4037 4038 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4039 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 4040 VRC = &AMDGPU::VReg_64RegClass; 4041 else 4042 VRC = &AMDGPU::VGPR_32RegClass; 4043 4044 Register Reg = MRI.createVirtualRegister(VRC); 4045 DebugLoc DL = MBB->findDebugLoc(I); 4046 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4047 MO.ChangeToRegister(Reg, false); 4048 } 4049 4050 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4051 MachineRegisterInfo &MRI, 4052 MachineOperand &SuperReg, 4053 const TargetRegisterClass *SuperRC, 4054 unsigned SubIdx, 4055 const TargetRegisterClass *SubRC) 4056 const { 4057 MachineBasicBlock *MBB = MI->getParent(); 4058 DebugLoc DL = MI->getDebugLoc(); 4059 Register SubReg = MRI.createVirtualRegister(SubRC); 4060 4061 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4062 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4063 .addReg(SuperReg.getReg(), 0, SubIdx); 4064 return SubReg; 4065 } 4066 4067 // Just in case the super register is itself a sub-register, copy it to a new 4068 // value so we don't need to worry about merging its subreg index with the 4069 // SubIdx passed to this function. The register coalescer should be able to 4070 // eliminate this extra copy. 
4071 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4072 4073 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4074 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4075 4076 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4077 .addReg(NewSuperReg, 0, SubIdx); 4078 4079 return SubReg; 4080 } 4081 4082 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4083 MachineBasicBlock::iterator MII, 4084 MachineRegisterInfo &MRI, 4085 MachineOperand &Op, 4086 const TargetRegisterClass *SuperRC, 4087 unsigned SubIdx, 4088 const TargetRegisterClass *SubRC) const { 4089 if (Op.isImm()) { 4090 if (SubIdx == AMDGPU::sub0) 4091 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4092 if (SubIdx == AMDGPU::sub1) 4093 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4094 4095 llvm_unreachable("Unhandled register index for immediate"); 4096 } 4097 4098 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4099 SubIdx, SubRC); 4100 return MachineOperand::CreateReg(SubReg, false); 4101 } 4102 4103 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4104 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4105 assert(Inst.getNumExplicitOperands() == 3); 4106 MachineOperand Op1 = Inst.getOperand(1); 4107 Inst.RemoveOperand(1); 4108 Inst.addOperand(Op1); 4109 } 4110 4111 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4112 const MCOperandInfo &OpInfo, 4113 const MachineOperand &MO) const { 4114 if (!MO.isReg()) 4115 return false; 4116 4117 Register Reg = MO.getReg(); 4118 const TargetRegisterClass *RC = Register::isVirtualRegister(Reg) 4119 ? MRI.getRegClass(Reg) 4120 : RI.getPhysRegClass(Reg); 4121 4122 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4123 if (MO.getSubReg()) { 4124 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4125 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4126 if (!SuperRC) 4127 return false; 4128 4129 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4130 if (!DRC) 4131 return false; 4132 } 4133 return RC->hasSuperClassEq(DRC); 4134 } 4135 4136 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4137 const MCOperandInfo &OpInfo, 4138 const MachineOperand &MO) const { 4139 if (MO.isReg()) 4140 return isLegalRegOperand(MRI, OpInfo, MO); 4141 4142 // Handle non-register types that are treated like immediates. 4143 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4144 return true; 4145 } 4146 4147 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4148 const MachineOperand *MO) const { 4149 const MachineFunction &MF = *MI.getParent()->getParent(); 4150 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4151 const MCInstrDesc &InstDesc = MI.getDesc(); 4152 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4153 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4154 const TargetRegisterClass *DefinedRC = 4155 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4156 if (!MO) 4157 MO = &MI.getOperand(OpIdx); 4158 4159 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4160 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4161 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4162 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4163 return false; 4164 4165 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4166 if (MO->isReg()) 4167 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4168 4169 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4170 if (i == OpIdx) 4171 continue; 4172 const MachineOperand &Op = MI.getOperand(i); 4173 if (Op.isReg()) { 4174 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4175 if (!SGPRsUsed.count(SGPR) && 4176 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4177 if (--ConstantBusLimit <= 0) 4178 return false; 4179 SGPRsUsed.insert(SGPR); 4180 } 4181 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4182 if (--ConstantBusLimit <= 0) 4183 return false; 4184 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4185 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4186 if (!VOP3LiteralLimit--) 4187 return false; 4188 if (--ConstantBusLimit <= 0) 4189 return false; 4190 } 4191 } 4192 } 4193 4194 if (MO->isReg()) { 4195 assert(DefinedRC); 4196 return isLegalRegOperand(MRI, OpInfo, *MO); 4197 } 4198 4199 // Handle non-register types that are treated like immediates. 4200 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4201 4202 if (!DefinedRC) { 4203 // This operand expects an immediate. 4204 return true; 4205 } 4206 4207 return isImmOperandLegal(MI, OpIdx, *MO); 4208 } 4209 4210 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4211 MachineInstr &MI) const { 4212 unsigned Opc = MI.getOpcode(); 4213 const MCInstrDesc &InstrDesc = get(Opc); 4214 4215 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4216 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4217 4218 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4219 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4220 4221 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4222 // we need to only have one constant bus use before GFX10. 4223 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4224 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4225 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4226 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4227 legalizeOpWithMove(MI, Src0Idx); 4228 4229 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4230 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4231 // src0/src1 with V_READFIRSTLANE. 4232 if (Opc == AMDGPU::V_WRITELANE_B32) { 4233 const DebugLoc &DL = MI.getDebugLoc(); 4234 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4235 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4236 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4237 .add(Src0); 4238 Src0.ChangeToRegister(Reg, false); 4239 } 4240 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4241 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4242 const DebugLoc &DL = MI.getDebugLoc(); 4243 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4244 .add(Src1); 4245 Src1.ChangeToRegister(Reg, false); 4246 } 4247 return; 4248 } 4249 4250 // No VOP2 instructions support AGPRs. 
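// An AGPR source must therefore be copied to a VGPR first; legalizeOpWithMove
// emits the copy and rewrites the operand in place.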
4251 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4252 legalizeOpWithMove(MI, Src0Idx); 4253 4254 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4255 legalizeOpWithMove(MI, Src1Idx); 4256 4257 // VOP2 src0 instructions support all operand types, so we don't need to check 4258 // their legality. If src1 is already legal, we don't need to do anything. 4259 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4260 return; 4261 4262 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4263 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4264 // select is uniform. 4265 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4266 RI.isVGPR(MRI, Src1.getReg())) { 4267 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4268 const DebugLoc &DL = MI.getDebugLoc(); 4269 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4270 .add(Src1); 4271 Src1.ChangeToRegister(Reg, false); 4272 return; 4273 } 4274 4275 // We do not use commuteInstruction here because it is too aggressive and will 4276 // commute if it is possible. We only want to commute here if it improves 4277 // legality. This can be called a fairly large number of times so don't waste 4278 // compile time pointlessly swapping and checking legality again. 4279 if (HasImplicitSGPR || !MI.isCommutable()) { 4280 legalizeOpWithMove(MI, Src1Idx); 4281 return; 4282 } 4283 4284 // If src0 can be used as src1, commuting will make the operands legal. 4285 // Otherwise we have to give up and insert a move. 4286 // 4287 // TODO: Other immediate-like operand kinds could be commuted if there was a 4288 // MachineOperand::ChangeTo* for them. 4289 if ((!Src1.isImm() && !Src1.isReg()) || 4290 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4291 legalizeOpWithMove(MI, Src1Idx); 4292 return; 4293 } 4294 4295 int CommutedOpc = commuteOpcode(MI); 4296 if (CommutedOpc == -1) { 4297 legalizeOpWithMove(MI, Src1Idx); 4298 return; 4299 } 4300 4301 MI.setDesc(get(CommutedOpc)); 4302 4303 Register Src0Reg = Src0.getReg(); 4304 unsigned Src0SubReg = Src0.getSubReg(); 4305 bool Src0Kill = Src0.isKill(); 4306 4307 if (Src1.isImm()) 4308 Src0.ChangeToImmediate(Src1.getImm()); 4309 else if (Src1.isReg()) { 4310 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4311 Src0.setSubReg(Src1.getSubReg()); 4312 } else 4313 llvm_unreachable("Should only have register or immediate operands"); 4314 4315 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4316 Src1.setSubReg(Src0SubReg); 4317 fixImplicitOperands(MI); 4318 } 4319 4320 // Legalize VOP3 operands. All operand types are supported for any operand 4321 // but only one literal constant and only starting from GFX10. 
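// Before GFX10 this means at most one SGPR read through the constant bus and
// no literal; GFX10 typically allows two constant bus reads plus a single
// literal (see getConstantBusLimit() and hasVOP3Literal() below).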
4322 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4323 MachineInstr &MI) const { 4324 unsigned Opc = MI.getOpcode(); 4325 4326 int VOP3Idx[3] = { 4327 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4328 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4329 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4330 }; 4331 4332 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4333 Opc == AMDGPU::V_PERMLANEX16_B32) { 4334 // src1 and src2 must be scalar 4335 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4336 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4337 const DebugLoc &DL = MI.getDebugLoc(); 4338 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4339 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4340 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4341 .add(Src1); 4342 Src1.ChangeToRegister(Reg, false); 4343 } 4344 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4345 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4346 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4347 .add(Src2); 4348 Src2.ChangeToRegister(Reg, false); 4349 } 4350 } 4351 4352 // Find the one SGPR operand we are allowed to use. 4353 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4354 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4355 SmallDenseSet<unsigned> SGPRsUsed; 4356 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 4357 if (SGPRReg != AMDGPU::NoRegister) { 4358 SGPRsUsed.insert(SGPRReg); 4359 --ConstantBusLimit; 4360 } 4361 4362 for (unsigned i = 0; i < 3; ++i) { 4363 int Idx = VOP3Idx[i]; 4364 if (Idx == -1) 4365 break; 4366 MachineOperand &MO = MI.getOperand(Idx); 4367 4368 if (!MO.isReg()) { 4369 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4370 continue; 4371 4372 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4373 --LiteralLimit; 4374 --ConstantBusLimit; 4375 continue; 4376 } 4377 4378 --LiteralLimit; 4379 --ConstantBusLimit; 4380 legalizeOpWithMove(MI, Idx); 4381 continue; 4382 } 4383 4384 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4385 !isOperandLegal(MI, Idx, &MO)) { 4386 legalizeOpWithMove(MI, Idx); 4387 continue; 4388 } 4389 4390 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4391 continue; // VGPRs are legal 4392 4393 // We can use one SGPR in each VOP3 instruction prior to GFX10 4394 // and two starting from GFX10. 4395 if (SGPRsUsed.count(MO.getReg())) 4396 continue; 4397 if (ConstantBusLimit > 0) { 4398 SGPRsUsed.insert(MO.getReg()); 4399 --ConstantBusLimit; 4400 continue; 4401 } 4402 4403 // If we make it this far, then the operand is not legal and we must 4404 // legalize it. 
4405 legalizeOpWithMove(MI, Idx); 4406 } 4407 } 4408 4409 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, 4410 MachineRegisterInfo &MRI) const { 4411 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 4412 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 4413 Register DstReg = MRI.createVirtualRegister(SRC); 4414 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 4415 4416 if (RI.hasAGPRs(VRC)) { 4417 VRC = RI.getEquivalentVGPRClass(VRC); 4418 Register NewSrcReg = MRI.createVirtualRegister(VRC); 4419 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4420 get(TargetOpcode::COPY), NewSrcReg) 4421 .addReg(SrcReg); 4422 SrcReg = NewSrcReg; 4423 } 4424 4425 if (SubRegs == 1) { 4426 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4427 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 4428 .addReg(SrcReg); 4429 return DstReg; 4430 } 4431 4432 SmallVector<unsigned, 8> SRegs; 4433 for (unsigned i = 0; i < SubRegs; ++i) { 4434 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4435 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4436 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 4437 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 4438 SRegs.push_back(SGPR); 4439 } 4440 4441 MachineInstrBuilder MIB = 4442 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 4443 get(AMDGPU::REG_SEQUENCE), DstReg); 4444 for (unsigned i = 0; i < SubRegs; ++i) { 4445 MIB.addReg(SRegs[i]); 4446 MIB.addImm(RI.getSubRegFromChannel(i)); 4447 } 4448 return DstReg; 4449 } 4450 4451 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 4452 MachineInstr &MI) const { 4453 4454 // If the pointer is store in VGPRs, then we need to move them to 4455 // SGPRs using v_readfirstlane. This is safe because we only select 4456 // loads with uniform pointers to SMRD instruction so we know the 4457 // pointer value is uniform. 4458 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 4459 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 4460 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 4461 SBase->setReg(SGPR); 4462 } 4463 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff); 4464 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { 4465 unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); 4466 SOff->setReg(SGPR); 4467 } 4468 } 4469 4470 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 4471 MachineBasicBlock::iterator I, 4472 const TargetRegisterClass *DstRC, 4473 MachineOperand &Op, 4474 MachineRegisterInfo &MRI, 4475 const DebugLoc &DL) const { 4476 Register OpReg = Op.getReg(); 4477 unsigned OpSubReg = Op.getSubReg(); 4478 4479 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 4480 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 4481 4482 // Check if operand is already the correct register class. 4483 if (DstRC == OpRC) 4484 return; 4485 4486 Register DstReg = MRI.createVirtualRegister(DstRC); 4487 MachineInstr *Copy = 4488 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 4489 4490 Op.setReg(DstReg); 4491 Op.setSubReg(0); 4492 4493 MachineInstr *Def = MRI.getVRegDef(OpReg); 4494 if (!Def) 4495 return; 4496 4497 // Try to eliminate the copy if it is copying an immediate value. 
4498 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 4499 FoldImmediate(*Copy, *Def, OpReg, &MRI); 4500 4501 bool ImpDef = Def->isImplicitDef(); 4502 while (!ImpDef && Def && Def->isCopy()) { 4503 if (Def->getOperand(1).getReg().isPhysical()) 4504 break; 4505 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4506 ImpDef = Def && Def->isImplicitDef(); 4507 } 4508 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 4509 !ImpDef) 4510 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 4511 } 4512 4513 // Emit the actual waterfall loop, executing the wrapped instruction for each 4514 // unique value of \p Rsrc across all lanes. In the best case we execute 1 4515 // iteration, in the worst case we execute 64 (once per lane). 4516 static void 4517 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 4518 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 4519 const DebugLoc &DL, MachineOperand &Rsrc) { 4520 MachineFunction &MF = *OrigBB.getParent(); 4521 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4522 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4523 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4524 unsigned SaveExecOpc = 4525 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 4526 unsigned XorTermOpc = 4527 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 4528 unsigned AndOpc = 4529 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 4530 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4531 4532 MachineBasicBlock::iterator I = LoopBB.begin(); 4533 4534 Register VRsrc = Rsrc.getReg(); 4535 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 4536 4537 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4538 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC); 4539 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC); 4540 Register AndCond = MRI.createVirtualRegister(BoolXExecRC); 4541 Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4542 Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4543 Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4544 Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4545 Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4546 4547 // Beginning of the loop, read the next Rsrc variant. 4548 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) 4549 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0); 4550 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1) 4551 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1); 4552 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2) 4553 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2); 4554 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3) 4555 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3); 4556 4557 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc) 4558 .addReg(SRsrcSub0) 4559 .addImm(AMDGPU::sub0) 4560 .addReg(SRsrcSub1) 4561 .addImm(AMDGPU::sub1) 4562 .addReg(SRsrcSub2) 4563 .addImm(AMDGPU::sub2) 4564 .addReg(SRsrcSub3) 4565 .addImm(AMDGPU::sub3); 4566 4567 // Update Rsrc operand to use the SGPR Rsrc. 4568 Rsrc.setReg(SRsrc); 4569 Rsrc.setIsKill(true); 4570 4571 // Identify all lanes with identical Rsrc operands in their VGPRs. 
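// The two 64-bit compares cover the full 128-bit descriptor; ANDing their
// results gives the mask of lanes whose VGPR Rsrc matches the value just
// read with v_readfirstlane.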
4572 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) 4573 .addReg(SRsrc, 0, AMDGPU::sub0_sub1) 4574 .addReg(VRsrc, 0, AMDGPU::sub0_sub1); 4575 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) 4576 .addReg(SRsrc, 0, AMDGPU::sub2_sub3) 4577 .addReg(VRsrc, 0, AMDGPU::sub2_sub3); 4578 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond) 4579 .addReg(CondReg0) 4580 .addReg(CondReg1); 4581 4582 MRI.setSimpleHint(SaveExec, AndCond); 4583 4584 // Update EXEC to matching lanes, saving original to SaveExec. 4585 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 4586 .addReg(AndCond, RegState::Kill); 4587 4588 // The original instruction is here; we insert the terminators after it. 4589 I = LoopBB.end(); 4590 4591 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 4592 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 4593 .addReg(Exec) 4594 .addReg(SaveExec); 4595 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 4596 } 4597 4598 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 4599 // with SGPRs by iterating over all unique values across all lanes. 4600 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 4601 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 4602 MachineBasicBlock &MBB = *MI.getParent(); 4603 MachineFunction &MF = *MBB.getParent(); 4604 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4605 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4606 MachineRegisterInfo &MRI = MF.getRegInfo(); 4607 MachineBasicBlock::iterator I(&MI); 4608 const DebugLoc &DL = MI.getDebugLoc(); 4609 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4610 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 4611 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4612 4613 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4614 4615 // Save the EXEC mask 4616 BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 4617 4618 // Killed uses in the instruction we are waterfalling around will be 4619 // incorrect due to the added control-flow. 4620 for (auto &MO : MI.uses()) { 4621 if (MO.isReg() && MO.isUse()) { 4622 MRI.clearKillFlags(MO.getReg()); 4623 } 4624 } 4625 4626 // To insert the loop we need to split the block. Move everything after this 4627 // point to a new block, and insert a new empty block between the two. 4628 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4629 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4630 MachineFunction::iterator MBBI(MBB); 4631 ++MBBI; 4632 4633 MF.insert(MBBI, LoopBB); 4634 MF.insert(MBBI, RemainderBB); 4635 4636 LoopBB->addSuccessor(LoopBB); 4637 LoopBB->addSuccessor(RemainderBB); 4638 4639 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 4640 MachineBasicBlock::iterator J = I++; 4641 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4642 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 4643 LoopBB->splice(LoopBB->begin(), &MBB, J); 4644 4645 MBB.addSuccessor(LoopBB); 4646 4647 // Update dominators. We know that MBB immediately dominates LoopBB, that 4648 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4649 // dominates all of the successors transferred to it from MBB that MBB used 4650 // to properly dominate. 
4651 if (MDT) { 4652 MDT->addNewBlock(LoopBB, &MBB); 4653 MDT->addNewBlock(RemainderBB, LoopBB); 4654 for (auto &Succ : RemainderBB->successors()) { 4655 if (MDT->properlyDominates(&MBB, Succ)) { 4656 MDT->changeImmediateDominator(Succ, RemainderBB); 4657 } 4658 } 4659 } 4660 4661 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 4662 4663 // Restore the EXEC mask 4664 MachineBasicBlock::iterator First = RemainderBB->begin(); 4665 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 4666 } 4667 4668 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 4669 static std::tuple<unsigned, unsigned> 4670 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 4671 MachineBasicBlock &MBB = *MI.getParent(); 4672 MachineFunction &MF = *MBB.getParent(); 4673 MachineRegisterInfo &MRI = MF.getRegInfo(); 4674 4675 // Extract the ptr from the resource descriptor. 4676 unsigned RsrcPtr = 4677 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, 4678 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 4679 4680 // Create an empty resource descriptor 4681 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4682 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4683 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4684 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4685 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); 4686 4687 // Zero64 = 0 4688 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) 4689 .addImm(0); 4690 4691 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 4692 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 4693 .addImm(RsrcDataFormat & 0xFFFFFFFF); 4694 4695 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 4696 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 4697 .addImm(RsrcDataFormat >> 32); 4698 4699 // NewSRsrc = {Zero64, SRsrcFormat} 4700 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) 4701 .addReg(Zero64) 4702 .addImm(AMDGPU::sub0_sub1) 4703 .addReg(SRsrcFormatLo) 4704 .addImm(AMDGPU::sub2) 4705 .addReg(SRsrcFormatHi) 4706 .addImm(AMDGPU::sub3); 4707 4708 return std::make_tuple(RsrcPtr, NewSRsrc); 4709 } 4710 4711 void SIInstrInfo::legalizeOperands(MachineInstr &MI, 4712 MachineDominatorTree *MDT) const { 4713 MachineFunction &MF = *MI.getParent()->getParent(); 4714 MachineRegisterInfo &MRI = MF.getRegInfo(); 4715 4716 // Legalize VOP2 4717 if (isVOP2(MI) || isVOPC(MI)) { 4718 legalizeOperandsVOP2(MRI, MI); 4719 return; 4720 } 4721 4722 // Legalize VOP3 4723 if (isVOP3(MI)) { 4724 legalizeOperandsVOP3(MRI, MI); 4725 return; 4726 } 4727 4728 // Legalize SMRD 4729 if (isSMRD(MI)) { 4730 legalizeOperandsSMRD(MRI, MI); 4731 return; 4732 } 4733 4734 // Legalize REG_SEQUENCE and PHI 4735 // The register class of the operands much be the same type as the register 4736 // class of the output. 
4737 if (MI.getOpcode() == AMDGPU::PHI) {
4738 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
4739 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
4740 if (!MI.getOperand(i).isReg() ||
4741 !Register::isVirtualRegister(MI.getOperand(i).getReg()))
4742 continue;
4743 const TargetRegisterClass *OpRC =
4744 MRI.getRegClass(MI.getOperand(i).getReg());
4745 if (RI.hasVectorRegisters(OpRC)) {
4746 VRC = OpRC;
4747 } else {
4748 SRC = OpRC;
4749 }
4750 }
4751
4752 // If any of the operands are VGPR registers, then they all must be,
4753 // otherwise we will create illegal VGPR->SGPR copies when legalizing
4754 // them.
4755 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
4756 if (!VRC) {
4757 assert(SRC);
4758 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
4759 VRC = &AMDGPU::VReg_1RegClass;
4760 } else
4761 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4762 ? RI.getEquivalentAGPRClass(SRC)
4763 : RI.getEquivalentVGPRClass(SRC);
4764 } else {
4765 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4766 ? RI.getEquivalentAGPRClass(VRC)
4767 : RI.getEquivalentVGPRClass(VRC);
4768 }
4769 RC = VRC;
4770 } else {
4771 RC = SRC;
4772 }
4773
4774 // Update all the operands so they have the same type.
4775 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
4776 MachineOperand &Op = MI.getOperand(I);
4777 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
4778 continue;
4779
4780 // MI is a PHI instruction.
4781 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
4782 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
4783
4784 // Avoid creating no-op copies with the same src and dst reg class. These
4785 // confuse some of the machine passes.
4786 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
4787 }
4788 }
4789
4790 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
4791 // VGPR dest type and SGPR sources, insert copies so all operands are
4792 // VGPRs. This seems to help operand folding / the register coalescer.
4793 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
4794 MachineBasicBlock *MBB = MI.getParent();
4795 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
4796 if (RI.hasVGPRs(DstRC)) {
4797 // Update all the operands so they are VGPR register classes. These may
4798 // not be the same register class because REG_SEQUENCE supports mixing
4799 // subregister index types e.g.
sub0_sub1 + sub2 + sub3 4800 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 4801 MachineOperand &Op = MI.getOperand(I); 4802 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg())) 4803 continue; 4804 4805 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 4806 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 4807 if (VRC == OpRC) 4808 continue; 4809 4810 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 4811 Op.setIsKill(); 4812 } 4813 } 4814 4815 return; 4816 } 4817 4818 // Legalize INSERT_SUBREG 4819 // src0 must have the same register class as dst 4820 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 4821 Register Dst = MI.getOperand(0).getReg(); 4822 Register Src0 = MI.getOperand(1).getReg(); 4823 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 4824 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 4825 if (DstRC != Src0RC) { 4826 MachineBasicBlock *MBB = MI.getParent(); 4827 MachineOperand &Op = MI.getOperand(1); 4828 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 4829 } 4830 return; 4831 } 4832 4833 // Legalize SI_INIT_M0 4834 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 4835 MachineOperand &Src = MI.getOperand(0); 4836 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 4837 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 4838 return; 4839 } 4840 4841 // Legalize MIMG and MUBUF/MTBUF for shaders. 4842 // 4843 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 4844 // scratch memory access. In both cases, the legalization never involves 4845 // conversion to the addr64 form. 4846 if (isMIMG(MI) || 4847 (AMDGPU::isShader(MF.getFunction().getCallingConv()) && 4848 (isMUBUF(MI) || isMTBUF(MI)))) { 4849 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 4850 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { 4851 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); 4852 SRsrc->setReg(SGPR); 4853 } 4854 4855 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 4856 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { 4857 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); 4858 SSamp->setReg(SGPR); 4859 } 4860 return; 4861 } 4862 4863 // Legalize MUBUF* instructions. 4864 int RsrcIdx = 4865 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 4866 if (RsrcIdx != -1) { 4867 // We have an MUBUF instruction 4868 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx); 4869 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass; 4870 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), 4871 RI.getRegClass(RsrcRC))) { 4872 // The operands are legal. 4873 // FIXME: We may need to legalize operands besided srsrc. 4874 return; 4875 } 4876 4877 // Legalize a VGPR Rsrc. 4878 // 4879 // If the instruction is _ADDR64, we can avoid a waterfall by extracting 4880 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using 4881 // a zero-value SRsrc. 4882 // 4883 // If the instruction is _OFFSET (both idxen and offen disabled), and we 4884 // support ADDR64 instructions, we can convert to ADDR64 and do the same as 4885 // above. 4886 // 4887 // Otherwise we are on non-ADDR64 hardware, and/or we have 4888 // idxen/offen/bothen and we fall back to a waterfall loop. 
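// In the ADDR64 paths below the pointer extracted from the descriptor is
// either added to the existing VAddr (carry-propagating 32-bit add pair) or
// becomes the new VAddr outright, and the descriptor is replaced by a
// zero-based one that keeps only the default data format (see
// extractRsrcPtr).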
4889
4890 MachineBasicBlock &MBB = *MI.getParent();
4891
4892 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
4893 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
4894 // This is already an ADDR64 instruction so we need to add the pointer
4895 // extracted from the resource descriptor to the current value of VAddr.
4896 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4897 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4898 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
4899
4900 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
4901 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
4902 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
4903
4904 unsigned RsrcPtr, NewSRsrc;
4905 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4906
4907 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
4908 const DebugLoc &DL = MI.getDebugLoc();
4909 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e64), NewVAddrLo)
4910 .addDef(CondReg0)
4911 .addReg(RsrcPtr, 0, AMDGPU::sub0)
4912 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
4913 .addImm(0);
4914
4915 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
4916 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
4917 .addDef(CondReg1, RegState::Dead)
4918 .addReg(RsrcPtr, 0, AMDGPU::sub1)
4919 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
4920 .addReg(CondReg0, RegState::Kill)
4921 .addImm(0);
4922
4923 // NewVaddr = {NewVaddrHi, NewVaddrLo}
4924 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
4925 .addReg(NewVAddrLo)
4926 .addImm(AMDGPU::sub0)
4927 .addReg(NewVAddrHi)
4928 .addImm(AMDGPU::sub1);
4929
4930 VAddr->setReg(NewVAddr);
4931 Rsrc->setReg(NewSRsrc);
4932 } else if (!VAddr && ST.hasAddr64()) {
4933 // This instruction is the _OFFSET variant, so we need to convert it to
4934 // ADDR64.
4935 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
4936 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
4937 "FIXME: Need to emit flat atomics here");
4938
4939 unsigned RsrcPtr, NewSRsrc;
4940 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4941
4942 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
4943 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
4944 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4945 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
4946 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
4947
4948 // Atomics with return have an additional tied operand and are
4949 // missing some of the special bits.
4950 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
4951 MachineInstr *Addr64;
4952
4953 if (!VDataIn) {
4954 // Regular buffer load / store.
4955 MachineInstrBuilder MIB =
4956 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
4957 .add(*VData)
4958 .addReg(NewVAddr)
4959 .addReg(NewSRsrc)
4960 .add(*SOffset)
4961 .add(*Offset);
4962
4963 // Atomics do not have this operand.
4964 if (const MachineOperand *GLC = 4965 getNamedOperand(MI, AMDGPU::OpName::glc)) { 4966 MIB.addImm(GLC->getImm()); 4967 } 4968 if (const MachineOperand *DLC = 4969 getNamedOperand(MI, AMDGPU::OpName::dlc)) { 4970 MIB.addImm(DLC->getImm()); 4971 } 4972 4973 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); 4974 4975 if (const MachineOperand *TFE = 4976 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 4977 MIB.addImm(TFE->getImm()); 4978 } 4979 4980 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz)); 4981 4982 MIB.cloneMemRefs(MI); 4983 Addr64 = MIB; 4984 } else { 4985 // Atomics with return. 4986 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 4987 .add(*VData) 4988 .add(*VDataIn) 4989 .addReg(NewVAddr) 4990 .addReg(NewSRsrc) 4991 .add(*SOffset) 4992 .add(*Offset) 4993 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 4994 .cloneMemRefs(MI); 4995 } 4996 4997 MI.removeFromParent(); 4998 4999 // NewVaddr = {NewVaddrHi, NewVaddrLo} 5000 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 5001 NewVAddr) 5002 .addReg(RsrcPtr, 0, AMDGPU::sub0) 5003 .addImm(AMDGPU::sub0) 5004 .addReg(RsrcPtr, 0, AMDGPU::sub1) 5005 .addImm(AMDGPU::sub1); 5006 } else { 5007 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 5008 // to SGPRs. 5009 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 5010 } 5011 } 5012 } 5013 5014 void SIInstrInfo::moveToVALU(MachineInstr &TopInst, 5015 MachineDominatorTree *MDT) const { 5016 SetVectorType Worklist; 5017 Worklist.insert(&TopInst); 5018 5019 while (!Worklist.empty()) { 5020 MachineInstr &Inst = *Worklist.pop_back_val(); 5021 MachineBasicBlock *MBB = Inst.getParent(); 5022 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 5023 5024 unsigned Opcode = Inst.getOpcode(); 5025 unsigned NewOpcode = getVALUOp(Inst); 5026 5027 // Handle some special cases 5028 switch (Opcode) { 5029 default: 5030 break; 5031 case AMDGPU::S_ADD_U64_PSEUDO: 5032 case AMDGPU::S_SUB_U64_PSEUDO: 5033 splitScalar64BitAddSub(Worklist, Inst, MDT); 5034 Inst.eraseFromParent(); 5035 continue; 5036 case AMDGPU::S_ADD_I32: 5037 case AMDGPU::S_SUB_I32: 5038 // FIXME: The u32 versions currently selected use the carry. 
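    // moveScalarAddSub only succeeds on subtargets with carry-less VALU adds
    // (see its definition later in this file); otherwise fall through to the
    // generic handling below.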
5039 if (moveScalarAddSub(Worklist, Inst, MDT)) 5040 continue; 5041 5042 // Default handling 5043 break; 5044 case AMDGPU::S_AND_B64: 5045 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5046 Inst.eraseFromParent(); 5047 continue; 5048 5049 case AMDGPU::S_OR_B64: 5050 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5051 Inst.eraseFromParent(); 5052 continue; 5053 5054 case AMDGPU::S_XOR_B64: 5055 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5056 Inst.eraseFromParent(); 5057 continue; 5058 5059 case AMDGPU::S_NAND_B64: 5060 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5061 Inst.eraseFromParent(); 5062 continue; 5063 5064 case AMDGPU::S_NOR_B64: 5065 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5066 Inst.eraseFromParent(); 5067 continue; 5068 5069 case AMDGPU::S_XNOR_B64: 5070 if (ST.hasDLInsts()) 5071 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5072 else 5073 splitScalar64BitXnor(Worklist, Inst, MDT); 5074 Inst.eraseFromParent(); 5075 continue; 5076 5077 case AMDGPU::S_ANDN2_B64: 5078 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5079 Inst.eraseFromParent(); 5080 continue; 5081 5082 case AMDGPU::S_ORN2_B64: 5083 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5084 Inst.eraseFromParent(); 5085 continue; 5086 5087 case AMDGPU::S_NOT_B64: 5088 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5089 Inst.eraseFromParent(); 5090 continue; 5091 5092 case AMDGPU::S_BCNT1_I32_B64: 5093 splitScalar64BitBCNT(Worklist, Inst); 5094 Inst.eraseFromParent(); 5095 continue; 5096 5097 case AMDGPU::S_BFE_I64: 5098 splitScalar64BitBFE(Worklist, Inst); 5099 Inst.eraseFromParent(); 5100 continue; 5101 5102 case AMDGPU::S_LSHL_B32: 5103 if (ST.hasOnlyRevVALUShifts()) { 5104 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5105 swapOperands(Inst); 5106 } 5107 break; 5108 case AMDGPU::S_ASHR_I32: 5109 if (ST.hasOnlyRevVALUShifts()) { 5110 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5111 swapOperands(Inst); 5112 } 5113 break; 5114 case AMDGPU::S_LSHR_B32: 5115 if (ST.hasOnlyRevVALUShifts()) { 5116 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5117 swapOperands(Inst); 5118 } 5119 break; 5120 case AMDGPU::S_LSHL_B64: 5121 if (ST.hasOnlyRevVALUShifts()) { 5122 NewOpcode = AMDGPU::V_LSHLREV_B64; 5123 swapOperands(Inst); 5124 } 5125 break; 5126 case AMDGPU::S_ASHR_I64: 5127 if (ST.hasOnlyRevVALUShifts()) { 5128 NewOpcode = AMDGPU::V_ASHRREV_I64; 5129 swapOperands(Inst); 5130 } 5131 break; 5132 case AMDGPU::S_LSHR_B64: 5133 if (ST.hasOnlyRevVALUShifts()) { 5134 NewOpcode = AMDGPU::V_LSHRREV_B64; 5135 swapOperands(Inst); 5136 } 5137 break; 5138 5139 case AMDGPU::S_ABS_I32: 5140 lowerScalarAbs(Worklist, Inst); 5141 Inst.eraseFromParent(); 5142 continue; 5143 5144 case AMDGPU::S_CBRANCH_SCC0: 5145 case AMDGPU::S_CBRANCH_SCC1: 5146 // Clear unused bits of vcc 5147 if (ST.isWave32()) 5148 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), 5149 AMDGPU::VCC_LO) 5150 .addReg(AMDGPU::EXEC_LO) 5151 .addReg(AMDGPU::VCC_LO); 5152 else 5153 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 5154 AMDGPU::VCC) 5155 .addReg(AMDGPU::EXEC) 5156 .addReg(AMDGPU::VCC); 5157 break; 5158 5159 case AMDGPU::S_BFE_U64: 5160 case AMDGPU::S_BFM_B64: 5161 llvm_unreachable("Moving this op to VALU not implemented"); 5162 5163 case AMDGPU::S_PACK_LL_B32_B16: 5164 case AMDGPU::S_PACK_LH_B32_B16: 5165 case AMDGPU::S_PACK_HH_B32_B16: 5166 movePackToVALU(Worklist, MRI, Inst); 5167 
Inst.eraseFromParent(); 5168 continue; 5169 5170 case AMDGPU::S_XNOR_B32: 5171 lowerScalarXnor(Worklist, Inst); 5172 Inst.eraseFromParent(); 5173 continue; 5174 5175 case AMDGPU::S_NAND_B32: 5176 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5177 Inst.eraseFromParent(); 5178 continue; 5179 5180 case AMDGPU::S_NOR_B32: 5181 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5182 Inst.eraseFromParent(); 5183 continue; 5184 5185 case AMDGPU::S_ANDN2_B32: 5186 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5187 Inst.eraseFromParent(); 5188 continue; 5189 5190 case AMDGPU::S_ORN2_B32: 5191 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5192 Inst.eraseFromParent(); 5193 continue; 5194 5195 // TODO: remove as soon as everything is ready 5196 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5197 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5198 // can only be selected from the uniform SDNode. 5199 case AMDGPU::S_ADD_CO_PSEUDO: 5200 case AMDGPU::S_SUB_CO_PSEUDO: { 5201 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5202 ? AMDGPU::V_ADDC_U32_e64 5203 : AMDGPU::V_SUBB_U32_e64; 5204 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5205 Register DummyCReg = MRI.createVirtualRegister(CarryRC); 5206 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5207 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5208 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5209 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), CarryReg) 5210 .addReg(Inst.getOperand(4).getReg()); 5211 MachineInstr *CarryOp = 5212 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5213 .addReg(DummyCReg, RegState::Define | RegState::Dead) 5214 .add(Inst.getOperand(2)) 5215 .add(Inst.getOperand(3)) 5216 .addReg(CarryReg, RegState::Kill) 5217 .addImm(0); 5218 legalizeOperands(*CarryOp); 5219 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 5220 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 5221 Inst.eraseFromParent(); 5222 } 5223 continue; 5224 case AMDGPU::S_UADDO_PSEUDO: 5225 case AMDGPU::S_USUBO_PSEUDO: { 5226 const DebugLoc &DL = Inst.getDebugLoc(); 5227 MachineOperand &Dest0 = Inst.getOperand(0); 5228 MachineOperand &Dest1 = Inst.getOperand(1); 5229 MachineOperand &Src0 = Inst.getOperand(2); 5230 MachineOperand &Src1 = Inst.getOperand(3); 5231 5232 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 5233 ? AMDGPU::V_ADD_I32_e64 5234 : AMDGPU::V_SUB_I32_e64; 5235 const TargetRegisterClass *NewRC = 5236 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 5237 Register DestReg = MRI.createVirtualRegister(NewRC); 5238 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 5239 .addReg(Dest1.getReg(), RegState::Define) 5240 .add(Src0) 5241 .add(Src1) 5242 .addImm(0); // clamp bit 5243 5244 legalizeOperands(*NewInstr, MDT); 5245 5246 MRI.replaceRegWith(Dest0.getReg(), DestReg); 5247 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 5248 Worklist); 5249 Inst.eraseFromParent(); 5250 } 5251 continue; 5252 } 5253 5254 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 5255 // We cannot move this instruction to the VALU, so we should try to 5256 // legalize its operands instead. 5257 legalizeOperands(Inst, MDT); 5258 continue; 5259 } 5260 5261 // Use the new VALU Opcode. 5262 const MCInstrDesc &NewDesc = get(NewOpcode); 5263 Inst.setDesc(NewDesc); 5264 5265 // Remove any references to SCC. 
Vector instructions can't read from it, and 5266 // we're just about to add the implicit use / defs of VCC, and we don't want 5267 // both. 5268 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 5269 MachineOperand &Op = Inst.getOperand(i); 5270 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 5271 // Only propagate through live-def of SCC. 5272 if (Op.isDef() && !Op.isDead()) 5273 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 5274 Inst.RemoveOperand(i); 5275 } 5276 } 5277 5278 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 5279 // We are converting these to a BFE, so we need to add the missing 5280 // operands for the size and offset. 5281 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; 5282 Inst.addOperand(MachineOperand::CreateImm(0)); 5283 Inst.addOperand(MachineOperand::CreateImm(Size)); 5284 5285 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 5286 // The VALU version adds the second operand to the result, so insert an 5287 // extra 0 operand. 5288 Inst.addOperand(MachineOperand::CreateImm(0)); 5289 } 5290 5291 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 5292 fixImplicitOperands(Inst); 5293 5294 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 5295 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 5296 // If we need to move this to VGPRs, we need to unpack the second operand 5297 // back into the 2 separate ones for bit offset and width. 5298 assert(OffsetWidthOp.isImm() && 5299 "Scalar BFE is only implemented for constant width and offset"); 5300 uint32_t Imm = OffsetWidthOp.getImm(); 5301 5302 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 5303 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 5304 Inst.RemoveOperand(2); // Remove old immediate. 5305 Inst.addOperand(MachineOperand::CreateImm(Offset)); 5306 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 5307 } 5308 5309 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 5310 unsigned NewDstReg = AMDGPU::NoRegister; 5311 if (HasDst) { 5312 Register DstReg = Inst.getOperand(0).getReg(); 5313 if (Register::isPhysicalRegister(DstReg)) 5314 continue; 5315 5316 // Update the destination register class. 5317 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 5318 if (!NewDstRC) 5319 continue; 5320 5321 if (Inst.isCopy() && 5322 Register::isVirtualRegister(Inst.getOperand(1).getReg()) && 5323 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 5324 // Instead of creating a copy where src and dst are the same register 5325 // class, we just replace all uses of dst with src. These kinds of 5326 // copies interfere with the heuristics MachineSink uses to decide 5327 // whether or not to split a critical edge, since the pass assumes 5328 // that copies will end up as machine instructions and not be 5329 // eliminated. 5330 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 5331 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 5332 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 5333 Inst.getOperand(0).setReg(DstReg); 5334 5335 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 5336 // these are deleted later, but at -O0 it would leave a suspicious 5337 // looking illegal copy of an undef register.
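        // The now-dead copy is therefore rewritten in place into an
        // IMPLICIT_DEF of its original destination instead of being left
        // behind.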
5338 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 5339 Inst.RemoveOperand(I); 5340 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 5341 continue; 5342 } 5343 5344 NewDstReg = MRI.createVirtualRegister(NewDstRC); 5345 MRI.replaceRegWith(DstReg, NewDstReg); 5346 } 5347 5348 // Legalize the operands 5349 legalizeOperands(Inst, MDT); 5350 5351 if (HasDst) 5352 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 5353 } 5354 } 5355 5356 // Add/sub require special handling to deal with carry outs. 5357 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 5358 MachineDominatorTree *MDT) const { 5359 if (ST.hasAddNoCarry()) { 5360 // Assume there is no user of scc since we don't select this in that case. 5361 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 5362 // is used. 5363 5364 MachineBasicBlock &MBB = *Inst.getParent(); 5365 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5366 5367 Register OldDstReg = Inst.getOperand(0).getReg(); 5368 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5369 5370 unsigned Opc = Inst.getOpcode(); 5371 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 5372 5373 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 5374 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 5375 5376 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 5377 Inst.RemoveOperand(3); 5378 5379 Inst.setDesc(get(NewOpc)); 5380 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 5381 Inst.addImplicitDefUseOperands(*MBB.getParent()); 5382 MRI.replaceRegWith(OldDstReg, ResultReg); 5383 legalizeOperands(Inst, MDT); 5384 5385 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5386 return true; 5387 } 5388 5389 return false; 5390 } 5391 5392 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 5393 MachineInstr &Inst) const { 5394 MachineBasicBlock &MBB = *Inst.getParent(); 5395 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5396 MachineBasicBlock::iterator MII = Inst; 5397 DebugLoc DL = Inst.getDebugLoc(); 5398 5399 MachineOperand &Dest = Inst.getOperand(0); 5400 MachineOperand &Src = Inst.getOperand(1); 5401 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5402 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5403 5404 unsigned SubOp = ST.hasAddNoCarry() ? 
5405 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32; 5406 5407 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 5408 .addImm(0) 5409 .addReg(Src.getReg()); 5410 5411 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 5412 .addReg(Src.getReg()) 5413 .addReg(TmpReg); 5414 5415 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5416 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5417 } 5418 5419 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 5420 MachineInstr &Inst) const { 5421 MachineBasicBlock &MBB = *Inst.getParent(); 5422 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5423 MachineBasicBlock::iterator MII = Inst; 5424 const DebugLoc &DL = Inst.getDebugLoc(); 5425 5426 MachineOperand &Dest = Inst.getOperand(0); 5427 MachineOperand &Src0 = Inst.getOperand(1); 5428 MachineOperand &Src1 = Inst.getOperand(2); 5429 5430 if (ST.hasDLInsts()) { 5431 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5432 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 5433 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 5434 5435 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 5436 .add(Src0) 5437 .add(Src1); 5438 5439 MRI.replaceRegWith(Dest.getReg(), NewDest); 5440 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5441 } else { 5442 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 5443 // invert either source and then perform the XOR. If either source is a 5444 // scalar register, then we can leave the inversion on the scalar unit to 5445 // achieve a better distribution of scalar and vector instructions. 5446 bool Src0IsSGPR = Src0.isReg() && 5447 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 5448 bool Src1IsSGPR = Src1.isReg() && 5449 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 5450 MachineInstr *Xor; 5451 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5452 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5453 5454 // Build a pair of scalar instructions and add them to the work list. 5455 // The next iteration over the work list will lower these to the vector 5456 // unit as necessary.
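    // For example (sketch only, register names invented for illustration):
    //   %d = S_XNOR_B32 %sgpr_src, %other_src
    // becomes
    //   %t = S_NOT_B32 %sgpr_src
    //   %d = S_XOR_B32 %t, %other_src ; re-queued so it can be moved to the
    //                                 ; VALU later if needed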
5457 if (Src0IsSGPR) { 5458 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5459 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5460 .addReg(Temp) 5461 .add(Src1); 5462 } else if (Src1IsSGPR) { 5463 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5464 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5465 .add(Src0) 5466 .addReg(Temp); 5467 } else { 5468 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5469 .add(Src0) 5470 .add(Src1); 5471 MachineInstr *Not = 5472 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5473 Worklist.insert(Not); 5474 } 5475 5476 MRI.replaceRegWith(Dest.getReg(), NewDest); 5477 5478 Worklist.insert(Xor); 5479 5480 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5481 } 5482 } 5483 5484 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5485 MachineInstr &Inst, 5486 unsigned Opcode) const { 5487 MachineBasicBlock &MBB = *Inst.getParent(); 5488 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5489 MachineBasicBlock::iterator MII = Inst; 5490 const DebugLoc &DL = Inst.getDebugLoc(); 5491 5492 MachineOperand &Dest = Inst.getOperand(0); 5493 MachineOperand &Src0 = Inst.getOperand(1); 5494 MachineOperand &Src1 = Inst.getOperand(2); 5495 5496 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5497 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5498 5499 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5500 .add(Src0) 5501 .add(Src1); 5502 5503 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5504 .addReg(Interm); 5505 5506 Worklist.insert(&Op); 5507 Worklist.insert(&Not); 5508 5509 MRI.replaceRegWith(Dest.getReg(), NewDest); 5510 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5511 } 5512 5513 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5514 MachineInstr &Inst, 5515 unsigned Opcode) const { 5516 MachineBasicBlock &MBB = *Inst.getParent(); 5517 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5518 MachineBasicBlock::iterator MII = Inst; 5519 const DebugLoc &DL = Inst.getDebugLoc(); 5520 5521 MachineOperand &Dest = Inst.getOperand(0); 5522 MachineOperand &Src0 = Inst.getOperand(1); 5523 MachineOperand &Src1 = Inst.getOperand(2); 5524 5525 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5526 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5527 5528 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5529 .add(Src1); 5530 5531 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5532 .add(Src0) 5533 .addReg(Interm); 5534 5535 Worklist.insert(&Not); 5536 Worklist.insert(&Op); 5537 5538 MRI.replaceRegWith(Dest.getReg(), NewDest); 5539 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5540 } 5541 5542 void SIInstrInfo::splitScalar64BitUnaryOp( 5543 SetVectorType &Worklist, MachineInstr &Inst, 5544 unsigned Opcode) const { 5545 MachineBasicBlock &MBB = *Inst.getParent(); 5546 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5547 5548 MachineOperand &Dest = Inst.getOperand(0); 5549 MachineOperand &Src0 = Inst.getOperand(1); 5550 DebugLoc DL = Inst.getDebugLoc(); 5551 5552 MachineBasicBlock::iterator MII = Inst; 5553 5554 const MCInstrDesc &InstDesc = get(Opcode); 5555 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
5556 MRI.getRegClass(Src0.getReg()) : 5557 &AMDGPU::SGPR_32RegClass; 5558 5559 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5560 5561 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5562 AMDGPU::sub0, Src0SubRC); 5563 5564 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5565 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5566 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5567 5568 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5569 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5570 5571 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5572 AMDGPU::sub1, Src0SubRC); 5573 5574 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5575 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5576 5577 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5578 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5579 .addReg(DestSub0) 5580 .addImm(AMDGPU::sub0) 5581 .addReg(DestSub1) 5582 .addImm(AMDGPU::sub1); 5583 5584 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5585 5586 Worklist.insert(&LoHalf); 5587 Worklist.insert(&HiHalf); 5588 5589 // We don't need to legalizeOperands here because for a single operand, src0 5590 // will support any kind of input. 5591 5592 // Move all users of this moved value. 5593 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5594 } 5595 5596 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 5597 MachineInstr &Inst, 5598 MachineDominatorTree *MDT) const { 5599 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 5600 5601 MachineBasicBlock &MBB = *Inst.getParent(); 5602 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5603 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5604 5605 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5606 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5607 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5608 5609 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5610 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 5611 5612 MachineOperand &Dest = Inst.getOperand(0); 5613 MachineOperand &Src0 = Inst.getOperand(1); 5614 MachineOperand &Src1 = Inst.getOperand(2); 5615 const DebugLoc &DL = Inst.getDebugLoc(); 5616 MachineBasicBlock::iterator MII = Inst; 5617 5618 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 5619 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 5620 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5621 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5622 5623 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5624 AMDGPU::sub0, Src0SubRC); 5625 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5626 AMDGPU::sub0, Src1SubRC); 5627 5628 5629 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5630 AMDGPU::sub1, Src0SubRC); 5631 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5632 AMDGPU::sub1, Src1SubRC); 5633 5634 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64; 5635 MachineInstr *LoHalf = 5636 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) 5637 .addReg(CarryReg, RegState::Define) 5638 .add(SrcReg0Sub0) 5639 .add(SrcReg1Sub0) 5640 .addImm(0); // clamp bit 5641 5642 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 5643 MachineInstr *HiHalf = 5644 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) 5645 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 5646 .add(SrcReg0Sub1) 5647 .add(SrcReg1Sub1) 5648 .addReg(CarryReg, RegState::Kill) 5649 .addImm(0); // clamp bit 5650 5651 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5652 .addReg(DestSub0) 5653 .addImm(AMDGPU::sub0) 5654 .addReg(DestSub1) 5655 .addImm(AMDGPU::sub1); 5656 5657 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5658 5659 // Try to legalize the operands in case we need to swap the order to keep it 5660 // valid. 5661 legalizeOperands(*LoHalf, MDT); 5662 legalizeOperands(*HiHalf, MDT); 5663 5664 // Move all users of this moved value. 5665 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5666 } 5667 5668 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, 5669 MachineInstr &Inst, unsigned Opcode, 5670 MachineDominatorTree *MDT) const { 5671 MachineBasicBlock &MBB = *Inst.getParent(); 5672 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5673 5674 MachineOperand &Dest = Inst.getOperand(0); 5675 MachineOperand &Src0 = Inst.getOperand(1); 5676 MachineOperand &Src1 = Inst.getOperand(2); 5677 DebugLoc DL = Inst.getDebugLoc(); 5678 5679 MachineBasicBlock::iterator MII = Inst; 5680 5681 const MCInstrDesc &InstDesc = get(Opcode); 5682 const TargetRegisterClass *Src0RC = Src0.isReg() ? 5683 MRI.getRegClass(Src0.getReg()) : 5684 &AMDGPU::SGPR_32RegClass; 5685 5686 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5687 const TargetRegisterClass *Src1RC = Src1.isReg() ?
5688 MRI.getRegClass(Src1.getReg()) : 5689 &AMDGPU::SGPR_32RegClass; 5690 5691 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5692 5693 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5694 AMDGPU::sub0, Src0SubRC); 5695 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5696 AMDGPU::sub0, Src1SubRC); 5697 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5698 AMDGPU::sub1, Src0SubRC); 5699 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5700 AMDGPU::sub1, Src1SubRC); 5701 5702 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5703 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5704 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5705 5706 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5707 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 5708 .add(SrcReg0Sub0) 5709 .add(SrcReg1Sub0); 5710 5711 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5712 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 5713 .add(SrcReg0Sub1) 5714 .add(SrcReg1Sub1); 5715 5716 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5717 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5718 .addReg(DestSub0) 5719 .addImm(AMDGPU::sub0) 5720 .addReg(DestSub1) 5721 .addImm(AMDGPU::sub1); 5722 5723 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5724 5725 Worklist.insert(&LoHalf); 5726 Worklist.insert(&HiHalf); 5727 5728 // Move all users of this moved value. 5729 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5730 } 5731 5732 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 5733 MachineInstr &Inst, 5734 MachineDominatorTree *MDT) const { 5735 MachineBasicBlock &MBB = *Inst.getParent(); 5736 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5737 5738 MachineOperand &Dest = Inst.getOperand(0); 5739 MachineOperand &Src0 = Inst.getOperand(1); 5740 MachineOperand &Src1 = Inst.getOperand(2); 5741 const DebugLoc &DL = Inst.getDebugLoc(); 5742 5743 MachineBasicBlock::iterator MII = Inst; 5744 5745 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5746 5747 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 5748 5749 MachineOperand* Op0; 5750 MachineOperand* Op1; 5751 5752 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 5753 Op0 = &Src0; 5754 Op1 = &Src1; 5755 } else { 5756 Op0 = &Src1; 5757 Op1 = &Src0; 5758 } 5759 5760 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 5761 .add(*Op0); 5762 5763 Register NewDest = MRI.createVirtualRegister(DestRC); 5764 5765 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 5766 .addReg(Interm) 5767 .add(*Op1); 5768 5769 MRI.replaceRegWith(Dest.getReg(), NewDest); 5770 5771 Worklist.insert(&Xor); 5772 } 5773 5774 void SIInstrInfo::splitScalar64BitBCNT( 5775 SetVectorType &Worklist, MachineInstr &Inst) const { 5776 MachineBasicBlock &MBB = *Inst.getParent(); 5777 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5778 5779 MachineBasicBlock::iterator MII = Inst; 5780 const DebugLoc &DL = Inst.getDebugLoc(); 5781 5782 MachineOperand &Dest = Inst.getOperand(0); 5783 MachineOperand &Src = Inst.getOperand(1); 5784 5785 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 5786 const TargetRegisterClass *SrcRC = Src.isReg() ?
5787 MRI.getRegClass(Src.getReg()) : 5788 &AMDGPU::SGPR_32RegClass; 5789 5790 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5791 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5792 5793 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 5794 5795 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 5796 AMDGPU::sub0, SrcSubRC); 5797 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 5798 AMDGPU::sub1, SrcSubRC); 5799 5800 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 5801 5802 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 5803 5804 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5805 5806 // We don't need to legalize operands here. src0 for either instruction can be 5807 // an SGPR, and the second input is unused or determined here. 5808 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5809 } 5810 5811 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 5812 MachineInstr &Inst) const { 5813 MachineBasicBlock &MBB = *Inst.getParent(); 5814 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5815 MachineBasicBlock::iterator MII = Inst; 5816 const DebugLoc &DL = Inst.getDebugLoc(); 5817 5818 MachineOperand &Dest = Inst.getOperand(0); 5819 uint32_t Imm = Inst.getOperand(2).getImm(); 5820 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 5821 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 5822 5823 (void) Offset; 5824 5825 // Only sext_inreg cases handled. 5826 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 5827 Offset == 0 && "Not implemented"); 5828 5829 if (BitWidth < 32) { 5830 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5831 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5832 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5833 5834 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 5835 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 5836 .addImm(0) 5837 .addImm(BitWidth); 5838 5839 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 5840 .addImm(31) 5841 .addReg(MidRegLo); 5842 5843 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 5844 .addReg(MidRegLo) 5845 .addImm(AMDGPU::sub0) 5846 .addReg(MidRegHi) 5847 .addImm(AMDGPU::sub1); 5848 5849 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5850 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5851 return; 5852 } 5853 5854 MachineOperand &Src = Inst.getOperand(1); 5855 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5856 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5857 5858 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 5859 .addImm(31) 5860 .addReg(Src.getReg(), 0, AMDGPU::sub0); 5861 5862 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 5863 .addReg(Src.getReg(), 0, AMDGPU::sub0) 5864 .addImm(AMDGPU::sub0) 5865 .addReg(TmpReg) 5866 .addImm(AMDGPU::sub1); 5867 5868 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5869 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5870 } 5871 5872 void SIInstrInfo::addUsersToMoveToVALUWorklist( 5873 Register DstReg, 5874 MachineRegisterInfo &MRI, 5875 SetVectorType &Worklist) const { 5876 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 5877 E = MRI.use_end(); I != E;) { 5878 MachineInstr &UseMI = *I->getParent(); 5879 5880 unsigned OpNo = 0;
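    // For the copy-like opcodes below, the use's own result class decides
    // whether it still has to move to the VALU, so operand 0 is inspected;
    // for any other user the operand that actually reads DstReg is checked.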
5881 5882 switch (UseMI.getOpcode()) { 5883 case AMDGPU::COPY: 5884 case AMDGPU::WQM: 5885 case AMDGPU::SOFT_WQM: 5886 case AMDGPU::WWM: 5887 case AMDGPU::REG_SEQUENCE: 5888 case AMDGPU::PHI: 5889 case AMDGPU::INSERT_SUBREG: 5890 break; 5891 default: 5892 OpNo = I.getOperandNo(); 5893 break; 5894 } 5895 5896 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 5897 Worklist.insert(&UseMI); 5898 5899 do { 5900 ++I; 5901 } while (I != E && I->getParent() == &UseMI); 5902 } else { 5903 ++I; 5904 } 5905 } 5906 } 5907 5908 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 5909 MachineRegisterInfo &MRI, 5910 MachineInstr &Inst) const { 5911 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5912 MachineBasicBlock *MBB = Inst.getParent(); 5913 MachineOperand &Src0 = Inst.getOperand(1); 5914 MachineOperand &Src1 = Inst.getOperand(2); 5915 const DebugLoc &DL = Inst.getDebugLoc(); 5916 5917 switch (Inst.getOpcode()) { 5918 case AMDGPU::S_PACK_LL_B32_B16: { 5919 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5920 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5921 5922 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 5923 // 0. 5924 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5925 .addImm(0xffff); 5926 5927 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 5928 .addReg(ImmReg, RegState::Kill) 5929 .add(Src0); 5930 5931 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 5932 .add(Src1) 5933 .addImm(16) 5934 .addReg(TmpReg, RegState::Kill); 5935 break; 5936 } 5937 case AMDGPU::S_PACK_LH_B32_B16: { 5938 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5939 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5940 .addImm(0xffff); 5941 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 5942 .addReg(ImmReg, RegState::Kill) 5943 .add(Src0) 5944 .add(Src1); 5945 break; 5946 } 5947 case AMDGPU::S_PACK_HH_B32_B16: { 5948 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5949 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5950 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 5951 .addImm(16) 5952 .add(Src0); 5953 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5954 .addImm(0xffff0000); 5955 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 5956 .add(Src1) 5957 .addReg(ImmReg, RegState::Kill) 5958 .addReg(TmpReg, RegState::Kill); 5959 break; 5960 } 5961 default: 5962 llvm_unreachable("unhandled s_pack_* instruction"); 5963 } 5964 5965 MachineOperand &Dest = Inst.getOperand(0); 5966 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5967 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5968 } 5969 5970 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 5971 MachineInstr &SCCDefInst, 5972 SetVectorType &Worklist) const { 5973 // Ensure that def inst defines SCC, which is still live. 5974 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 5975 !Op.isDead() && Op.getParent() == &SCCDefInst); 5976 SmallVector<MachineInstr *, 4> CopyToDelete; 5977 // This assumes that all the users of SCC are in the same block 5978 // as the SCC def. 5979 for (MachineInstr &MI : // Skip the def inst itself. 5980 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 5981 SCCDefInst.getParent()->end())) { 5982 // Check if SCC is used first. 
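    // If such a use is a COPY feeding S_ADD_CO_PSEUDO / S_SUB_CO_PSEUDO, those
    // pseudos are queued for VALU conversion and rewired to read VCC directly,
    // and the copy itself is deleted once the scan is done.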
5983 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) { 5984 if (MI.isCopy()) { 5985 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 5986 unsigned DestReg = MI.getOperand(0).getReg(); 5987 SmallVector<MachineInstr *, 4> Users; 5988 for (auto &User : MRI.use_nodbg_instructions(DestReg)) { 5989 if ((User.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) || 5990 (User.getOpcode() == AMDGPU::S_SUB_CO_PSEUDO)) { 5991 Users.push_back(&User); 5992 Worklist.insert(&User); 5993 } 5994 } 5995 for (auto &U : Users) 5996 U->getOperand(4).setReg(RI.getVCC()); 5997 CopyToDelete.push_back(&MI); 5998 } else 5999 Worklist.insert(&MI); 6000 } 6001 // Exit if we find another SCC def. 6002 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 6003 break; 6004 } 6005 for (auto &Copy : CopyToDelete) 6006 Copy->eraseFromParent(); 6007 } 6008 6009 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 6010 const MachineInstr &Inst) const { 6011 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 6012 6013 switch (Inst.getOpcode()) { 6014 // For target instructions, getOpRegClass just returns the virtual register 6015 // class associated with the operand, so we need to find an equivalent VGPR 6016 // register class in order to move the instruction to the VALU. 6017 case AMDGPU::COPY: 6018 case AMDGPU::PHI: 6019 case AMDGPU::REG_SEQUENCE: 6020 case AMDGPU::INSERT_SUBREG: 6021 case AMDGPU::WQM: 6022 case AMDGPU::SOFT_WQM: 6023 case AMDGPU::WWM: { 6024 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 6025 if (RI.hasAGPRs(SrcRC)) { 6026 if (RI.hasAGPRs(NewDstRC)) 6027 return nullptr; 6028 6029 switch (Inst.getOpcode()) { 6030 case AMDGPU::PHI: 6031 case AMDGPU::REG_SEQUENCE: 6032 case AMDGPU::INSERT_SUBREG: 6033 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 6034 break; 6035 default: 6036 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6037 } 6038 6039 if (!NewDstRC) 6040 return nullptr; 6041 } else { 6042 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 6043 return nullptr; 6044 6045 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 6046 if (!NewDstRC) 6047 return nullptr; 6048 } 6049 6050 return NewDstRC; 6051 } 6052 default: 6053 return NewDstRC; 6054 } 6055 } 6056 6057 // Find the one SGPR operand we are allowed to use. 6058 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 6059 int OpIndices[3]) const { 6060 const MCInstrDesc &Desc = MI.getDesc(); 6061 6062 // Find the one SGPR operand we are allowed to use. 6063 // 6064 // First we need to consider the instruction's operand requirements before 6065 // legalizing. Some operands are required to be SGPRs, such as implicit uses 6066 // of VCC, but we are still bound by the constant bus requirement to only use 6067 // one. 6068 // 6069 // If the operand's class is an SGPR, we can never move it. 6070 6071 Register SGPRReg = findImplicitSGPRRead(MI); 6072 if (SGPRReg != AMDGPU::NoRegister) 6073 return SGPRReg; 6074 6075 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 6076 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 6077 6078 for (unsigned i = 0; i < 3; ++i) { 6079 int Idx = OpIndices[i]; 6080 if (Idx == -1) 6081 break; 6082 6083 const MachineOperand &MO = MI.getOperand(Idx); 6084 if (!MO.isReg()) 6085 continue; 6086 6087 // Is this operand statically required to be an SGPR based on the operand 6088 // constraints? 
6089 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 6090 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 6091 if (IsRequiredSGPR) 6092 return MO.getReg(); 6093 6094 // If this could be a VGPR or an SGPR, Check the dynamic register class. 6095 Register Reg = MO.getReg(); 6096 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 6097 if (RI.isSGPRClass(RegRC)) 6098 UsedSGPRs[i] = Reg; 6099 } 6100 6101 // We don't have a required SGPR operand, so we have a bit more freedom in 6102 // selecting operands to move. 6103 6104 // Try to select the most used SGPR. If an SGPR is equal to one of the 6105 // others, we choose that. 6106 // 6107 // e.g. 6108 // V_FMA_F32 v0, s0, s0, s0 -> No moves 6109 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 6110 6111 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 6112 // prefer those. 6113 6114 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 6115 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 6116 SGPRReg = UsedSGPRs[0]; 6117 } 6118 6119 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 6120 if (UsedSGPRs[1] == UsedSGPRs[2]) 6121 SGPRReg = UsedSGPRs[1]; 6122 } 6123 6124 return SGPRReg; 6125 } 6126 6127 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 6128 unsigned OperandName) const { 6129 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 6130 if (Idx == -1) 6131 return nullptr; 6132 6133 return &MI.getOperand(Idx); 6134 } 6135 6136 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 6137 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 6138 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT 6139 (1ULL << 56) | // RESOURCE_LEVEL = 1 6140 (3ULL << 60); // OOB_SELECT = 3 6141 } 6142 6143 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 6144 if (ST.isAmdHsaOS()) { 6145 // Set ATC = 1. GFX9 doesn't have this bit. 6146 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 6147 RsrcDataFormat |= (1ULL << 56); 6148 6149 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 6150 // BTW, it disables TC L2 and therefore decreases performance. 6151 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 6152 RsrcDataFormat |= (2ULL << 59); 6153 } 6154 6155 return RsrcDataFormat; 6156 } 6157 6158 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 6159 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 6160 AMDGPU::RSRC_TID_ENABLE | 6161 0xffffffff; // Size; 6162 6163 // GFX9 doesn't have ELEMENT_SIZE. 6164 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 6165 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1; 6166 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 6167 } 6168 6169 // IndexStride = 64 / 32. 6170 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 6171 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 6172 6173 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 6174 // Clear them unless we want a huge stride. 
6175 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 6176 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 6177 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 6178 6179 return Rsrc23; 6180 } 6181 6182 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 6183 unsigned Opc = MI.getOpcode(); 6184 6185 return isSMRD(Opc); 6186 } 6187 6188 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 6189 return get(Opc).mayLoad() && 6190 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 6191 } 6192 6193 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 6194 int &FrameIndex) const { 6195 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 6196 if (!Addr || !Addr->isFI()) 6197 return AMDGPU::NoRegister; 6198 6199 assert(!MI.memoperands_empty() && 6200 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 6201 6202 FrameIndex = Addr->getIndex(); 6203 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 6204 } 6205 6206 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 6207 int &FrameIndex) const { 6208 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 6209 assert(Addr && Addr->isFI()); 6210 FrameIndex = Addr->getIndex(); 6211 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 6212 } 6213 6214 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 6215 int &FrameIndex) const { 6216 if (!MI.mayLoad()) 6217 return AMDGPU::NoRegister; 6218 6219 if (isMUBUF(MI) || isVGPRSpill(MI)) 6220 return isStackAccess(MI, FrameIndex); 6221 6222 if (isSGPRSpill(MI)) 6223 return isSGPRStackAccess(MI, FrameIndex); 6224 6225 return AMDGPU::NoRegister; 6226 } 6227 6228 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 6229 int &FrameIndex) const { 6230 if (!MI.mayStore()) 6231 return AMDGPU::NoRegister; 6232 6233 if (isMUBUF(MI) || isVGPRSpill(MI)) 6234 return isStackAccess(MI, FrameIndex); 6235 6236 if (isSGPRSpill(MI)) 6237 return isSGPRStackAccess(MI, FrameIndex); 6238 6239 return AMDGPU::NoRegister; 6240 } 6241 6242 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 6243 unsigned Size = 0; 6244 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 6245 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 6246 while (++I != E && I->isInsideBundle()) { 6247 assert(!I->isBundle() && "No nested bundle!"); 6248 Size += getInstSizeInBytes(*I); 6249 } 6250 6251 return Size; 6252 } 6253 6254 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 6255 unsigned Opc = MI.getOpcode(); 6256 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 6257 unsigned DescSize = Desc.getSize(); 6258 6259 // If we have a definitive size, we can use it. Otherwise we need to inspect 6260 // the operands to know the size. 6261 if (isFixedSize(MI)) 6262 return DescSize; 6263 6264 // 4-byte instructions may have a 32-bit literal encoded after them. Check 6265 // operands that coud ever be literals. 6266 if (isVALU(MI) || isSALU(MI)) { 6267 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 6268 if (Src0Idx == -1) 6269 return DescSize; // No operands. 6270 6271 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx])) 6272 return isVOP3(MI) ? 12 : (DescSize + 4); 6273 6274 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 6275 if (Src1Idx == -1) 6276 return DescSize; 6277 6278 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx])) 6279 return isVOP3(MI) ? 
12 : (DescSize + 4); 6280 6281 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); 6282 if (Src2Idx == -1) 6283 return DescSize; 6284 6285 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) 6286 return isVOP3(MI) ? 12 : (DescSize + 4); 6287 6288 return DescSize; 6289 } 6290 6291 // Check whether we have extra NSA words. 6292 if (isMIMG(MI)) { 6293 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 6294 if (VAddr0Idx < 0) 6295 return 8; 6296 6297 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 6298 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 6299 } 6300 6301 switch (Opc) { 6302 case TargetOpcode::IMPLICIT_DEF: 6303 case TargetOpcode::KILL: 6304 case TargetOpcode::DBG_VALUE: 6305 case TargetOpcode::EH_LABEL: 6306 return 0; 6307 case TargetOpcode::BUNDLE: 6308 return getInstBundleSize(MI); 6309 case TargetOpcode::INLINEASM: 6310 case TargetOpcode::INLINEASM_BR: { 6311 const MachineFunction *MF = MI.getParent()->getParent(); 6312 const char *AsmStr = MI.getOperand(0).getSymbolName(); 6313 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), 6314 &MF->getSubtarget()); 6315 } 6316 default: 6317 return DescSize; 6318 } 6319 } 6320 6321 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 6322 if (!isFLAT(MI)) 6323 return false; 6324 6325 if (MI.memoperands_empty()) 6326 return true; 6327 6328 for (const MachineMemOperand *MMO : MI.memoperands()) { 6329 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 6330 return true; 6331 } 6332 return false; 6333 } 6334 6335 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 6336 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 6337 } 6338 6339 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 6340 MachineBasicBlock *IfEnd) const { 6341 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 6342 assert(TI != IfEntry->end()); 6343 6344 MachineInstr *Branch = &(*TI); 6345 MachineFunction *MF = IfEntry->getParent(); 6346 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 6347 6348 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6349 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6350 MachineInstr *SIIF = 6351 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 6352 .add(Branch->getOperand(0)) 6353 .add(Branch->getOperand(1)); 6354 MachineInstr *SIEND = 6355 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 6356 .addReg(DstReg); 6357 6358 IfEntry->erase(TI); 6359 IfEntry->insert(IfEntry->end(), SIIF); 6360 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 6361 } 6362 } 6363 6364 void SIInstrInfo::convertNonUniformLoopRegion( 6365 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 6366 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 6367 // We expect 2 terminators, one conditional and one unconditional. 
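  // Rough shape of the rewrite done below (block and register names are
  // illustrative only):
  //   header:  %mask  = PHI [ 0, %other preds... ], [ %break, %loopend ]
  //   loopend: %break = SI_IF_BREAK %mask, %cond
  //            SI_LOOP %break, %header
  // replacing the divergent SI_NON_UNIFORM_BRCOND_PSEUDO back edge.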
6368 assert(TI != LoopEnd->end()); 6369 6370 MachineInstr *Branch = &(*TI); 6371 MachineFunction *MF = LoopEnd->getParent(); 6372 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 6373 6374 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6375 6376 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6377 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 6378 MachineInstrBuilder HeaderPHIBuilder = 6379 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 6380 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 6381 E = LoopEntry->pred_end(); 6382 PI != E; ++PI) { 6383 if (*PI == LoopEnd) { 6384 HeaderPHIBuilder.addReg(BackEdgeReg); 6385 } else { 6386 MachineBasicBlock *PMBB = *PI; 6387 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 6388 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 6389 ZeroReg, 0); 6390 HeaderPHIBuilder.addReg(ZeroReg); 6391 } 6392 HeaderPHIBuilder.addMBB(*PI); 6393 } 6394 MachineInstr *HeaderPhi = HeaderPHIBuilder; 6395 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 6396 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 6397 .addReg(DstReg) 6398 .add(Branch->getOperand(0)); 6399 MachineInstr *SILOOP = 6400 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 6401 .addReg(BackEdgeReg) 6402 .addMBB(LoopEntry); 6403 6404 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 6405 LoopEnd->erase(TI); 6406 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 6407 LoopEnd->insert(LoopEnd->end(), SILOOP); 6408 } 6409 } 6410 6411 ArrayRef<std::pair<int, const char *>> 6412 SIInstrInfo::getSerializableTargetIndices() const { 6413 static const std::pair<int, const char *> TargetIndices[] = { 6414 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 6415 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 6416 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 6417 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 6418 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 6419 return makeArrayRef(TargetIndices); 6420 } 6421 6422 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 6423 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 6424 ScheduleHazardRecognizer * 6425 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 6426 const ScheduleDAG *DAG) const { 6427 return new GCNHazardRecognizer(DAG->MF); 6428 } 6429 6430 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 6431 /// pass. 
6432 ScheduleHazardRecognizer * 6433 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 6434 return new GCNHazardRecognizer(MF); 6435 } 6436 6437 std::pair<unsigned, unsigned> 6438 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 6439 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 6440 } 6441 6442 ArrayRef<std::pair<unsigned, const char *>> 6443 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 6444 static const std::pair<unsigned, const char *> TargetFlags[] = { 6445 { MO_GOTPCREL, "amdgpu-gotprel" }, 6446 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 6447 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 6448 { MO_REL32_LO, "amdgpu-rel32-lo" }, 6449 { MO_REL32_HI, "amdgpu-rel32-hi" }, 6450 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 6451 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 6452 }; 6453 6454 return makeArrayRef(TargetFlags); 6455 } 6456 6457 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 6458 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 6459 MI.modifiesRegister(AMDGPU::EXEC, &RI); 6460 } 6461 6462 MachineInstrBuilder 6463 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6464 MachineBasicBlock::iterator I, 6465 const DebugLoc &DL, 6466 Register DestReg) const { 6467 if (ST.hasAddNoCarry()) 6468 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 6469 6470 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6471 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 6472 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 6473 6474 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6475 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6476 } 6477 6478 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6479 MachineBasicBlock::iterator I, 6480 const DebugLoc &DL, 6481 Register DestReg, 6482 RegScavenger &RS) const { 6483 if (ST.hasAddNoCarry()) 6484 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 6485 6486 // If available, prefer to use vcc. 6487 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 6488 ? Register(RI.getVCC()) 6489 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 6490 6491 // TODO: Users need to deal with this. 
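  // Scavenging can fail here; returning an empty MachineInstrBuilder signals
  // the caller that no carry-based add could be formed at this point.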
6492 if (!UnusedCarry.isValid()) 6493 return MachineInstrBuilder(); 6494 6495 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6496 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6497 } 6498 6499 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 6500 switch (Opcode) { 6501 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 6502 case AMDGPU::SI_KILL_I1_TERMINATOR: 6503 return true; 6504 default: 6505 return false; 6506 } 6507 } 6508 6509 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 6510 switch (Opcode) { 6511 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 6512 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 6513 case AMDGPU::SI_KILL_I1_PSEUDO: 6514 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 6515 default: 6516 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 6517 } 6518 } 6519 6520 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 6521 MachineBasicBlock *MBB = MI.getParent(); 6522 MachineFunction *MF = MBB->getParent(); 6523 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 6524 6525 if (!ST.isWave32()) 6526 return; 6527 6528 for (auto &Op : MI.implicit_operands()) { 6529 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 6530 Op.setReg(AMDGPU::VCC_LO); 6531 } 6532 } 6533 6534 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 6535 if (!isSMRD(MI)) 6536 return false; 6537 6538 // Check that it is using a buffer resource. 6539 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 6540 if (Idx == -1) // e.g. s_memtime 6541 return false; 6542 6543 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 6544 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 6545 } 6546 6547 unsigned SIInstrInfo::getNumFlatOffsetBits(unsigned AddrSpace, 6548 bool Signed) const { 6549 if (!ST.hasFlatInstOffsets()) 6550 return 0; 6551 6552 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS) 6553 return 0; 6554 6555 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) 6556 return Signed ? 12 : 11; 6557 6558 return Signed ? 13 : 12; 6559 } 6560 6561 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 6562 bool Signed) const { 6563 // TODO: Should 0 be special cased? 
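  // Resulting legal ranges, per the checks below: GFX10+ accepts 12-bit signed
  // or 11-bit unsigned offsets; older subtargets with flat offsets accept
  // 13-bit signed or 12-bit unsigned offsets.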
6564 if (!ST.hasFlatInstOffsets()) 6565 return false; 6566 6567 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS) 6568 return false; 6569 6570 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 6571 return (Signed && isInt<12>(Offset)) || 6572 (!Signed && isUInt<11>(Offset)); 6573 } 6574 6575 return (Signed && isInt<13>(Offset)) || 6576 (!Signed && isUInt<12>(Offset)); 6577 } 6578 6579 6580 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 6581 enum SIEncodingFamily { 6582 SI = 0, 6583 VI = 1, 6584 SDWA = 2, 6585 SDWA9 = 3, 6586 GFX80 = 4, 6587 GFX9 = 5, 6588 GFX10 = 6, 6589 SDWA10 = 7 6590 }; 6591 6592 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 6593 switch (ST.getGeneration()) { 6594 default: 6595 break; 6596 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 6597 case AMDGPUSubtarget::SEA_ISLANDS: 6598 return SIEncodingFamily::SI; 6599 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 6600 case AMDGPUSubtarget::GFX9: 6601 return SIEncodingFamily::VI; 6602 case AMDGPUSubtarget::GFX10: 6603 return SIEncodingFamily::GFX10; 6604 } 6605 llvm_unreachable("Unknown subtarget generation!"); 6606 } 6607 6608 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 6609 switch(MCOp) { 6610 // These opcodes use indirect register addressing so 6611 // they need special handling by codegen (currently missing). 6612 // Therefore it is too risky to allow these opcodes 6613 // to be selected by dpp combiner or sdwa peepholer. 6614 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 6615 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 6616 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 6617 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 6618 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 6619 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 6620 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 6621 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 6622 return true; 6623 default: 6624 return false; 6625 } 6626 } 6627 6628 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 6629 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 6630 6631 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 6632 ST.getGeneration() == AMDGPUSubtarget::GFX9) 6633 Gen = SIEncodingFamily::GFX9; 6634 6635 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 6636 // subtarget has UnpackedD16VMem feature. 6637 // TODO: remove this when we discard GFX80 encoding. 6638 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 6639 Gen = SIEncodingFamily::GFX80; 6640 6641 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 6642 switch (ST.getGeneration()) { 6643 default: 6644 Gen = SIEncodingFamily::SDWA; 6645 break; 6646 case AMDGPUSubtarget::GFX9: 6647 Gen = SIEncodingFamily::SDWA9; 6648 break; 6649 case AMDGPUSubtarget::GFX10: 6650 Gen = SIEncodingFamily::SDWA10; 6651 break; 6652 } 6653 } 6654 6655 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 6656 6657 // -1 means that Opcode is already a native instruction. 6658 if (MCOp == -1) 6659 return Opcode; 6660 6661 // (uint16_t)-1 means that Opcode is a pseudo instruction that has 6662 // no encoding in the given subtarget generation. 6663 if (MCOp == (uint16_t)-1) 6664 return -1; 6665 6666 if (isAsmOnlyOpcode(MCOp)) 6667 return -1; 6668 6669 return MCOp; 6670 } 6671 6672 static 6673 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) { 6674 assert(RegOpnd.isReg()); 6675 return RegOpnd.isUndef() ? 
static TargetInstrInfo::RegSubRegPair
getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair()
                           : getRegSubRegPair(RegOpnd);
}

TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos.
// Following a subreg of reg:subreg isn't supported.
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of a subreg.
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // This INSERT_SUBREG inserted the subreg we're looking for.
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // The subreg lives in the rest of the register.
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // A subreg of a subreg isn't supported.
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!Register::isVirtualRegister(P.Reg))
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}
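
// Illustrative walk-through of getVRegSubRegDef (register numbers are made
// up):
//
//   %1:vgpr_32 = V_MOV_B32_e32 0
//   %2:vreg_64 = REG_SEQUENCE %1, %subreg.sub0, %3, %subreg.sub1
//   %4:vgpr_32 = COPY %2.sub0
//
// A query for {%4, NoSubRegister} follows the COPY to %2.sub0, lets
// followSubRegDef look through the REG_SEQUENCE to the operand tied to sub0,
// and finally returns the V_MOV_B32_e32 that defines %1.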
bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop the scan at the use.
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseInstScan = 10;
  int NumUseInst = 0;

  for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) {
    // Don't bother searching between blocks, although it is possible this
    // block doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUseInst > MaxUseInstScan)
      return true;
  }

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop the scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->readsRegister(VReg))
      if (--NumUseInst == 0)
        return false;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }
}
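
// A caller folding a VALU def into its use would typically guard on these
// helpers; a hedged sketch (variable names are hypothetical):
//
//   if (execMayBeModifiedBeforeUse(*MRI, Reg, *DefMI, *UseMI))
//     return false; // EXEC may differ at the use; folding would be unsound.
//
// Both helpers answer conservatively: any cross-block query or scan overflow
// reports that exec may be modified.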
MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying
  // to a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();

    if (DstReg == AMDGPU::M0 && SrcReg.isVirtual()) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
      return nullptr;
    }

    if (SrcReg == AMDGPU::M0 && DstReg.isVirtual()) {
      MF.getRegInfo().constrainRegClass(DstReg, &AMDGPU::SReg_32_XM0RegClass);
      return nullptr;
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}
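
// Worked example for the bundle case above: a BUNDLE header followed by three
// bundled instructions with model latencies {4, 2, 1} yields Lat = 4 and
// Count = 3, so the reported bundle latency is 4 + 3 - 1 = 6 cycles. This only
// illustrates the formula; the actual latencies come from SchedModel.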