1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// SI Implementation of TargetInstrInfo. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIInstrInfo.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUSubtarget.h" 17 #include "GCNHazardRecognizer.h" 18 #include "SIDefines.h" 19 #include "SIMachineFunctionInfo.h" 20 #include "SIRegisterInfo.h" 21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 22 #include "Utils/AMDGPUBaseInfo.h" 23 #include "llvm/ADT/APInt.h" 24 #include "llvm/ADT/ArrayRef.h" 25 #include "llvm/ADT/SmallVector.h" 26 #include "llvm/ADT/StringRef.h" 27 #include "llvm/ADT/iterator_range.h" 28 #include "llvm/Analysis/AliasAnalysis.h" 29 #include "llvm/Analysis/MemoryLocation.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/CodeGen/MachineBasicBlock.h" 32 #include "llvm/CodeGen/MachineDominators.h" 33 #include "llvm/CodeGen/MachineFrameInfo.h" 34 #include "llvm/CodeGen/MachineFunction.h" 35 #include "llvm/CodeGen/MachineInstr.h" 36 #include "llvm/CodeGen/MachineInstrBuilder.h" 37 #include "llvm/CodeGen/MachineInstrBundle.h" 38 #include "llvm/CodeGen/MachineMemOperand.h" 39 #include "llvm/CodeGen/MachineOperand.h" 40 #include "llvm/CodeGen/MachineRegisterInfo.h" 41 #include "llvm/CodeGen/RegisterScavenging.h" 42 #include "llvm/CodeGen/ScheduleDAG.h" 43 #include "llvm/CodeGen/SelectionDAGNodes.h" 44 #include "llvm/CodeGen/TargetOpcodes.h" 45 #include "llvm/CodeGen/TargetRegisterInfo.h" 46 #include "llvm/IR/DebugLoc.h" 47 #include "llvm/IR/DiagnosticInfo.h" 48 #include "llvm/IR/Function.h" 49 #include "llvm/IR/InlineAsm.h" 50 #include "llvm/IR/LLVMContext.h" 51 #include "llvm/MC/MCInstrDesc.h" 52 #include "llvm/Support/Casting.h" 53 #include "llvm/Support/CommandLine.h" 54 #include "llvm/Support/Compiler.h" 55 #include "llvm/Support/ErrorHandling.h" 56 #include "llvm/Support/MachineValueType.h" 57 #include "llvm/Support/MathExtras.h" 58 #include "llvm/Target/TargetMachine.h" 59 #include <cassert> 60 #include <cstdint> 61 #include <iterator> 62 #include <utility> 63 64 using namespace llvm; 65 66 #define GET_INSTRINFO_CTOR_DTOR 67 #include "AMDGPUGenInstrInfo.inc" 68 69 namespace llvm { 70 namespace AMDGPU { 71 #define GET_D16ImageDimIntrinsics_IMPL 72 #define GET_ImageDimIntrinsicTable_IMPL 73 #define GET_RsrcIntrinsics_IMPL 74 #include "AMDGPUGenSearchableTables.inc" 75 } 76 } 77 78 79 // Must be at least 4 to be able to branch over minimum unconditional branch 80 // code. This is only for making it possible to write reasonably small tests for 81 // long branches. 
82 static cl::opt<unsigned> 83 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), 84 cl::desc("Restrict range of branch instructions (DEBUG)")); 85 86 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST) 87 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), 88 RI(ST), ST(ST) { 89 SchedModel.init(&ST); 90 } 91 92 //===----------------------------------------------------------------------===// 93 // TargetInstrInfo callbacks 94 //===----------------------------------------------------------------------===// 95 96 static unsigned getNumOperandsNoGlue(SDNode *Node) { 97 unsigned N = Node->getNumOperands(); 98 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 99 --N; 100 return N; 101 } 102 103 /// Returns true if both nodes have the same value for the given 104 /// operand \p Op, or if both nodes do not have this operand. 105 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { 106 unsigned Opc0 = N0->getMachineOpcode(); 107 unsigned Opc1 = N1->getMachineOpcode(); 108 109 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); 110 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); 111 112 if (Op0Idx == -1 && Op1Idx == -1) 113 return true; 114 115 116 if ((Op0Idx == -1 && Op1Idx != -1) || 117 (Op1Idx == -1 && Op0Idx != -1)) 118 return false; 119 120 // getNamedOperandIdx returns the index for the MachineInstr's operands, 121 // which includes the result as the first operand. We are indexing into the 122 // MachineSDNode's operands, so we need to skip the result operand to get 123 // the real index. 124 --Op0Idx; 125 --Op1Idx; 126 127 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx); 128 } 129 130 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, 131 AliasAnalysis *AA) const { 132 // TODO: The generic check fails for VALU instructions that should be 133 // rematerializable due to implicit reads of exec. We really want all of the 134 // generic logic for this except for this. 135 switch (MI.getOpcode()) { 136 case AMDGPU::V_MOV_B32_e32: 137 case AMDGPU::V_MOV_B32_e64: 138 case AMDGPU::V_MOV_B64_PSEUDO: 139 // No implicit operands. 140 return MI.getNumOperands() == MI.getDesc().getNumOperands(); 141 default: 142 return false; 143 } 144 } 145 146 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, 147 int64_t &Offset0, 148 int64_t &Offset1) const { 149 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode()) 150 return false; 151 152 unsigned Opc0 = Load0->getMachineOpcode(); 153 unsigned Opc1 = Load1->getMachineOpcode(); 154 155 // Make sure both are actually loads. 156 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad()) 157 return false; 158 159 if (isDS(Opc0) && isDS(Opc1)) { 160 161 // FIXME: Handle this case: 162 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1)) 163 return false; 164 165 // Check base reg. 166 if (Load0->getOperand(0) != Load1->getOperand(0)) 167 return false; 168 169 // Skip read2 / write2 variants for simplicity. 170 // TODO: We should report true if the used offsets are adjacent (excluded 171 // st64 versions). 172 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 173 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 174 if (Offset0Idx == -1 || Offset1Idx == -1) 175 return false; 176 177 // XXX - be careful of datalesss loads 178 // getNamedOperandIdx returns the index for MachineInstrs. 
Since they 179 // include the output in the operand list, but SDNodes don't, we need to 180 // subtract the index by one. 181 Offset0Idx -= get(Opc0).NumDefs; 182 Offset1Idx -= get(Opc1).NumDefs; 183 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue(); 184 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue(); 185 return true; 186 } 187 188 if (isSMRD(Opc0) && isSMRD(Opc1)) { 189 // Skip time and cache invalidation instructions. 190 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 || 191 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1) 192 return false; 193 194 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1)); 195 196 // Check base reg. 197 if (Load0->getOperand(0) != Load1->getOperand(0)) 198 return false; 199 200 const ConstantSDNode *Load0Offset = 201 dyn_cast<ConstantSDNode>(Load0->getOperand(1)); 202 const ConstantSDNode *Load1Offset = 203 dyn_cast<ConstantSDNode>(Load1->getOperand(1)); 204 205 if (!Load0Offset || !Load1Offset) 206 return false; 207 208 Offset0 = Load0Offset->getZExtValue(); 209 Offset1 = Load1Offset->getZExtValue(); 210 return true; 211 } 212 213 // MUBUF and MTBUF can access the same addresses. 214 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) { 215 216 // MUBUF and MTBUF have vaddr at different indices. 217 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || 218 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || 219 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) 220 return false; 221 222 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 223 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 224 225 if (OffIdx0 == -1 || OffIdx1 == -1) 226 return false; 227 228 // getNamedOperandIdx returns the index for MachineInstrs. Since they 229 // include the output in the operand list, but SDNodes don't, we need to 230 // subtract the index by one. 231 OffIdx0 -= get(Opc0).NumDefs; 232 OffIdx1 -= get(Opc1).NumDefs; 233 234 SDValue Off0 = Load0->getOperand(OffIdx0); 235 SDValue Off1 = Load1->getOperand(OffIdx1); 236 237 // The offset might be a FrameIndexSDNode. 238 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 239 return false; 240 241 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 242 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 243 return true; 244 } 245 246 return false; 247 } 248 249 static bool isStride64(unsigned Opc) { 250 switch (Opc) { 251 case AMDGPU::DS_READ2ST64_B32: 252 case AMDGPU::DS_READ2ST64_B64: 253 case AMDGPU::DS_WRITE2ST64_B32: 254 case AMDGPU::DS_WRITE2ST64_B64: 255 return true; 256 default: 257 return false; 258 } 259 } 260 261 bool SIInstrInfo::getMemOperandsWithOffset( 262 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 263 int64_t &Offset, bool &OffsetIsScalable, const TargetRegisterInfo *TRI) 264 const { 265 if (!LdSt.mayLoadOrStore()) 266 return false; 267 268 unsigned Opc = LdSt.getOpcode(); 269 OffsetIsScalable = false; 270 const MachineOperand *BaseOp, *OffsetOp; 271 272 if (isDS(LdSt)) { 273 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 274 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 275 if (OffsetOp) { 276 // Normal, single offset LDS instruction. 277 if (!BaseOp) { 278 // DS_CONSUME/DS_APPEND use M0 for the base address. 279 // TODO: find the implicit use operand for M0 and use that as BaseOp? 
280 return false; 281 } 282 BaseOps.push_back(BaseOp); 283 Offset = OffsetOp->getImm(); 284 } else { 285 // The 2 offset instructions use offset0 and offset1 instead. We can treat 286 // these as a load with a single offset if the 2 offsets are consecutive. 287 // We will use this for some partially aligned loads. 288 const MachineOperand *Offset0Op = 289 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 290 const MachineOperand *Offset1Op = 291 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 292 293 unsigned Offset0 = Offset0Op->getImm(); 294 unsigned Offset1 = Offset1Op->getImm(); 295 if (Offset0 + 1 != Offset1) 296 return false; 297 298 // Each of these offsets is in element sized units, so we need to convert 299 // to bytes of the individual reads. 300 301 unsigned EltSize; 302 if (LdSt.mayLoad()) 303 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 304 else { 305 assert(LdSt.mayStore()); 306 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 307 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 308 } 309 310 if (isStride64(Opc)) 311 EltSize *= 64; 312 313 BaseOps.push_back(BaseOp); 314 Offset = EltSize * Offset0; 315 } 316 return true; 317 } 318 319 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 320 const MachineOperand *SOffset = getNamedOperand(LdSt, AMDGPU::OpName::soffset); 321 if (SOffset && SOffset->isReg()) { 322 // We can only handle this if it's a stack access, as any other resource 323 // would require reporting multiple base registers. 324 const MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 325 if (AddrReg && !AddrReg->isFI()) 326 return false; 327 328 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 329 const SIMachineFunctionInfo *MFI 330 = LdSt.getParent()->getParent()->getInfo<SIMachineFunctionInfo>(); 331 if (RSrc->getReg() != MFI->getScratchRSrcReg()) 332 return false; 333 334 const MachineOperand *OffsetImm = 335 getNamedOperand(LdSt, AMDGPU::OpName::offset); 336 BaseOps.push_back(RSrc); 337 BaseOps.push_back(SOffset); 338 Offset = OffsetImm->getImm(); 339 return true; 340 } 341 342 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 343 if (!BaseOp) // e.g. BUFFER_WBINVL1_VOL 344 return false; 345 BaseOps.push_back(BaseOp); 346 347 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 348 if (BaseOp) 349 BaseOps.push_back(BaseOp); 350 351 const MachineOperand *OffsetImm = 352 getNamedOperand(LdSt, AMDGPU::OpName::offset); 353 Offset = OffsetImm->getImm(); 354 if (SOffset) // soffset can be an inline immediate. 355 Offset += SOffset->getImm(); 356 return true; 357 } 358 359 if (isSMRD(LdSt)) { 360 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 361 if (!BaseOp) // e.g. S_MEMTIME 362 return false; 363 BaseOps.push_back(BaseOp); 364 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 365 Offset = OffsetOp ? OffsetOp->getImm() : 0; 366 return true; 367 } 368 369 if (isFLAT(LdSt)) { 370 // Instructions have either vaddr or saddr or both. 
371 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 372 if (BaseOp) 373 BaseOps.push_back(BaseOp); 374 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr); 375 if (BaseOp) 376 BaseOps.push_back(BaseOp); 377 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm(); 378 return true; 379 } 380 381 return false; 382 } 383 384 static bool 385 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 386 ArrayRef<const MachineOperand *> BaseOps2) { 387 if (BaseOps1.size() != BaseOps2.size()) 388 return false; 389 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) 390 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 391 return false; 392 return true; 393 } 394 395 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, 396 ArrayRef<const MachineOperand *> BaseOps1, 397 const MachineInstr &MI2, 398 ArrayRef<const MachineOperand *> BaseOps2) { 399 if (memOpsHaveSameBaseOperands(BaseOps1, BaseOps2)) 400 return true; 401 402 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand()) 403 return false; 404 405 auto MO1 = *MI1.memoperands_begin(); 406 auto MO2 = *MI2.memoperands_begin(); 407 if (MO1->getAddrSpace() != MO2->getAddrSpace()) 408 return false; 409 410 auto Base1 = MO1->getValue(); 411 auto Base2 = MO2->getValue(); 412 if (!Base1 || !Base2) 413 return false; 414 const MachineFunction &MF = *MI1.getParent()->getParent(); 415 const DataLayout &DL = MF.getFunction().getParent()->getDataLayout(); 416 Base1 = GetUnderlyingObject(Base1, DL); 417 Base2 = GetUnderlyingObject(Base2, DL); 418 419 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2)) 420 return false; 421 422 return Base1 == Base2; 423 } 424 425 bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, 426 ArrayRef<const MachineOperand *> BaseOps2, 427 unsigned NumLoads) const { 428 assert(!BaseOps1.empty() && !BaseOps2.empty()); 429 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent(); 430 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent(); 431 432 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2)) 433 return false; 434 435 const MachineOperand *FirstDst = nullptr; 436 const MachineOperand *SecondDst = nullptr; 437 438 if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) || 439 (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) || 440 (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) { 441 const unsigned MaxGlobalLoadCluster = 7; 442 if (NumLoads > MaxGlobalLoadCluster) 443 return false; 444 445 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata); 446 if (!FirstDst) 447 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst); 448 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata); 449 if (!SecondDst) 450 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst); 451 } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) { 452 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst); 453 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst); 454 } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) { 455 FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst); 456 SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst); 457 } 458 459 if (!FirstDst || !SecondDst) 460 return false; 461 462 // Try to limit clustering based on the total number of bytes loaded 463 // rather than the number of instructions. This is done to help reduce 464 // register pressure. The method used is somewhat inexact, though, 465 // because it assumes that all loads in the cluster will load the 466 // same number of bytes as FirstLdSt. 
467 468 // The unit of this value is bytes. 469 // FIXME: This needs finer tuning. 470 unsigned LoadClusterThreshold = 16; 471 472 const MachineRegisterInfo &MRI = 473 FirstLdSt.getParent()->getParent()->getRegInfo(); 474 475 const Register Reg = FirstDst->getReg(); 476 477 const TargetRegisterClass *DstRC = Register::isVirtualRegister(Reg) 478 ? MRI.getRegClass(Reg) 479 : RI.getPhysRegClass(Reg); 480 481 // FIXME: NumLoads should not be subtracted 1. This is to match behavior 482 // of clusterNeighboringMemOps which was previosly passing cluster length 483 // less 1. LoadClusterThreshold should be tuned instead. 484 return ((NumLoads - 1) * (RI.getRegSizeInBits(*DstRC) / 8)) <= 485 LoadClusterThreshold; 486 } 487 488 // FIXME: This behaves strangely. If, for example, you have 32 load + stores, 489 // the first 16 loads will be interleaved with the stores, and the next 16 will 490 // be clustered as expected. It should really split into 2 16 store batches. 491 // 492 // Loads are clustered until this returns false, rather than trying to schedule 493 // groups of stores. This also means we have to deal with saying different 494 // address space loads should be clustered, and ones which might cause bank 495 // conflicts. 496 // 497 // This might be deprecated so it might not be worth that much effort to fix. 498 bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, 499 int64_t Offset0, int64_t Offset1, 500 unsigned NumLoads) const { 501 assert(Offset1 > Offset0 && 502 "Second offset should be larger than first offset!"); 503 // If we have less than 16 loads in a row, and the offsets are within 64 504 // bytes, then schedule together. 505 506 // A cacheline is 64 bytes (for global memory). 507 return (NumLoads <= 16 && (Offset1 - Offset0) < 64); 508 } 509 510 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, 511 MachineBasicBlock::iterator MI, 512 const DebugLoc &DL, MCRegister DestReg, 513 MCRegister SrcReg, bool KillSrc) { 514 MachineFunction *MF = MBB.getParent(); 515 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), 516 "illegal SGPR to VGPR copy", 517 DL, DS_Error); 518 LLVMContext &C = MF->getFunction().getContext(); 519 C.diagnose(IllegalCopy); 520 521 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg) 522 .addReg(SrcReg, getKillRegState(KillSrc)); 523 } 524 525 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 526 MachineBasicBlock::iterator MI, 527 const DebugLoc &DL, MCRegister DestReg, 528 MCRegister SrcReg, bool KillSrc) const { 529 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg); 530 531 if (RC == &AMDGPU::VGPR_32RegClass) { 532 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 533 AMDGPU::SReg_32RegClass.contains(SrcReg) || 534 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 535 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ? 536 AMDGPU::V_ACCVGPR_READ_B32 : AMDGPU::V_MOV_B32_e32; 537 BuildMI(MBB, MI, DL, get(Opc), DestReg) 538 .addReg(SrcReg, getKillRegState(KillSrc)); 539 return; 540 } 541 542 if (RC == &AMDGPU::SReg_32_XM0RegClass || 543 RC == &AMDGPU::SReg_32RegClass) { 544 if (SrcReg == AMDGPU::SCC) { 545 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg) 546 .addImm(1) 547 .addImm(0); 548 return; 549 } 550 551 if (DestReg == AMDGPU::VCC_LO) { 552 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { 553 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) 554 .addReg(SrcReg, getKillRegState(KillSrc)); 555 } else { 556 // FIXME: Hack until VReg_1 removed. 
557 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 558 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 559 .addImm(0) 560 .addReg(SrcReg, getKillRegState(KillSrc)); 561 } 562 563 return; 564 } 565 566 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 567 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 568 return; 569 } 570 571 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 572 .addReg(SrcReg, getKillRegState(KillSrc)); 573 return; 574 } 575 576 if (RC == &AMDGPU::SReg_64RegClass) { 577 if (DestReg == AMDGPU::VCC) { 578 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 579 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 580 .addReg(SrcReg, getKillRegState(KillSrc)); 581 } else { 582 // FIXME: Hack until VReg_1 removed. 583 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 584 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 585 .addImm(0) 586 .addReg(SrcReg, getKillRegState(KillSrc)); 587 } 588 589 return; 590 } 591 592 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 593 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 594 return; 595 } 596 597 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 598 .addReg(SrcReg, getKillRegState(KillSrc)); 599 return; 600 } 601 602 if (DestReg == AMDGPU::SCC) { 603 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 604 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 605 .addReg(SrcReg, getKillRegState(KillSrc)) 606 .addImm(0); 607 return; 608 } 609 610 if (RC == &AMDGPU::AGPR_32RegClass) { 611 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 612 AMDGPU::SReg_32RegClass.contains(SrcReg) || 613 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 614 if (!AMDGPU::VGPR_32RegClass.contains(SrcReg)) { 615 // First try to find defining accvgpr_write to avoid temporary registers. 616 for (auto Def = MI, E = MBB.begin(); Def != E; ) { 617 --Def; 618 if (!Def->definesRegister(SrcReg, &RI)) 619 continue; 620 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32) 621 break; 622 623 MachineOperand &DefOp = Def->getOperand(1); 624 assert(DefOp.isReg() || DefOp.isImm()); 625 626 if (DefOp.isReg()) { 627 // Check that register source operand if not clobbered before MI. 628 // Immediate operands are always safe to propagate. 629 bool SafeToPropagate = true; 630 for (auto I = Def; I != MI && SafeToPropagate; ++I) 631 if (I->modifiesRegister(DefOp.getReg(), &RI)) 632 SafeToPropagate = false; 633 634 if (!SafeToPropagate) 635 break; 636 637 DefOp.setIsKill(false); 638 } 639 640 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 641 .add(DefOp); 642 return; 643 } 644 645 RegScavenger RS; 646 RS.enterBasicBlock(MBB); 647 RS.forward(MI); 648 649 // Ideally we want to have three registers for a long reg_sequence copy 650 // to hide 2 waitstates between v_mov_b32 and accvgpr_write. 651 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, 652 *MBB.getParent()); 653 654 // Registers in the sequence are allocated contiguously so we can just 655 // use register number to pick one of three round-robin temps. 656 unsigned RegNo = DestReg % 3; 657 unsigned Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 658 if (!Tmp) 659 report_fatal_error("Cannot scavenge VGPR to copy to AGPR"); 660 RS.setRegUsed(Tmp); 661 // Only loop through if there are any free registers left, otherwise 662 // scavenger may report a fatal error without emergency spill slot 663 // or spill with the slot. 
664 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) { 665 unsigned Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 666 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) 667 break; 668 Tmp = Tmp2; 669 RS.setRegUsed(Tmp); 670 } 671 copyPhysReg(MBB, MI, DL, Tmp, SrcReg, KillSrc); 672 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 673 .addReg(Tmp, RegState::Kill); 674 return; 675 } 676 677 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32), DestReg) 678 .addReg(SrcReg, getKillRegState(KillSrc)); 679 return; 680 } 681 682 unsigned EltSize = 4; 683 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 684 if (RI.isSGPRClass(RC)) { 685 // TODO: Copy vec3/vec5 with s_mov_b64s then final s_mov_b32. 686 if (!(RI.getRegSizeInBits(*RC) % 64)) { 687 Opcode = AMDGPU::S_MOV_B64; 688 EltSize = 8; 689 } else { 690 Opcode = AMDGPU::S_MOV_B32; 691 EltSize = 4; 692 } 693 694 if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) { 695 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 696 return; 697 } 698 } else if (RI.hasAGPRs(RC)) { 699 Opcode = RI.hasVGPRs(RI.getPhysRegClass(SrcReg)) ? 700 AMDGPU::V_ACCVGPR_WRITE_B32 : AMDGPU::COPY; 701 } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(RI.getPhysRegClass(SrcReg))) { 702 Opcode = AMDGPU::V_ACCVGPR_READ_B32; 703 } 704 705 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 706 bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 707 708 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 709 unsigned SubIdx; 710 if (Forward) 711 SubIdx = SubIndices[Idx]; 712 else 713 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 714 715 if (Opcode == TargetOpcode::COPY) { 716 copyPhysReg(MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 717 RI.getSubReg(SrcReg, SubIdx), KillSrc); 718 continue; 719 } 720 721 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 722 get(Opcode), RI.getSubReg(DestReg, SubIdx)); 723 724 Builder.addReg(RI.getSubReg(SrcReg, SubIdx)); 725 726 if (Idx == 0) 727 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 728 729 bool UseKill = KillSrc && Idx == SubIndices.size() - 1; 730 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 731 } 732 } 733 734 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 735 int NewOpc; 736 737 // Try to map original to commuted opcode 738 NewOpc = AMDGPU::getCommuteRev(Opcode); 739 if (NewOpc != -1) 740 // Check if the commuted (REV) opcode exists on the target. 741 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 742 743 // Try to map commuted to original opcode 744 NewOpc = AMDGPU::getCommuteOrig(Opcode); 745 if (NewOpc != -1) 746 // Check if the original (non-REV) opcode exists on the target. 747 return pseudoToMCOpcode(NewOpc) != -1 ? 
NewOpc : -1; 748 749 return Opcode; 750 } 751 752 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 753 MachineBasicBlock::iterator MI, 754 const DebugLoc &DL, unsigned DestReg, 755 int64_t Value) const { 756 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 757 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 758 if (RegClass == &AMDGPU::SReg_32RegClass || 759 RegClass == &AMDGPU::SGPR_32RegClass || 760 RegClass == &AMDGPU::SReg_32_XM0RegClass || 761 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 762 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 763 .addImm(Value); 764 return; 765 } 766 767 if (RegClass == &AMDGPU::SReg_64RegClass || 768 RegClass == &AMDGPU::SGPR_64RegClass || 769 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 770 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 771 .addImm(Value); 772 return; 773 } 774 775 if (RegClass == &AMDGPU::VGPR_32RegClass) { 776 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 777 .addImm(Value); 778 return; 779 } 780 if (RegClass == &AMDGPU::VReg_64RegClass) { 781 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 782 .addImm(Value); 783 return; 784 } 785 786 unsigned EltSize = 4; 787 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 788 if (RI.isSGPRClass(RegClass)) { 789 if (RI.getRegSizeInBits(*RegClass) > 32) { 790 Opcode = AMDGPU::S_MOV_B64; 791 EltSize = 8; 792 } else { 793 Opcode = AMDGPU::S_MOV_B32; 794 EltSize = 4; 795 } 796 } 797 798 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 799 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 800 int64_t IdxValue = Idx == 0 ? Value : 0; 801 802 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 803 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 804 Builder.addImm(IdxValue); 805 } 806 } 807 808 const TargetRegisterClass * 809 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 810 return &AMDGPU::VGPR_32RegClass; 811 } 812 813 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 814 MachineBasicBlock::iterator I, 815 const DebugLoc &DL, unsigned DstReg, 816 ArrayRef<MachineOperand> Cond, 817 unsigned TrueReg, 818 unsigned FalseReg) const { 819 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 820 MachineFunction *MF = MBB.getParent(); 821 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 822 const TargetRegisterClass *BoolXExecRC = 823 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 824 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 825 "Not a VGPR32 reg"); 826 827 if (Cond.size() == 1) { 828 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 829 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 830 .add(Cond[0]); 831 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 832 .addImm(0) 833 .addReg(FalseReg) 834 .addImm(0) 835 .addReg(TrueReg) 836 .addReg(SReg); 837 } else if (Cond.size() == 2) { 838 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 839 switch (Cond[0].getImm()) { 840 case SIInstrInfo::SCC_TRUE: { 841 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 842 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 843 : AMDGPU::S_CSELECT_B64), SReg) 844 .addImm(1) 845 .addImm(0); 846 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 847 .addImm(0) 848 .addReg(FalseReg) 849 .addImm(0) 850 .addReg(TrueReg) 851 .addReg(SReg); 852 break; 853 } 854 case SIInstrInfo::SCC_FALSE: { 855 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 856 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 857 : AMDGPU::S_CSELECT_B64), SReg) 858 .addImm(0) 859 .addImm(1); 860 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 861 .addImm(0) 862 .addReg(FalseReg) 863 .addImm(0) 864 .addReg(TrueReg) 865 .addReg(SReg); 866 break; 867 } 868 case SIInstrInfo::VCCNZ: { 869 MachineOperand RegOp = Cond[1]; 870 RegOp.setImplicit(false); 871 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 872 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 873 .add(RegOp); 874 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 875 .addImm(0) 876 .addReg(FalseReg) 877 .addImm(0) 878 .addReg(TrueReg) 879 .addReg(SReg); 880 break; 881 } 882 case SIInstrInfo::VCCZ: { 883 MachineOperand RegOp = Cond[1]; 884 RegOp.setImplicit(false); 885 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 886 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 887 .add(RegOp); 888 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 889 .addImm(0) 890 .addReg(TrueReg) 891 .addImm(0) 892 .addReg(FalseReg) 893 .addReg(SReg); 894 break; 895 } 896 case SIInstrInfo::EXECNZ: { 897 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 898 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 899 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 900 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 901 .addImm(0); 902 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 903 : AMDGPU::S_CSELECT_B64), SReg) 904 .addImm(1) 905 .addImm(0); 906 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 907 .addImm(0) 908 .addReg(FalseReg) 909 .addImm(0) 910 .addReg(TrueReg) 911 .addReg(SReg); 912 break; 913 } 914 case SIInstrInfo::EXECZ: { 915 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 916 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 917 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 918 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 919 .addImm(0); 920 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 921 : AMDGPU::S_CSELECT_B64), SReg) 922 .addImm(0) 923 .addImm(1); 924 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 925 .addImm(0) 926 .addReg(FalseReg) 927 .addImm(0) 928 .addReg(TrueReg) 929 .addReg(SReg); 930 llvm_unreachable("Unhandled branch predicate EXECZ"); 931 break; 932 } 933 default: 934 llvm_unreachable("invalid branch predicate"); 935 } 936 } else { 937 llvm_unreachable("Can only handle Cond size 1 or 2"); 938 } 939 } 940 941 unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 942 MachineBasicBlock::iterator I, 943 const DebugLoc &DL, 944 unsigned SrcReg, int Value) const { 945 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 946 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 947 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 948 .addImm(Value) 949 .addReg(SrcReg); 950 951 return Reg; 952 } 953 954 unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB, 955 MachineBasicBlock::iterator I, 956 const DebugLoc &DL, 957 unsigned SrcReg, int Value) const { 958 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 959 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 960 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 961 .addImm(Value) 962 .addReg(SrcReg); 963 964 return Reg; 965 } 966 967 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 968 969 if (RI.hasAGPRs(DstRC)) 970 return AMDGPU::COPY; 971 if (RI.getRegSizeInBits(*DstRC) == 32) { 972 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 973 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 974 return AMDGPU::S_MOV_B64; 975 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 976 return AMDGPU::V_MOV_B64_PSEUDO; 977 } 978 return AMDGPU::COPY; 979 } 980 981 static unsigned getIndirectVGPRWritePseudoOpc(unsigned VecSize) { 982 switch (VecSize) { 983 case 32: // 4 bytes 984 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V1; 985 case 64: // 8 bytes 986 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V2; 987 case 96: // 12 bytes 988 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V3; 989 case 128: // 16 bytes 990 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V4; 991 case 160: // 20 bytes 992 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V5; 993 case 256: // 32 bytes 994 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V8; 995 case 512: // 64 bytes 996 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V16; 997 case 1024: // 128 bytes 998 return AMDGPU::V_INDIRECT_REG_WRITE_B32_V32; 999 default: 1000 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1001 } 1002 } 1003 1004 static unsigned getIndirectSGPRWritePseudo32(unsigned VecSize) { 1005 switch (VecSize) { 1006 case 32: // 4 bytes 1007 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V1; 1008 case 64: // 8 bytes 1009 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V2; 1010 case 96: // 12 bytes 1011 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V3; 1012 case 128: // 16 bytes 1013 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V4; 1014 case 160: // 20 bytes 1015 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V5; 1016 case 256: // 32 bytes 1017 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V8; 1018 case 512: // 64 bytes 1019 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V16; 1020 case 1024: // 128 bytes 1021 return AMDGPU::S_INDIRECT_REG_WRITE_B32_V32; 1022 default: 1023 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1024 } 1025 } 1026 1027 static unsigned getIndirectSGPRWritePseudo64(unsigned VecSize) { 1028 switch (VecSize) { 1029 case 64: // 8 bytes 1030 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V1; 1031 case 128: // 16 bytes 1032 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V2; 1033 case 256: // 32 bytes 1034 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V4; 1035 case 512: // 64 bytes 1036 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V8; 1037 case 1024: // 128 bytes 1038 return AMDGPU::S_INDIRECT_REG_WRITE_B64_V16; 1039 default: 1040 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1041 } 1042 } 1043 1044 const MCInstrDesc &SIInstrInfo::getIndirectRegWritePseudo( 1045 unsigned VecSize, unsigned EltSize, bool IsSGPR) const { 1046 if (IsSGPR) { 1047 switch (EltSize) { 1048 case 32: 1049 return get(getIndirectSGPRWritePseudo32(VecSize)); 1050 case 64: 1051 return get(getIndirectSGPRWritePseudo64(VecSize)); 1052 default: 1053 llvm_unreachable("invalid reg indexing elt size"); 1054 } 1055 } 1056 1057 assert(EltSize == 32 && "invalid reg indexing elt size"); 1058 return get(getIndirectVGPRWritePseudoOpc(VecSize)); 1059 } 1060 1061 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1062 switch (Size) { 1063 case 4: 1064 return AMDGPU::SI_SPILL_S32_SAVE; 1065 case 8: 1066 return AMDGPU::SI_SPILL_S64_SAVE; 1067 case 12: 1068 return AMDGPU::SI_SPILL_S96_SAVE; 1069 case 16: 1070 return AMDGPU::SI_SPILL_S128_SAVE; 1071 case 20: 1072 return AMDGPU::SI_SPILL_S160_SAVE; 1073 case 32: 1074 return AMDGPU::SI_SPILL_S256_SAVE; 1075 case 64: 1076 return AMDGPU::SI_SPILL_S512_SAVE; 1077 case 128: 1078 return AMDGPU::SI_SPILL_S1024_SAVE; 1079 default: 1080 
llvm_unreachable("unknown register size"); 1081 } 1082 } 1083 1084 static unsigned getVGPRSpillSaveOpcode(unsigned Size) { 1085 switch (Size) { 1086 case 4: 1087 return AMDGPU::SI_SPILL_V32_SAVE; 1088 case 8: 1089 return AMDGPU::SI_SPILL_V64_SAVE; 1090 case 12: 1091 return AMDGPU::SI_SPILL_V96_SAVE; 1092 case 16: 1093 return AMDGPU::SI_SPILL_V128_SAVE; 1094 case 20: 1095 return AMDGPU::SI_SPILL_V160_SAVE; 1096 case 32: 1097 return AMDGPU::SI_SPILL_V256_SAVE; 1098 case 64: 1099 return AMDGPU::SI_SPILL_V512_SAVE; 1100 case 128: 1101 return AMDGPU::SI_SPILL_V1024_SAVE; 1102 default: 1103 llvm_unreachable("unknown register size"); 1104 } 1105 } 1106 1107 static unsigned getAGPRSpillSaveOpcode(unsigned Size) { 1108 switch (Size) { 1109 case 4: 1110 return AMDGPU::SI_SPILL_A32_SAVE; 1111 case 8: 1112 return AMDGPU::SI_SPILL_A64_SAVE; 1113 case 16: 1114 return AMDGPU::SI_SPILL_A128_SAVE; 1115 case 64: 1116 return AMDGPU::SI_SPILL_A512_SAVE; 1117 case 128: 1118 return AMDGPU::SI_SPILL_A1024_SAVE; 1119 default: 1120 llvm_unreachable("unknown register size"); 1121 } 1122 } 1123 1124 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 1125 MachineBasicBlock::iterator MI, 1126 Register SrcReg, bool isKill, 1127 int FrameIndex, 1128 const TargetRegisterClass *RC, 1129 const TargetRegisterInfo *TRI) const { 1130 MachineFunction *MF = MBB.getParent(); 1131 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1132 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1133 const DebugLoc &DL = MBB.findDebugLoc(MI); 1134 1135 MachinePointerInfo PtrInfo 1136 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1137 MachineMemOperand *MMO = MF->getMachineMemOperand( 1138 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex), 1139 FrameInfo.getObjectAlign(FrameIndex)); 1140 unsigned SpillSize = TRI->getSpillSize(*RC); 1141 1142 if (RI.isSGPRClass(RC)) { 1143 MFI->setHasSpilledSGPRs(); 1144 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled"); 1145 1146 // We are only allowed to create one new instruction when spilling 1147 // registers, so we need to use pseudo instruction for spilling SGPRs. 1148 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize)); 1149 1150 // The SGPR spill/restore instructions only work on number sgprs, so we need 1151 // to make sure we are using the correct register class. 1152 if (Register::isVirtualRegister(SrcReg) && SpillSize == 4) { 1153 MachineRegisterInfo &MRI = MF->getRegInfo(); 1154 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass); 1155 } 1156 1157 BuildMI(MBB, MI, DL, OpDesc) 1158 .addReg(SrcReg, getKillRegState(isKill)) // data 1159 .addFrameIndex(FrameIndex) // addr 1160 .addMemOperand(MMO) 1161 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) 1162 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1163 // Add the scratch resource registers as implicit uses because we may end up 1164 // needing them, and need to ensure that the reserved registers are 1165 // correctly handled. 1166 if (RI.spillSGPRToVGPR()) 1167 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1168 return; 1169 } 1170 1171 unsigned Opcode = RI.hasAGPRs(RC) ? 
getAGPRSpillSaveOpcode(SpillSize) 1172 : getVGPRSpillSaveOpcode(SpillSize); 1173 MFI->setHasSpilledVGPRs(); 1174 1175 auto MIB = BuildMI(MBB, MI, DL, get(Opcode)); 1176 if (RI.hasAGPRs(RC)) { 1177 MachineRegisterInfo &MRI = MF->getRegInfo(); 1178 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1179 MIB.addReg(Tmp, RegState::Define); 1180 } 1181 MIB.addReg(SrcReg, getKillRegState(isKill)) // data 1182 .addFrameIndex(FrameIndex) // addr 1183 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1184 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1185 .addImm(0) // offset 1186 .addMemOperand(MMO); 1187 } 1188 1189 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1190 switch (Size) { 1191 case 4: 1192 return AMDGPU::SI_SPILL_S32_RESTORE; 1193 case 8: 1194 return AMDGPU::SI_SPILL_S64_RESTORE; 1195 case 12: 1196 return AMDGPU::SI_SPILL_S96_RESTORE; 1197 case 16: 1198 return AMDGPU::SI_SPILL_S128_RESTORE; 1199 case 20: 1200 return AMDGPU::SI_SPILL_S160_RESTORE; 1201 case 32: 1202 return AMDGPU::SI_SPILL_S256_RESTORE; 1203 case 64: 1204 return AMDGPU::SI_SPILL_S512_RESTORE; 1205 case 128: 1206 return AMDGPU::SI_SPILL_S1024_RESTORE; 1207 default: 1208 llvm_unreachable("unknown register size"); 1209 } 1210 } 1211 1212 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1213 switch (Size) { 1214 case 4: 1215 return AMDGPU::SI_SPILL_V32_RESTORE; 1216 case 8: 1217 return AMDGPU::SI_SPILL_V64_RESTORE; 1218 case 12: 1219 return AMDGPU::SI_SPILL_V96_RESTORE; 1220 case 16: 1221 return AMDGPU::SI_SPILL_V128_RESTORE; 1222 case 20: 1223 return AMDGPU::SI_SPILL_V160_RESTORE; 1224 case 32: 1225 return AMDGPU::SI_SPILL_V256_RESTORE; 1226 case 64: 1227 return AMDGPU::SI_SPILL_V512_RESTORE; 1228 case 128: 1229 return AMDGPU::SI_SPILL_V1024_RESTORE; 1230 default: 1231 llvm_unreachable("unknown register size"); 1232 } 1233 } 1234 1235 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1236 switch (Size) { 1237 case 4: 1238 return AMDGPU::SI_SPILL_A32_RESTORE; 1239 case 8: 1240 return AMDGPU::SI_SPILL_A64_RESTORE; 1241 case 16: 1242 return AMDGPU::SI_SPILL_A128_RESTORE; 1243 case 64: 1244 return AMDGPU::SI_SPILL_A512_RESTORE; 1245 case 128: 1246 return AMDGPU::SI_SPILL_A1024_RESTORE; 1247 default: 1248 llvm_unreachable("unknown register size"); 1249 } 1250 } 1251 1252 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1253 MachineBasicBlock::iterator MI, 1254 Register DestReg, int FrameIndex, 1255 const TargetRegisterClass *RC, 1256 const TargetRegisterInfo *TRI) const { 1257 MachineFunction *MF = MBB.getParent(); 1258 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1259 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1260 const DebugLoc &DL = MBB.findDebugLoc(MI); 1261 unsigned SpillSize = TRI->getSpillSize(*RC); 1262 1263 MachinePointerInfo PtrInfo 1264 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1265 1266 MachineMemOperand *MMO = MF->getMachineMemOperand( 1267 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1268 FrameInfo.getObjectAlign(FrameIndex)); 1269 1270 if (RI.isSGPRClass(RC)) { 1271 MFI->setHasSpilledSGPRs(); 1272 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1273 1274 // FIXME: Maybe this should not include a memoperand because it will be 1275 // lowered to non-memory instructions. 
1276 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); 1277 if (Register::isVirtualRegister(DestReg) && SpillSize == 4) { 1278 MachineRegisterInfo &MRI = MF->getRegInfo(); 1279 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass); 1280 } 1281 1282 if (RI.spillSGPRToVGPR()) 1283 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1284 BuildMI(MBB, MI, DL, OpDesc, DestReg) 1285 .addFrameIndex(FrameIndex) // addr 1286 .addMemOperand(MMO) 1287 .addReg(MFI->getScratchRSrcReg(), RegState::Implicit) 1288 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1289 return; 1290 } 1291 1292 unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize) 1293 : getVGPRSpillRestoreOpcode(SpillSize); 1294 auto MIB = BuildMI(MBB, MI, DL, get(Opcode), DestReg); 1295 if (RI.hasAGPRs(RC)) { 1296 MachineRegisterInfo &MRI = MF->getRegInfo(); 1297 Register Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1298 MIB.addReg(Tmp, RegState::Define); 1299 } 1300 MIB.addFrameIndex(FrameIndex) // vaddr 1301 .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc 1302 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1303 .addImm(0) // offset 1304 .addMemOperand(MMO); 1305 } 1306 1307 /// \param @Offset Offset in bytes of the FrameIndex being spilled 1308 unsigned SIInstrInfo::calculateLDSSpillAddress( 1309 MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg, 1310 unsigned FrameOffset, unsigned Size) const { 1311 MachineFunction *MF = MBB.getParent(); 1312 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1313 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 1314 const DebugLoc &DL = MBB.findDebugLoc(MI); 1315 unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize(); 1316 unsigned WavefrontSize = ST.getWavefrontSize(); 1317 1318 unsigned TIDReg = MFI->getTIDReg(); 1319 if (!MFI->hasCalculatedTID()) { 1320 MachineBasicBlock &Entry = MBB.getParent()->front(); 1321 MachineBasicBlock::iterator Insert = Entry.front(); 1322 const DebugLoc &DL = Insert->getDebugLoc(); 1323 1324 TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass, 1325 *MF); 1326 if (TIDReg == AMDGPU::NoRegister) 1327 return TIDReg; 1328 1329 if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) && 1330 WorkGroupSize > WavefrontSize) { 1331 Register TIDIGXReg = 1332 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X); 1333 Register TIDIGYReg = 1334 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y); 1335 Register TIDIGZReg = 1336 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z); 1337 Register InputPtrReg = 1338 MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR); 1339 for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) { 1340 if (!Entry.isLiveIn(Reg)) 1341 Entry.addLiveIn(Reg); 1342 } 1343 1344 RS->enterBasicBlock(Entry); 1345 // FIXME: Can we scavenge an SReg_64 and access the subregs? 
1346 unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); 1347 unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0); 1348 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0) 1349 .addReg(InputPtrReg) 1350 .addImm(SI::KernelInputOffsets::NGROUPS_Z); 1351 BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1) 1352 .addReg(InputPtrReg) 1353 .addImm(SI::KernelInputOffsets::NGROUPS_Y); 1354 1355 // NGROUPS.X * NGROUPS.Y 1356 BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1) 1357 .addReg(STmp1) 1358 .addReg(STmp0); 1359 // (NGROUPS.X * NGROUPS.Y) * TIDIG.X 1360 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg) 1361 .addReg(STmp1) 1362 .addReg(TIDIGXReg); 1363 // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROPUS.Y * TIDIG.X) 1364 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg) 1365 .addReg(STmp0) 1366 .addReg(TIDIGYReg) 1367 .addReg(TIDReg); 1368 // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROPUS.Y * TIDIG.X)) + TIDIG.Z 1369 getAddNoCarry(Entry, Insert, DL, TIDReg) 1370 .addReg(TIDReg) 1371 .addReg(TIDIGZReg) 1372 .addImm(0); // clamp bit 1373 } else { 1374 // Get the wave id 1375 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64), 1376 TIDReg) 1377 .addImm(-1) 1378 .addImm(0); 1379 1380 BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64), 1381 TIDReg) 1382 .addImm(-1) 1383 .addReg(TIDReg); 1384 } 1385 1386 BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32), 1387 TIDReg) 1388 .addImm(2) 1389 .addReg(TIDReg); 1390 MFI->setTIDReg(TIDReg); 1391 } 1392 1393 // Add FrameIndex to LDS offset 1394 unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize); 1395 getAddNoCarry(MBB, MI, DL, TmpReg) 1396 .addImm(LDSOffset) 1397 .addReg(TIDReg) 1398 .addImm(0); // clamp bit 1399 1400 return TmpReg; 1401 } 1402 1403 void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB, 1404 MachineBasicBlock::iterator MI, 1405 int Count) const { 1406 DebugLoc DL = MBB.findDebugLoc(MI); 1407 while (Count > 0) { 1408 int Arg; 1409 if (Count >= 8) 1410 Arg = 7; 1411 else 1412 Arg = Count - 1; 1413 Count -= 8; 1414 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)) 1415 .addImm(Arg); 1416 } 1417 } 1418 1419 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB, 1420 MachineBasicBlock::iterator MI) const { 1421 insertWaitStates(MBB, MI, 1); 1422 } 1423 1424 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const { 1425 auto MF = MBB.getParent(); 1426 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1427 1428 assert(Info->isEntryFunction()); 1429 1430 if (MBB.succ_empty()) { 1431 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end(); 1432 if (HasNoTerminator) { 1433 if (Info->returnsVoid()) { 1434 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0); 1435 } else { 1436 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG)); 1437 } 1438 } 1439 } 1440 } 1441 1442 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) { 1443 switch (MI.getOpcode()) { 1444 default: return 1; // FIXME: Do wait states equal cycles? 
1445 1446 case AMDGPU::S_NOP: 1447 return MI.getOperand(0).getImm() + 1; 1448 } 1449 } 1450 1451 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1452 MachineBasicBlock &MBB = *MI.getParent(); 1453 DebugLoc DL = MBB.findDebugLoc(MI); 1454 switch (MI.getOpcode()) { 1455 default: return TargetInstrInfo::expandPostRAPseudo(MI); 1456 case AMDGPU::S_MOV_B64_term: 1457 // This is only a terminator to get the correct spill code placement during 1458 // register allocation. 1459 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1460 break; 1461 1462 case AMDGPU::S_MOV_B32_term: 1463 // This is only a terminator to get the correct spill code placement during 1464 // register allocation. 1465 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1466 break; 1467 1468 case AMDGPU::S_XOR_B64_term: 1469 // This is only a terminator to get the correct spill code placement during 1470 // register allocation. 1471 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1472 break; 1473 1474 case AMDGPU::S_XOR_B32_term: 1475 // This is only a terminator to get the correct spill code placement during 1476 // register allocation. 1477 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1478 break; 1479 1480 case AMDGPU::S_OR_B32_term: 1481 // This is only a terminator to get the correct spill code placement during 1482 // register allocation. 1483 MI.setDesc(get(AMDGPU::S_OR_B32)); 1484 break; 1485 1486 case AMDGPU::S_ANDN2_B64_term: 1487 // This is only a terminator to get the correct spill code placement during 1488 // register allocation. 1489 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1490 break; 1491 1492 case AMDGPU::S_ANDN2_B32_term: 1493 // This is only a terminator to get the correct spill code placement during 1494 // register allocation. 1495 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1496 break; 1497 1498 case AMDGPU::V_MOV_B64_PSEUDO: { 1499 Register Dst = MI.getOperand(0).getReg(); 1500 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1501 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1502 1503 const MachineOperand &SrcOp = MI.getOperand(1); 1504 // FIXME: Will this work for 64-bit floating point immediates? 1505 assert(!SrcOp.isFPImm()); 1506 if (SrcOp.isImm()) { 1507 APInt Imm(64, SrcOp.getImm()); 1508 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1509 .addImm(Imm.getLoBits(32).getZExtValue()) 1510 .addReg(Dst, RegState::Implicit | RegState::Define); 1511 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1512 .addImm(Imm.getHiBits(32).getZExtValue()) 1513 .addReg(Dst, RegState::Implicit | RegState::Define); 1514 } else { 1515 assert(SrcOp.isReg()); 1516 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1517 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1518 .addReg(Dst, RegState::Implicit | RegState::Define); 1519 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1520 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1521 .addReg(Dst, RegState::Implicit | RegState::Define); 1522 } 1523 MI.eraseFromParent(); 1524 break; 1525 } 1526 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1527 expandMovDPP64(MI); 1528 break; 1529 } 1530 case AMDGPU::V_SET_INACTIVE_B32: { 1531 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1532 unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1533 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1534 .addReg(Exec); 1535 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1536 .add(MI.getOperand(2)); 1537 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1538 .addReg(Exec); 1539 MI.eraseFromParent(); 1540 break; 1541 } 1542 case AMDGPU::V_SET_INACTIVE_B64: { 1543 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1544 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1545 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1546 .addReg(Exec); 1547 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1548 MI.getOperand(0).getReg()) 1549 .add(MI.getOperand(2)); 1550 expandPostRAPseudo(*Copy); 1551 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1552 .addReg(Exec); 1553 MI.eraseFromParent(); 1554 break; 1555 } 1556 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V1: 1557 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V2: 1558 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V3: 1559 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V4: 1560 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V5: 1561 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V8: 1562 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V16: 1563 case AMDGPU::V_INDIRECT_REG_WRITE_B32_V32: 1564 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V1: 1565 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V2: 1566 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V3: 1567 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V4: 1568 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V5: 1569 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V8: 1570 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V16: 1571 case AMDGPU::S_INDIRECT_REG_WRITE_B32_V32: 1572 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V1: 1573 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V2: 1574 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V4: 1575 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V8: 1576 case AMDGPU::S_INDIRECT_REG_WRITE_B64_V16: { 1577 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1578 1579 unsigned Opc; 1580 if (RI.hasVGPRs(EltRC)) { 1581 Opc = ST.useVGPRIndexMode() ? 1582 AMDGPU::V_MOV_B32_indirect : AMDGPU::V_MOVRELD_B32_e32; 1583 } else { 1584 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? 1585 AMDGPU::S_MOVRELD_B64 : AMDGPU::S_MOVRELD_B32; 1586 } 1587 1588 const MCInstrDesc &OpDesc = get(Opc); 1589 Register VecReg = MI.getOperand(0).getReg(); 1590 bool IsUndef = MI.getOperand(1).isUndef(); 1591 unsigned SubReg = MI.getOperand(3).getImm(); 1592 assert(VecReg == MI.getOperand(1).getReg()); 1593 1594 MachineInstrBuilder MIB = 1595 BuildMI(MBB, MI, DL, OpDesc) 1596 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1597 .add(MI.getOperand(2)) 1598 .addReg(VecReg, RegState::ImplicitDefine) 1599 .addReg(VecReg, RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1600 1601 const int ImpDefIdx = 1602 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1603 const int ImpUseIdx = ImpDefIdx + 1; 1604 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1605 MI.eraseFromParent(); 1606 break; 1607 } 1608 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1609 MachineFunction &MF = *MBB.getParent(); 1610 Register Reg = MI.getOperand(0).getReg(); 1611 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1612 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1613 1614 // Create a bundle so these instructions won't be re-ordered by the 1615 // post-RA scheduler. 1616 MIBundleBuilder Bundler(MBB, MI); 1617 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1618 1619 // Add 32-bit offset from this instruction to the start of the 1620 // constant data. 
1621 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1622 .addReg(RegLo) 1623 .add(MI.getOperand(1))); 1624 1625 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1626 .addReg(RegHi); 1627 MIB.add(MI.getOperand(2)); 1628 1629 Bundler.append(MIB); 1630 finalizeBundle(MBB, Bundler.begin()); 1631 1632 MI.eraseFromParent(); 1633 break; 1634 } 1635 case AMDGPU::ENTER_WWM: { 1636 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1637 // WWM is entered. 1638 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1639 : AMDGPU::S_OR_SAVEEXEC_B64)); 1640 break; 1641 } 1642 case AMDGPU::EXIT_WWM: { 1643 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1644 // WWM is exited. 1645 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 1646 break; 1647 } 1648 } 1649 return true; 1650 } 1651 1652 std::pair<MachineInstr*, MachineInstr*> 1653 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 1654 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 1655 1656 MachineBasicBlock &MBB = *MI.getParent(); 1657 DebugLoc DL = MBB.findDebugLoc(MI); 1658 MachineFunction *MF = MBB.getParent(); 1659 MachineRegisterInfo &MRI = MF->getRegInfo(); 1660 Register Dst = MI.getOperand(0).getReg(); 1661 unsigned Part = 0; 1662 MachineInstr *Split[2]; 1663 1664 1665 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 1666 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 1667 if (Dst.isPhysical()) { 1668 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 1669 } else { 1670 assert(MRI.isSSA()); 1671 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1672 MovDPP.addDef(Tmp); 1673 } 1674 1675 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 1676 const MachineOperand &SrcOp = MI.getOperand(I); 1677 assert(!SrcOp.isFPImm()); 1678 if (SrcOp.isImm()) { 1679 APInt Imm(64, SrcOp.getImm()); 1680 Imm.ashrInPlace(Part * 32); 1681 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 1682 } else { 1683 assert(SrcOp.isReg()); 1684 Register Src = SrcOp.getReg(); 1685 if (Src.isPhysical()) 1686 MovDPP.addReg(RI.getSubReg(Src, Sub)); 1687 else 1688 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 1689 } 1690 } 1691 1692 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 1693 MovDPP.addImm(MI.getOperand(I).getImm()); 1694 1695 Split[Part] = MovDPP; 1696 ++Part; 1697 } 1698 1699 if (Dst.isVirtual()) 1700 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 1701 .addReg(Split[0]->getOperand(0).getReg()) 1702 .addImm(AMDGPU::sub0) 1703 .addReg(Split[1]->getOperand(0).getReg()) 1704 .addImm(AMDGPU::sub1); 1705 1706 MI.eraseFromParent(); 1707 return std::make_pair(Split[0], Split[1]); 1708 } 1709 1710 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 1711 MachineOperand &Src0, 1712 unsigned Src0OpName, 1713 MachineOperand &Src1, 1714 unsigned Src1OpName) const { 1715 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 1716 if (!Src0Mods) 1717 return false; 1718 1719 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 1720 assert(Src1Mods && 1721 "All commutable instructions have both src0 and src1 modifiers"); 1722 1723 int Src0ModsVal = Src0Mods->getImm(); 1724 int Src1ModsVal = Src1Mods->getImm(); 1725 1726 Src1Mods->setImm(Src0ModsVal); 1727 Src0Mods->setImm(Src1ModsVal); 1728 return true; 1729 } 1730 1731 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 1732 MachineOperand &RegOp, 1733 MachineOperand &NonRegOp) { 1734 Register Reg = RegOp.getReg(); 1735 unsigned SubReg = RegOp.getSubReg(); 1736 bool IsKill = RegOp.isKill(); 1737 bool IsDead = RegOp.isDead(); 1738 bool IsUndef = RegOp.isUndef(); 1739 bool IsDebug = RegOp.isDebug(); 1740 1741 if (NonRegOp.isImm()) 1742 RegOp.ChangeToImmediate(NonRegOp.getImm()); 1743 else if (NonRegOp.isFI()) 1744 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 1745 else 1746 return nullptr; 1747 1748 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 1749 NonRegOp.setSubReg(SubReg); 1750 1751 return &MI; 1752 } 1753 1754 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 1755 unsigned Src0Idx, 1756 unsigned Src1Idx) const { 1757 assert(!NewMI && "this should never be used"); 1758 1759 unsigned Opc = MI.getOpcode(); 1760 int CommutedOpcode = commuteOpcode(Opc); 1761 if (CommutedOpcode == -1) 1762 return nullptr; 1763 1764 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 1765 static_cast<int>(Src0Idx) && 1766 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 1767 static_cast<int>(Src1Idx) && 1768 "inconsistency with findCommutedOpIndices"); 1769 1770 MachineOperand &Src0 = MI.getOperand(Src0Idx); 1771 MachineOperand &Src1 = MI.getOperand(Src1Idx); 1772 1773 MachineInstr *CommutedMI = nullptr; 1774 if (Src0.isReg() && Src1.isReg()) { 1775 if (isOperandLegal(MI, Src1Idx, &Src0)) { 1776 // Be sure to copy the source modifiers to the right place. 1777 CommutedMI 1778 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 1779 } 1780 1781 } else if (Src0.isReg() && !Src1.isReg()) { 1782 // src0 should always be able to support any operand type, so no need to 1783 // check operand legality. 1784 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 1785 } else if (!Src0.isReg() && Src1.isReg()) { 1786 if (isOperandLegal(MI, Src1Idx, &Src0)) 1787 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 1788 } else { 1789 // FIXME: Found two non registers to commute. This does happen. 
1790 return nullptr; 1791 } 1792 1793 if (CommutedMI) { 1794 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 1795 Src1, AMDGPU::OpName::src1_modifiers); 1796 1797 CommutedMI->setDesc(get(CommutedOpcode)); 1798 } 1799 1800 return CommutedMI; 1801 } 1802 1803 // This needs to be implemented because the source modifiers may be inserted 1804 // between the true commutable operands, and the base 1805 // TargetInstrInfo::commuteInstruction uses it. 1806 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 1807 unsigned &SrcOpIdx0, 1808 unsigned &SrcOpIdx1) const { 1809 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 1810 } 1811 1812 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 1813 unsigned &SrcOpIdx1) const { 1814 if (!Desc.isCommutable()) 1815 return false; 1816 1817 unsigned Opc = Desc.getOpcode(); 1818 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 1819 if (Src0Idx == -1) 1820 return false; 1821 1822 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 1823 if (Src1Idx == -1) 1824 return false; 1825 1826 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 1827 } 1828 1829 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 1830 int64_t BrOffset) const { 1831 // BranchRelaxation should never have to check s_setpc_b64 because its dest 1832 // block is unanalyzable. 1833 assert(BranchOp != AMDGPU::S_SETPC_B64); 1834 1835 // Convert to dwords. 1836 BrOffset /= 4; 1837 1838 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 1839 // from the next instruction. 1840 BrOffset -= 1; 1841 1842 return isIntN(BranchOffsetBits, BrOffset); 1843 } 1844 1845 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 1846 const MachineInstr &MI) const { 1847 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 1848 // This would be a difficult analysis to perform, but can always be legal so 1849 // there's no need to analyze it. 1850 return nullptr; 1851 } 1852 1853 return MI.getOperand(0).getMBB(); 1854 } 1855 1856 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 1857 MachineBasicBlock &DestBB, 1858 const DebugLoc &DL, 1859 int64_t BrOffset, 1860 RegScavenger *RS) const { 1861 assert(RS && "RegScavenger required for long branching"); 1862 assert(MBB.empty() && 1863 "new block should be inserted for expanding unconditional branch"); 1864 assert(MBB.pred_size() == 1); 1865 1866 MachineFunction *MF = MBB.getParent(); 1867 MachineRegisterInfo &MRI = MF->getRegInfo(); 1868 1869 // FIXME: Virtual register workaround for RegScavenger not working with empty 1870 // blocks. 1871 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 1872 1873 auto I = MBB.end(); 1874 1875 // We need to compute the offset relative to the instruction immediately after 1876 // s_getpc_b64. Insert pc arithmetic code before last terminator. 1877 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 1878 1879 // TODO: Handle > 32-bit block address. 1880 if (BrOffset >= 0) { 1881 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 1882 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 1883 .addReg(PCReg, 0, AMDGPU::sub0) 1884 .addMBB(&DestBB, MO_LONG_BRANCH_FORWARD); 1885 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 1886 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 1887 .addReg(PCReg, 0, AMDGPU::sub1) 1888 .addImm(0); 1889 } else { 1890 // Backwards branch. 
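// The destination precedes this block, so compute PC - |offset| instead.
// MO_LONG_BRANCH_BACKWARD marks the operand so the offset is materialized
// with the correct sign when the block address is resolved.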
1891 BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
1892 .addReg(PCReg, RegState::Define, AMDGPU::sub0)
1893 .addReg(PCReg, 0, AMDGPU::sub0)
1894 .addMBB(&DestBB, MO_LONG_BRANCH_BACKWARD);
1895 BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
1896 .addReg(PCReg, RegState::Define, AMDGPU::sub1)
1897 .addReg(PCReg, 0, AMDGPU::sub1)
1898 .addImm(0);
1899 }
1900
1901 // Insert the indirect branch after the other terminator.
1902 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
1903 .addReg(PCReg);
1904
1905 // FIXME: If spilling is necessary, this will fail because this scavenger has
1906 // no emergency stack slots. It is non-trivial to spill in this situation,
1907 // because the restore code needs to be specially placed after the
1908 // jump. BranchRelaxation then needs to be made aware of the newly inserted
1909 // block.
1910 //
1911 // If a spill is needed for the pc register pair, we need to insert a spill
1912 // restore block right before the destination block, and insert a short branch
1913 // into the old destination block's fallthrough predecessor.
1914 // e.g.:
1915 //
1916 // s_cbranch_scc0 skip_long_branch:
1917 //
1918 // long_branch_bb:
1919 // spill s[8:9]
1920 // s_getpc_b64 s[8:9]
1921 // s_add_u32 s8, s8, restore_bb
1922 // s_addc_u32 s9, s9, 0
1923 // s_setpc_b64 s[8:9]
1924 //
1925 // skip_long_branch:
1926 // foo;
1927 //
1928 // .....
1929 //
1930 // dest_bb_fallthrough_predecessor:
1931 // bar;
1932 // s_branch dest_bb
1933 //
1934 // restore_bb:
1935 // restore s[8:9]
1936 // fallthrough dest_bb
1937 //
1938 // dest_bb:
1939 // buzz;
1940
1941 RS->enterBasicBlockEnd(MBB);
1942 unsigned Scav = RS->scavengeRegisterBackwards(
1943 AMDGPU::SReg_64RegClass,
1944 MachineBasicBlock::iterator(GetPC), false, 0);
1945 MRI.replaceRegWith(PCReg, Scav);
1946 MRI.clearVirtRegs();
1947 RS->setRegUsed(Scav);
1948
1949 return 4 + 8 + 4 + 4;
1950 }
1951
1952 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
1953 switch (Cond) {
1954 case SIInstrInfo::SCC_TRUE:
1955 return AMDGPU::S_CBRANCH_SCC1;
1956 case SIInstrInfo::SCC_FALSE:
1957 return AMDGPU::S_CBRANCH_SCC0;
1958 case SIInstrInfo::VCCNZ:
1959 return AMDGPU::S_CBRANCH_VCCNZ;
1960 case SIInstrInfo::VCCZ:
1961 return AMDGPU::S_CBRANCH_VCCZ;
1962 case SIInstrInfo::EXECNZ:
1963 return AMDGPU::S_CBRANCH_EXECNZ;
1964 case SIInstrInfo::EXECZ:
1965 return AMDGPU::S_CBRANCH_EXECZ;
1966 default:
1967 llvm_unreachable("invalid branch predicate");
1968 }
1969 }
1970
1971 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
1972 switch (Opcode) {
1973 case AMDGPU::S_CBRANCH_SCC0:
1974 return SCC_FALSE;
1975 case AMDGPU::S_CBRANCH_SCC1:
1976 return SCC_TRUE;
1977 case AMDGPU::S_CBRANCH_VCCNZ:
1978 return VCCNZ;
1979 case AMDGPU::S_CBRANCH_VCCZ:
1980 return VCCZ;
1981 case AMDGPU::S_CBRANCH_EXECNZ:
1982 return EXECNZ;
1983 case AMDGPU::S_CBRANCH_EXECZ:
1984 return EXECZ;
1985 default:
1986 return INVALID_BR;
1987 }
1988 }
1989
1990 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
1991 MachineBasicBlock::iterator I,
1992 MachineBasicBlock *&TBB,
1993 MachineBasicBlock *&FBB,
1994 SmallVectorImpl<MachineOperand> &Cond,
1995 bool AllowModify) const {
1996 if (I->getOpcode() == AMDGPU::S_BRANCH) {
1997 // Unconditional Branch
1998 TBB = I->getOperand(0).getMBB();
1999 return false;
2000 }
2001
2002 MachineBasicBlock *CondBB = nullptr;
2003
2004 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
2005 CondBB = I->getOperand(1).getMBB();
2006 Cond.push_back(I->getOperand(0));
2007 } else 
{ 2008 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2009 if (Pred == INVALID_BR) 2010 return true; 2011 2012 CondBB = I->getOperand(0).getMBB(); 2013 Cond.push_back(MachineOperand::CreateImm(Pred)); 2014 Cond.push_back(I->getOperand(1)); // Save the branch register. 2015 } 2016 ++I; 2017 2018 if (I == MBB.end()) { 2019 // Conditional branch followed by fall-through. 2020 TBB = CondBB; 2021 return false; 2022 } 2023 2024 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2025 TBB = CondBB; 2026 FBB = I->getOperand(0).getMBB(); 2027 return false; 2028 } 2029 2030 return true; 2031 } 2032 2033 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2034 MachineBasicBlock *&FBB, 2035 SmallVectorImpl<MachineOperand> &Cond, 2036 bool AllowModify) const { 2037 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2038 auto E = MBB.end(); 2039 if (I == E) 2040 return false; 2041 2042 // Skip over the instructions that are artificially terminators for special 2043 // exec management. 2044 while (I != E && !I->isBranch() && !I->isReturn() && 2045 I->getOpcode() != AMDGPU::SI_MASK_BRANCH) { 2046 switch (I->getOpcode()) { 2047 case AMDGPU::SI_MASK_BRANCH: 2048 case AMDGPU::S_MOV_B64_term: 2049 case AMDGPU::S_XOR_B64_term: 2050 case AMDGPU::S_ANDN2_B64_term: 2051 case AMDGPU::S_MOV_B32_term: 2052 case AMDGPU::S_XOR_B32_term: 2053 case AMDGPU::S_OR_B32_term: 2054 case AMDGPU::S_ANDN2_B32_term: 2055 break; 2056 case AMDGPU::SI_IF: 2057 case AMDGPU::SI_ELSE: 2058 case AMDGPU::SI_KILL_I1_TERMINATOR: 2059 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2060 // FIXME: It's messy that these need to be considered here at all. 2061 return true; 2062 default: 2063 llvm_unreachable("unexpected non-branch terminator inst"); 2064 } 2065 2066 ++I; 2067 } 2068 2069 if (I == E) 2070 return false; 2071 2072 if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH) 2073 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2074 2075 ++I; 2076 2077 // TODO: Should be able to treat as fallthrough? 2078 if (I == MBB.end()) 2079 return true; 2080 2081 if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify)) 2082 return true; 2083 2084 MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB(); 2085 2086 // Specifically handle the case where the conditional branch is to the same 2087 // destination as the mask branch. e.g. 2088 // 2089 // si_mask_branch BB8 2090 // s_cbranch_execz BB8 2091 // s_cbranch BB9 2092 // 2093 // This is required to understand divergent loops which may need the branches 2094 // to be relaxed. 2095 if (TBB != MaskBrDest || Cond.empty()) 2096 return true; 2097 2098 auto Pred = Cond[0].getImm(); 2099 return (Pred != EXECZ && Pred != EXECNZ); 2100 } 2101 2102 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2103 int *BytesRemoved) const { 2104 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2105 2106 unsigned Count = 0; 2107 unsigned RemovedSize = 0; 2108 while (I != MBB.end()) { 2109 MachineBasicBlock::iterator Next = std::next(I); 2110 if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) { 2111 I = Next; 2112 continue; 2113 } 2114 2115 RemovedSize += getInstSizeInBytes(*I); 2116 I->eraseFromParent(); 2117 ++Count; 2118 I = Next; 2119 } 2120 2121 if (BytesRemoved) 2122 *BytesRemoved = RemovedSize; 2123 2124 return Count; 2125 } 2126 2127 // Copy the flags onto the implicit condition register operand. 
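// The conditional branches read their condition through an implicit
// vcc/scc operand; carrying over the kill/undef flags keeps the liveness
// information consistent when branches are rewritten.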
2128 static void preserveCondRegFlags(MachineOperand &CondReg,
2129 const MachineOperand &OrigCond) {
2130 CondReg.setIsUndef(OrigCond.isUndef());
2131 CondReg.setIsKill(OrigCond.isKill());
2132 }
2133
2134 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
2135 MachineBasicBlock *TBB,
2136 MachineBasicBlock *FBB,
2137 ArrayRef<MachineOperand> Cond,
2138 const DebugLoc &DL,
2139 int *BytesAdded) const {
2140 if (!FBB && Cond.empty()) {
2141 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2142 .addMBB(TBB);
2143 if (BytesAdded)
2144 *BytesAdded = 4;
2145 return 1;
2146 }
2147
2148 if (Cond.size() == 1 && Cond[0].isReg()) {
2149 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
2150 .add(Cond[0])
2151 .addMBB(TBB);
2152 return 1;
2153 }
2154
2155 assert(TBB && Cond[0].isImm());
2156
2157 unsigned Opcode
2158 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));
2159
2160 if (!FBB) {
2162 MachineInstr *CondBr =
2163 BuildMI(&MBB, DL, get(Opcode))
2164 .addMBB(TBB);
2165
2166 // Copy the flags onto the implicit condition register operand.
2167 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2168
2169 if (BytesAdded)
2170 *BytesAdded = 4;
2171 return 1;
2172 }
2173
2174 assert(TBB && FBB);
2175
2176 MachineInstr *CondBr =
2177 BuildMI(&MBB, DL, get(Opcode))
2178 .addMBB(TBB);
2179 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
2180 .addMBB(FBB);
2181
2182 // Copy the flags onto the implicit condition register operand.
2183 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
2185
2186 if (BytesAdded)
2187 *BytesAdded = 8;
2188
2189 return 2;
2190 }
2191
2192 bool SIInstrInfo::reverseBranchCondition(
2193 SmallVectorImpl<MachineOperand> &Cond) const {
2194 if (Cond.size() != 2) {
2195 return true;
2196 }
2197
2198 if (Cond[0].isImm()) {
2199 Cond[0].setImm(-Cond[0].getImm());
2200 return false;
2201 }
2202
2203 return true;
2204 }
2205
2206 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
2207 ArrayRef<MachineOperand> Cond,
2208 unsigned DstReg, unsigned TrueReg,
2209 unsigned FalseReg, int &CondCycles,
2210 int &TrueCycles, int &FalseCycles) const {
2211 switch (Cond[0].getImm()) {
2212 case VCCNZ:
2213 case VCCZ: {
2214 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2215 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2216 assert(MRI.getRegClass(FalseReg) == RC);
2217
2218 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2219 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
2220
2221 // Limit to equal cost for branch vs. N v_cndmask_b32s.
2222 return RI.hasVGPRs(RC) && NumInsts <= 6;
2223 }
2224 case SCC_TRUE:
2225 case SCC_FALSE: {
2226 // FIXME: We could insert for VGPRs if we could replace the original compare
2227 // with a vector one.
2228 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
2229 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg);
2230 assert(MRI.getRegClass(FalseReg) == RC);
2231
2232 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32;
2233
2234 // An even number of 32-bit halves can be handled with s_cselect_b64.
2235 if (NumInsts % 2 == 0)
2236 NumInsts /= 2;
2237
2238 CondCycles = TrueCycles = FalseCycles = NumInsts; // ???
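// s_cselect only operates on scalar registers, so SCC-based selects
// require SGPR operands.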
2239 return RI.isSGPRClass(RC); 2240 } 2241 default: 2242 return false; 2243 } 2244 } 2245 2246 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2247 MachineBasicBlock::iterator I, const DebugLoc &DL, 2248 unsigned DstReg, ArrayRef<MachineOperand> Cond, 2249 unsigned TrueReg, unsigned FalseReg) const { 2250 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2251 if (Pred == VCCZ || Pred == SCC_FALSE) { 2252 Pred = static_cast<BranchPredicate>(-Pred); 2253 std::swap(TrueReg, FalseReg); 2254 } 2255 2256 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2257 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2258 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2259 2260 if (DstSize == 32) { 2261 unsigned SelOp = Pred == SCC_TRUE ? 2262 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; 2263 2264 // Instruction's operands are backwards from what is expected. 2265 MachineInstr *Select = 2266 BuildMI(MBB, I, DL, get(SelOp), DstReg) 2267 .addReg(FalseReg) 2268 .addReg(TrueReg); 2269 2270 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2271 return; 2272 } 2273 2274 if (DstSize == 64 && Pred == SCC_TRUE) { 2275 MachineInstr *Select = 2276 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2277 .addReg(FalseReg) 2278 .addReg(TrueReg); 2279 2280 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2281 return; 2282 } 2283 2284 static const int16_t Sub0_15[] = { 2285 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2286 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2287 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2288 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2289 }; 2290 2291 static const int16_t Sub0_15_64[] = { 2292 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2293 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2294 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2295 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2296 }; 2297 2298 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2299 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2300 const int16_t *SubIndices = Sub0_15; 2301 int NElts = DstSize / 32; 2302 2303 // 64-bit select is only available for SALU. 2304 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2305 if (Pred == SCC_TRUE) { 2306 if (NElts % 2) { 2307 SelOp = AMDGPU::S_CSELECT_B32; 2308 EltRC = &AMDGPU::SGPR_32RegClass; 2309 } else { 2310 SelOp = AMDGPU::S_CSELECT_B64; 2311 EltRC = &AMDGPU::SGPR_64RegClass; 2312 SubIndices = Sub0_15_64; 2313 NElts /= 2; 2314 } 2315 } 2316 2317 MachineInstrBuilder MIB = BuildMI( 2318 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2319 2320 I = MIB->getIterator(); 2321 2322 SmallVector<unsigned, 8> Regs; 2323 for (int Idx = 0; Idx != NElts; ++Idx) { 2324 Register DstElt = MRI.createVirtualRegister(EltRC); 2325 Regs.push_back(DstElt); 2326 2327 unsigned SubIdx = SubIndices[Idx]; 2328 2329 MachineInstr *Select = 2330 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2331 .addReg(FalseReg, 0, SubIdx) 2332 .addReg(TrueReg, 0, SubIdx); 2333 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2334 fixImplicitOperands(*Select); 2335 2336 MIB.addReg(DstElt) 2337 .addImm(SubIdx); 2338 } 2339 } 2340 2341 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 2342 switch (MI.getOpcode()) { 2343 case AMDGPU::V_MOV_B32_e32: 2344 case AMDGPU::V_MOV_B32_e64: 2345 case AMDGPU::V_MOV_B64_PSEUDO: { 2346 // If there are additional implicit register operands, this may be used for 2347 // register indexing so the source register operand isn't simply copied. 
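// Expect exactly the operands listed in the descriptor (explicit plus
// implicit uses); any extra implicit operand suggests the mov participates
// in register indexing and is not a simple copy.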
2348 unsigned NumOps = MI.getDesc().getNumOperands() +
2349 MI.getDesc().getNumImplicitUses();
2350
2351 return MI.getNumOperands() == NumOps;
2352 }
2353 case AMDGPU::S_MOV_B32:
2354 case AMDGPU::S_MOV_B64:
2355 case AMDGPU::COPY:
2356 case AMDGPU::V_ACCVGPR_WRITE_B32:
2357 case AMDGPU::V_ACCVGPR_READ_B32:
2358 return true;
2359 default:
2360 return false;
2361 }
2362 }
2363
2364 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2365 unsigned Kind) const {
2366 switch (Kind) {
2367 case PseudoSourceValue::Stack:
2368 case PseudoSourceValue::FixedStack:
2369 return AMDGPUAS::PRIVATE_ADDRESS;
2370 case PseudoSourceValue::ConstantPool:
2371 case PseudoSourceValue::GOT:
2372 case PseudoSourceValue::JumpTable:
2373 case PseudoSourceValue::GlobalValueCallEntry:
2374 case PseudoSourceValue::ExternalSymbolCallEntry:
2375 case PseudoSourceValue::TargetCustom:
2376 return AMDGPUAS::CONSTANT_ADDRESS;
2377 }
2378 return AMDGPUAS::FLAT_ADDRESS;
2379 }
2380
2381 static void removeModOperands(MachineInstr &MI) {
2382 unsigned Opc = MI.getOpcode();
2383 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2384 AMDGPU::OpName::src0_modifiers);
2385 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2386 AMDGPU::OpName::src1_modifiers);
2387 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2388 AMDGPU::OpName::src2_modifiers);
2389
2390 MI.RemoveOperand(Src2ModIdx);
2391 MI.RemoveOperand(Src1ModIdx);
2392 MI.RemoveOperand(Src0ModIdx);
2393 }
2394
2395 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2396 unsigned Reg, MachineRegisterInfo *MRI) const {
2397 if (!MRI->hasOneNonDBGUse(Reg))
2398 return false;
2399
2400 switch (DefMI.getOpcode()) {
2401 default:
2402 return false;
2403 case AMDGPU::S_MOV_B64:
2404 // TODO: We could fold 64-bit immediates, but this gets complicated
2405 // when there are sub-registers.
2406 return false;
2407
2408 case AMDGPU::V_MOV_B32_e32:
2409 case AMDGPU::S_MOV_B32:
2410 case AMDGPU::V_ACCVGPR_WRITE_B32:
2411 break;
2412 }
2413
2414 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2415 assert(ImmOp);
2416 // FIXME: We could handle FrameIndex values here.
2417 if (!ImmOp->isImm())
2418 return false;
2419
2420 unsigned Opc = UseMI.getOpcode();
2421 if (Opc == AMDGPU::COPY) {
2422 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg());
2423 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2424 if (RI.isAGPR(*MRI, UseMI.getOperand(0).getReg())) {
2425 if (!isInlineConstant(*ImmOp, AMDGPU::OPERAND_REG_INLINE_AC_INT32))
2426 return false;
2427 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32;
2428 }
2429 UseMI.setDesc(get(NewOpc));
2430 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm());
2431 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent());
2432 return true;
2433 }
2434
2435 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2436 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64 ||
2437 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2438 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64) {
2439 // Don't fold if we are using source or output modifiers. The new VOP2
2440 // instructions don't have them.
2441 if (hasAnyModifiersSet(UseMI))
2442 return false;
2443
2444 // If this is a free constant, there's no reason to do this.
2445 // TODO: We could fold this here instead of letting SIFoldOperands do it
2446 // later.
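// "Free" means the immediate is an inline constant, which occupies no
// literal slot and is not worth rewriting the instruction for.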
2447 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0);
2448
2449 // Any src operand can be used for the legality check.
2450 if (isInlineConstant(UseMI, *Src0, *ImmOp))
2451 return false;
2452
2453 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 ||
2454 Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64;
2455 bool IsFMA = Opc == AMDGPU::V_FMA_F32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2456 Opc == AMDGPU::V_FMA_F16 || Opc == AMDGPU::V_FMAC_F16_e64;
2457 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1);
2458 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2);
2459
2460 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
2461 // We should only expect these to be on src0 due to canonicalizations.
2462 if (Src0->isReg() && Src0->getReg() == Reg) {
2463 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))
2464 return false;
2465
2466 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg())))
2467 return false;
2468
2469 unsigned NewOpc =
2470 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16)
2471 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16);
2472 if (pseudoToMCOpcode(NewOpc) == -1)
2473 return false;
2474
2475 // We need to swap operands 0 and 1 since madmk constant is at operand 1.
2476
2477 const int64_t Imm = ImmOp->getImm();
2478
2479 // FIXME: This would be a lot easier if we could return a new instruction
2480 // instead of having to modify in place.
2481
2482 // Remove these first since they are at the end.
2483 UseMI.RemoveOperand(
2484 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2485 UseMI.RemoveOperand(
2486 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2487
2488 Register Src1Reg = Src1->getReg();
2489 unsigned Src1SubReg = Src1->getSubReg();
2490 Src0->setReg(Src1Reg);
2491 Src0->setSubReg(Src1SubReg);
2492 Src0->setIsKill(Src1->isKill());
2493
2494 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2495 Opc == AMDGPU::V_MAC_F16_e64 ||
2496 Opc == AMDGPU::V_FMAC_F32_e64 ||
2497 Opc == AMDGPU::V_FMAC_F16_e64)
2498 UseMI.untieRegOperand(
2499 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2500
2501 Src1->ChangeToImmediate(Imm);
2502
2503 removeModOperands(UseMI);
2504 UseMI.setDesc(get(NewOpc));
2505
2506 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2507 if (DeleteDef)
2508 DefMI.eraseFromParent();
2509
2510 return true;
2511 }
2512
2513 // Added part is the constant: Use v_madak_{f16, f32}.
2514 if (Src2->isReg() && Src2->getReg() == Reg) {
2515 // Not allowed to use constant bus for another operand.
2516 // We can however allow an inline immediate as src0.
2517 bool Src0Inlined = false;
2518 if (Src0->isReg()) {
2519 // Try to inline the constant if possible.
2520 // If the def is a move of an immediate and this is its only use,
2521 // folding it here saves a VGPR.
2522 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg());
2523 if (Def && Def->isMoveImmediate() &&
2524 isInlineConstant(Def->getOperand(1)) &&
2525 MRI->hasOneUse(Src0->getReg())) {
2526 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2527 Src0Inlined = true;
2528 } else if ((Register::isPhysicalRegister(Src0->getReg()) &&
2529 (ST.getConstantBusLimit(Opc) <= 1 &&
2530 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) ||
2531 (Register::isVirtualRegister(Src0->getReg()) &&
2532 (ST.getConstantBusLimit(Opc) <= 1 &&
2533 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))))
2534 return false;
2535 // VGPR is okay as Src0 - fallthrough
2536 }
2537
2538 if (Src1->isReg() && !Src0Inlined) {
2539 // We still have one slot for an inlinable constant - try to fill it.
2540 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg());
2541 if (Def && Def->isMoveImmediate() &&
2542 isInlineConstant(Def->getOperand(1)) &&
2543 MRI->hasOneUse(Src1->getReg()) &&
2544 commuteInstruction(UseMI)) {
2545 Src0->ChangeToImmediate(Def->getOperand(1).getImm());
2546 } else if ((Register::isPhysicalRegister(Src1->getReg()) &&
2547 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) ||
2548 (Register::isVirtualRegister(Src1->getReg()) &&
2549 RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))))
2550 return false;
2551 // VGPR is okay as Src1 - fallthrough
2552 }
2553
2554 unsigned NewOpc =
2555 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16)
2556 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16);
2557 if (pseudoToMCOpcode(NewOpc) == -1)
2558 return false;
2559
2560 const int64_t Imm = ImmOp->getImm();
2561
2562 // FIXME: This would be a lot easier if we could return a new instruction
2563 // instead of having to modify in place.
2564
2565 // Remove these first since they are at the end.
2566 UseMI.RemoveOperand(
2567 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod));
2568 UseMI.RemoveOperand(
2569 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp));
2570
2571 if (Opc == AMDGPU::V_MAC_F32_e64 ||
2572 Opc == AMDGPU::V_MAC_F16_e64 ||
2573 Opc == AMDGPU::V_FMAC_F32_e64 ||
2574 Opc == AMDGPU::V_FMAC_F16_e64)
2575 UseMI.untieRegOperand(
2576 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2));
2577
2578 // ChangeToImmediate adds Src2 back to the instruction.
2579 Src2->ChangeToImmediate(Imm);
2580
2581 // These come before src2.
2582 removeModOperands(UseMI);
2583 UseMI.setDesc(get(NewOpc));
2584 // UseMI may have been commuted, leaving an SGPR in src1. An inline
2585 // constant together with an SGPR would violate the constant bus
2586 // restriction, so re-legalize the operands.
2587 legalizeOperands(UseMI);
2588
2589 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
2590 if (DeleteDef)
2591 DefMI.eraseFromParent();
2592
2593 return true;
2594 }
2595 }
2596
2597 return false;
2598 }
2599
2600 static bool offsetsDoNotOverlap(int WidthA, int OffsetA,
2601 int WidthB, int OffsetB) {
2602 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2603 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2604 int LowWidth = (LowOffset == OffsetA) ?
WidthA : WidthB; 2605 return LowOffset + LowWidth <= HighOffset; 2606 } 2607 2608 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 2609 const MachineInstr &MIb) const { 2610 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 2611 int64_t Offset0, Offset1; 2612 bool Offset0IsScalable, Offset1IsScalable; 2613 if (!getMemOperandsWithOffset(MIa, BaseOps0, Offset0, Offset0IsScalable, &RI) || 2614 !getMemOperandsWithOffset(MIb, BaseOps1, Offset1, Offset1IsScalable, &RI)) 2615 return false; 2616 2617 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 2618 return false; 2619 2620 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2621 // FIXME: Handle ds_read2 / ds_write2. 2622 return false; 2623 } 2624 unsigned Width0 = MIa.memoperands().front()->getSize(); 2625 unsigned Width1 = MIb.memoperands().front()->getSize(); 2626 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 2627 } 2628 2629 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 2630 const MachineInstr &MIb) const { 2631 assert(MIa.mayLoadOrStore() && 2632 "MIa must load from or modify a memory location"); 2633 assert(MIb.mayLoadOrStore() && 2634 "MIb must load from or modify a memory location"); 2635 2636 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2637 return false; 2638 2639 // XXX - Can we relax this between address spaces? 2640 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2641 return false; 2642 2643 // TODO: Should we check the address space from the MachineMemOperand? That 2644 // would allow us to distinguish objects we know don't alias based on the 2645 // underlying address space, even if it was lowered to a different one, 2646 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2647 // buffer. 
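// Classify both accesses: two accesses of the same kind are disambiguated
// by offset, while accesses of provably disjoint kinds (e.g. DS vs. SMRD)
// cannot alias; FLAT may overlap nearly anything and is handled
// conservatively.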
2648 if (isDS(MIa)) {
2649 if (isDS(MIb))
2650 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2651
2652 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb);
2653 }
2654
2655 if (isMUBUF(MIa) || isMTBUF(MIa)) {
2656 if (isMUBUF(MIb) || isMTBUF(MIb))
2657 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2658
2659 return !isFLAT(MIb) && !isSMRD(MIb);
2660 }
2661
2662 if (isSMRD(MIa)) {
2663 if (isSMRD(MIb))
2664 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2665
2666 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb);
2667 }
2668
2669 if (isFLAT(MIa)) {
2670 if (isFLAT(MIb))
2671 return checkInstOffsetsDoNotOverlap(MIa, MIb);
2672
2673 return false;
2674 }
2675
2676 return false;
2677 }
2678
// Returns the immediate that feeds \p MO through a v_mov_b32, or 0 if there
// is none. Callers test the result for truth, so a literal 0 can never be
// reported as foldable by this helper.
2679 static int64_t getFoldableImm(const MachineOperand* MO) {
2680 if (!MO->isReg())
2681 return 0;
2682 const MachineFunction *MF = MO->getParent()->getParent()->getParent();
2683 const MachineRegisterInfo &MRI = MF->getRegInfo();
2684 auto Def = MRI.getUniqueVRegDef(MO->getReg());
2685 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 &&
2686 Def->getOperand(1).isImm())
2687 return Def->getOperand(1).getImm();
2688 return 0;
2689 }
2690
2691 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB,
2692 MachineInstr &MI,
2693 LiveVariables *LV) const {
2694 unsigned Opc = MI.getOpcode();
2695 bool IsF16 = false;
2696 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 ||
2697 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64;
2698
2699 switch (Opc) {
2700 default:
2701 return nullptr;
2702 case AMDGPU::V_MAC_F16_e64:
2703 case AMDGPU::V_FMAC_F16_e64:
2704 IsF16 = true;
2705 LLVM_FALLTHROUGH;
2706 case AMDGPU::V_MAC_F32_e64:
2707 case AMDGPU::V_FMAC_F32_e64:
2708 break;
2709 case AMDGPU::V_MAC_F16_e32:
2710 case AMDGPU::V_FMAC_F16_e32:
2711 IsF16 = true;
2712 LLVM_FALLTHROUGH;
2713 case AMDGPU::V_MAC_F32_e32:
2714 case AMDGPU::V_FMAC_F32_e32: {
2715 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
2716 AMDGPU::OpName::src0);
2717 const MachineOperand *Src0 = &MI.getOperand(Src0Idx);
2718 if (!Src0->isReg() && !Src0->isImm())
2719 return nullptr;
2720
2721 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0))
2722 return nullptr;
2723
2724 break;
2725 }
2726 }
2727
2728 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst);
2729 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0);
2730 const MachineOperand *Src0Mods =
2731 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers);
2732 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
2733 const MachineOperand *Src1Mods =
2734 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers);
2735 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
2736 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
2737 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod);
2738
2739 if (!Src0Mods && !Src1Mods && !Clamp && !Omod &&
2740 // If we have an SGPR input, we will violate the constant bus restriction.
2741 (ST.getConstantBusLimit(Opc) > 1 ||
2742 !Src0->isReg() ||
2743 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) {
2744 if (auto Imm = getFoldableImm(Src2)) {
2745 unsigned NewOpc =
2746 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32)
2747 : (IsF16 ?
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 2748 if (pseudoToMCOpcode(NewOpc) != -1) 2749 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2750 .add(*Dst) 2751 .add(*Src0) 2752 .add(*Src1) 2753 .addImm(Imm); 2754 } 2755 unsigned NewOpc = 2756 IsFMA ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 2757 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 2758 if (auto Imm = getFoldableImm(Src1)) { 2759 if (pseudoToMCOpcode(NewOpc) != -1) 2760 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2761 .add(*Dst) 2762 .add(*Src0) 2763 .addImm(Imm) 2764 .add(*Src2); 2765 } 2766 if (auto Imm = getFoldableImm(Src0)) { 2767 if (pseudoToMCOpcode(NewOpc) != -1 && 2768 isOperandLegal(MI, AMDGPU::getNamedOperandIdx(NewOpc, 2769 AMDGPU::OpName::src0), Src1)) 2770 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2771 .add(*Dst) 2772 .add(*Src1) 2773 .addImm(Imm) 2774 .add(*Src2); 2775 } 2776 } 2777 2778 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16 : AMDGPU::V_FMA_F32) 2779 : (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2780 if (pseudoToMCOpcode(NewOpc) == -1) 2781 return nullptr; 2782 2783 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2784 .add(*Dst) 2785 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2786 .add(*Src0) 2787 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2788 .add(*Src1) 2789 .addImm(0) // Src mods 2790 .add(*Src2) 2791 .addImm(Clamp ? Clamp->getImm() : 0) 2792 .addImm(Omod ? Omod->getImm() : 0); 2793 } 2794 2795 // It's not generally safe to move VALU instructions across these since it will 2796 // start using the register as a base index rather than directly. 2797 // XXX - Why isn't hasSideEffects sufficient for these? 2798 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2799 switch (MI.getOpcode()) { 2800 case AMDGPU::S_SET_GPR_IDX_ON: 2801 case AMDGPU::S_SET_GPR_IDX_MODE: 2802 case AMDGPU::S_SET_GPR_IDX_OFF: 2803 return true; 2804 default: 2805 return false; 2806 } 2807 } 2808 2809 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2810 const MachineBasicBlock *MBB, 2811 const MachineFunction &MF) const { 2812 // XXX - Do we want the SP check in the base implementation? 2813 2814 // Target-independent instructions do not have an implicit-use of EXEC, even 2815 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2816 // boundaries prevents incorrect movements of such instructions. 2817 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2818 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2819 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2820 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2821 MI.getOpcode() == AMDGPU::S_DENORM_MODE || 2822 changesVGPRIndexingMode(MI); 2823 } 2824 2825 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 2826 return Opcode == AMDGPU::DS_ORDERED_COUNT || 2827 Opcode == AMDGPU::DS_GWS_INIT || 2828 Opcode == AMDGPU::DS_GWS_SEMA_V || 2829 Opcode == AMDGPU::DS_GWS_SEMA_BR || 2830 Opcode == AMDGPU::DS_GWS_SEMA_P || 2831 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 2832 Opcode == AMDGPU::DS_GWS_BARRIER; 2833 } 2834 2835 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 2836 unsigned Opcode = MI.getOpcode(); 2837 2838 if (MI.mayStore() && isSMRD(MI)) 2839 return true; // scalar store or atomic 2840 2841 // This will terminate the function when other lanes may need to continue. 
2842 if (MI.isReturn()) 2843 return true; 2844 2845 // These instructions cause shader I/O that may cause hardware lockups 2846 // when executed with an empty EXEC mask. 2847 // 2848 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 2849 // EXEC = 0, but checking for that case here seems not worth it 2850 // given the typical code patterns. 2851 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 2852 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 2853 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 2854 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 2855 return true; 2856 2857 if (MI.isCall() || MI.isInlineAsm()) 2858 return true; // conservative assumption 2859 2860 // These are like SALU instructions in terms of effects, so it's questionable 2861 // whether we should return true for those. 2862 // 2863 // However, executing them with EXEC = 0 causes them to operate on undefined 2864 // data, which we avoid by returning true here. 2865 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 2866 return true; 2867 2868 return false; 2869 } 2870 2871 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 2872 const MachineInstr &MI) const { 2873 if (MI.isMetaInstruction()) 2874 return false; 2875 2876 // This won't read exec if this is an SGPR->SGPR copy. 2877 if (MI.isCopyLike()) { 2878 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 2879 return true; 2880 2881 // Make sure this isn't copying exec as a normal operand 2882 return MI.readsRegister(AMDGPU::EXEC, &RI); 2883 } 2884 2885 // Make a conservative assumption about the callee. 2886 if (MI.isCall()) 2887 return true; 2888 2889 // Be conservative with any unhandled generic opcodes. 2890 if (!isTargetSpecificOpcode(MI.getOpcode())) 2891 return true; 2892 2893 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 2894 } 2895 2896 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2897 switch (Imm.getBitWidth()) { 2898 case 1: // This likely will be a condition code mask. 2899 return true; 2900 2901 case 32: 2902 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2903 ST.hasInv2PiInlineImm()); 2904 case 64: 2905 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2906 ST.hasInv2PiInlineImm()); 2907 case 16: 2908 return ST.has16BitInsts() && 2909 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 2910 ST.hasInv2PiInlineImm()); 2911 default: 2912 llvm_unreachable("invalid bitwidth"); 2913 } 2914 } 2915 2916 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 2917 uint8_t OperandType) const { 2918 if (!MO.isImm() || 2919 OperandType < AMDGPU::OPERAND_SRC_FIRST || 2920 OperandType > AMDGPU::OPERAND_SRC_LAST) 2921 return false; 2922 2923 // MachineOperand provides no way to tell the true operand size, since it only 2924 // records a 64-bit value. We need to know the size to determine if a 32-bit 2925 // floating point immediate bit pattern is legal for an integer immediate. It 2926 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 
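// For example, the bit pattern of 1.0f is an inline constant when used as
// a 32-bit operand, but only a literal when interpreted as a 64-bit value,
// hence the dispatch on the operand type below.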
2927 2928 int64_t Imm = MO.getImm(); 2929 switch (OperandType) { 2930 case AMDGPU::OPERAND_REG_IMM_INT32: 2931 case AMDGPU::OPERAND_REG_IMM_FP32: 2932 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2933 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 2934 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 2935 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 2936 int32_t Trunc = static_cast<int32_t>(Imm); 2937 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 2938 } 2939 case AMDGPU::OPERAND_REG_IMM_INT64: 2940 case AMDGPU::OPERAND_REG_IMM_FP64: 2941 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2942 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2943 return AMDGPU::isInlinableLiteral64(MO.getImm(), 2944 ST.hasInv2PiInlineImm()); 2945 case AMDGPU::OPERAND_REG_IMM_INT16: 2946 case AMDGPU::OPERAND_REG_IMM_FP16: 2947 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2948 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 2949 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 2950 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 2951 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 2952 // A few special case instructions have 16-bit operands on subtargets 2953 // where 16-bit instructions are not legal. 2954 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 2955 // constants in these cases 2956 int16_t Trunc = static_cast<int16_t>(Imm); 2957 return ST.has16BitInsts() && 2958 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2959 } 2960 2961 return false; 2962 } 2963 case AMDGPU::OPERAND_REG_IMM_V2INT16: 2964 case AMDGPU::OPERAND_REG_IMM_V2FP16: 2965 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 2966 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 2967 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 2968 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 2969 uint32_t Trunc = static_cast<uint32_t>(Imm); 2970 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 2971 } 2972 default: 2973 llvm_unreachable("invalid bitwidth"); 2974 } 2975 } 2976 2977 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 2978 const MCOperandInfo &OpInfo) const { 2979 switch (MO.getType()) { 2980 case MachineOperand::MO_Register: 2981 return false; 2982 case MachineOperand::MO_Immediate: 2983 return !isInlineConstant(MO, OpInfo); 2984 case MachineOperand::MO_FrameIndex: 2985 case MachineOperand::MO_MachineBasicBlock: 2986 case MachineOperand::MO_ExternalSymbol: 2987 case MachineOperand::MO_GlobalAddress: 2988 case MachineOperand::MO_MCSymbol: 2989 return true; 2990 default: 2991 llvm_unreachable("unexpected operand type"); 2992 } 2993 } 2994 2995 static bool compareMachineOp(const MachineOperand &Op0, 2996 const MachineOperand &Op1) { 2997 if (Op0.getType() != Op1.getType()) 2998 return false; 2999 3000 switch (Op0.getType()) { 3001 case MachineOperand::MO_Register: 3002 return Op0.getReg() == Op1.getReg(); 3003 case MachineOperand::MO_Immediate: 3004 return Op0.getImm() == Op1.getImm(); 3005 default: 3006 llvm_unreachable("Didn't expect to be comparing these operand types"); 3007 } 3008 } 3009 3010 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3011 const MachineOperand &MO) const { 3012 const MCInstrDesc &InstDesc = MI.getDesc(); 3013 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3014 3015 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3016 3017 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3018 return true; 3019 3020 if (OpInfo.RegClass < 0) 3021 return false; 3022 3023 const MachineFunction *MF = MI.getParent()->getParent(); 3024 const GCNSubtarget &ST = 
MF->getSubtarget<GCNSubtarget>();
3025
3026 if (MO.isImm() && isInlineConstant(MO, OpInfo)) {
3027 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() &&
3028 OpNo == (unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(),
3029 AMDGPU::OpName::src2))
3030 return false;
3031 return RI.opCanUseInlineConstant(OpInfo.OperandType);
3032 }
3033
3034 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType))
3035 return false;
3036
3037 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo))
3038 return true;
3039
3040 return ST.hasVOP3Literal();
3041 }
3042
3043 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
3044 int Op32 = AMDGPU::getVOPe32(Opcode);
3045 if (Op32 == -1)
3046 return false;
3047
3048 return pseudoToMCOpcode(Op32) != -1;
3049 }
3050
3051 bool SIInstrInfo::hasModifiers(unsigned Opcode) const {
3052 // The src0_modifiers operand is present on all instructions
3053 // that have modifiers.
3054
3055 return AMDGPU::getNamedOperandIdx(Opcode,
3056 AMDGPU::OpName::src0_modifiers) != -1;
3057 }
3058
3059 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI,
3060 unsigned OpName) const {
3061 const MachineOperand *Mods = getNamedOperand(MI, OpName);
3062 return Mods && Mods->getImm();
3063 }
3064
3065 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const {
3066 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) ||
3067 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) ||
3068 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) ||
3069 hasModifiersSet(MI, AMDGPU::OpName::clamp) ||
3070 hasModifiersSet(MI, AMDGPU::OpName::omod);
3071 }
3072
3073 bool SIInstrInfo::canShrink(const MachineInstr &MI,
3074 const MachineRegisterInfo &MRI) const {
3075 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3076 // Can't shrink instruction with three operands.
3077 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add
3078 // a special case for it. It can only be shrunk if the third operand
3079 // is vcc, and src0_modifiers and src1_modifiers are not set.
3080 // We should handle this the same way we handle vopc, by adding
3081 // a register allocation hint pre-regalloc and then doing the shrinking
3082 // post-regalloc.
3083 if (Src2) {
3084 switch (MI.getOpcode()) {
3085 default: return false;
3086
3087 case AMDGPU::V_ADDC_U32_e64:
3088 case AMDGPU::V_SUBB_U32_e64:
3089 case AMDGPU::V_SUBBREV_U32_e64: {
3090 const MachineOperand *Src1
3091 = getNamedOperand(MI, AMDGPU::OpName::src1);
3092 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()))
3093 return false;
3094 // Additional verification is needed for sdst/src2.
3095 return true;
3096 }
3097 case AMDGPU::V_MAC_F32_e64:
3098 case AMDGPU::V_MAC_F16_e64:
3099 case AMDGPU::V_FMAC_F32_e64:
3100 case AMDGPU::V_FMAC_F16_e64:
3101 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) ||
3102 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers))
3103 return false;
3104 break;
3105
3106 case AMDGPU::V_CNDMASK_B32_e64:
3107 break;
3108 }
3109 }
3110
3111 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3112 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) ||
3113 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers)))
3114 return false;
3115
3116 // We don't need to check src0, all input types are legal, so just make sure
3117 // src0 isn't using any modifiers.
3118 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers))
3119 return false;
3120
3121 // Can it be shrunk to a valid 32 bit opcode?
3122 if (!hasVALU32BitEncoding(MI.getOpcode()))
3123 return false;
3124
3125 // Check output modifiers
3126 return !hasModifiersSet(MI, AMDGPU::OpName::omod) &&
3127 !hasModifiersSet(MI, AMDGPU::OpName::clamp);
3128 }
3129
3130 // Set VCC operand with all flags from \p Orig, except for setting it as
3131 // implicit.
3132 static void copyFlagsToImplicitVCC(MachineInstr &MI,
3133 const MachineOperand &Orig) {
3134
3135 for (MachineOperand &Use : MI.implicit_operands()) {
3136 if (Use.isUse() && Use.getReg() == AMDGPU::VCC) {
3137 Use.setIsUndef(Orig.isUndef());
3138 Use.setIsKill(Orig.isKill());
3139 return;
3140 }
3141 }
3142 }
3143
3144 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI,
3145 unsigned Op32) const {
3146 MachineBasicBlock *MBB = MI.getParent();
3147 MachineInstrBuilder Inst32 =
3148 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32));
3149
3150 // Add the dst operand if the 32-bit encoding also has an explicit $vdst.
3151 // For VOPC instructions, this is replaced by an implicit def of vcc.
3152 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst);
3153 if (Op32DstIdx != -1) {
3154 // dst
3155 Inst32.add(MI.getOperand(0));
3156 } else {
3157 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) ||
3158 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) &&
3159 "Unexpected case");
3160 }
3161
3162 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0));
3163
3164 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1);
3165 if (Src1)
3166 Inst32.add(*Src1);
3167
3168 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2);
3169
3170 if (Src2) {
3171 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2);
3172 if (Op32Src2Idx != -1) {
3173 Inst32.add(*Src2);
3174 } else {
3175 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is
3176 // replaced with an implicit read of vcc. This was already added
3177 // during the initial BuildMI, so find it to preserve the flags.
3178 copyFlagsToImplicitVCC(*Inst32, *Src2);
3179 }
3180 }
3181
3182 return Inst32;
3183 }
3184
3185 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI,
3186 const MachineOperand &MO,
3187 const MCOperandInfo &OpInfo) const {
3188 // Literal constants use the constant bus.
3189 //if (isLiteralConstantLike(MO, OpInfo))
3190 // return true;
3191 if (MO.isImm())
3192 return !isInlineConstant(MO, OpInfo);
3193
3194 if (!MO.isReg())
3195 return true; // Misc other operands like FrameIndex
3196
3197 if (!MO.isUse())
3198 return false;
3199
3200 if (Register::isVirtualRegister(MO.getReg()))
3201 return RI.isSGPRClass(MRI.getRegClass(MO.getReg()));
3202
3203 // Null is free
3204 if (MO.getReg() == AMDGPU::SGPR_NULL)
3205 return false;
3206
3207 // SGPRs use the constant bus
3208 if (MO.isImplicit()) {
3209 return MO.getReg() == AMDGPU::M0 ||
3210 MO.getReg() == AMDGPU::VCC ||
3211 MO.getReg() == AMDGPU::VCC_LO;
3212 } else {
3213 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) ||
3214 AMDGPU::SReg_64RegClass.contains(MO.getReg());
3215 }
3216 }
3217
3218 static unsigned findImplicitSGPRRead(const MachineInstr &MI) {
3219 for (const MachineOperand &MO : MI.implicit_operands()) {
3220 // We only care about reads.
3221 if (MO.isDef()) 3222 continue; 3223 3224 switch (MO.getReg()) { 3225 case AMDGPU::VCC: 3226 case AMDGPU::VCC_LO: 3227 case AMDGPU::VCC_HI: 3228 case AMDGPU::M0: 3229 case AMDGPU::FLAT_SCR: 3230 return MO.getReg(); 3231 3232 default: 3233 break; 3234 } 3235 } 3236 3237 return AMDGPU::NoRegister; 3238 } 3239 3240 static bool shouldReadExec(const MachineInstr &MI) { 3241 if (SIInstrInfo::isVALU(MI)) { 3242 switch (MI.getOpcode()) { 3243 case AMDGPU::V_READLANE_B32: 3244 case AMDGPU::V_READLANE_B32_gfx6_gfx7: 3245 case AMDGPU::V_READLANE_B32_gfx10: 3246 case AMDGPU::V_READLANE_B32_vi: 3247 case AMDGPU::V_WRITELANE_B32: 3248 case AMDGPU::V_WRITELANE_B32_gfx6_gfx7: 3249 case AMDGPU::V_WRITELANE_B32_gfx10: 3250 case AMDGPU::V_WRITELANE_B32_vi: 3251 return false; 3252 } 3253 3254 return true; 3255 } 3256 3257 if (MI.isPreISelOpcode() || 3258 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3259 SIInstrInfo::isSALU(MI) || 3260 SIInstrInfo::isSMRD(MI)) 3261 return false; 3262 3263 return true; 3264 } 3265 3266 static bool isSubRegOf(const SIRegisterInfo &TRI, 3267 const MachineOperand &SuperVec, 3268 const MachineOperand &SubReg) { 3269 if (Register::isPhysicalRegister(SubReg.getReg())) 3270 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3271 3272 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3273 SubReg.getReg() == SuperVec.getReg(); 3274 } 3275 3276 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3277 StringRef &ErrInfo) const { 3278 uint16_t Opcode = MI.getOpcode(); 3279 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3280 return true; 3281 3282 const MachineFunction *MF = MI.getParent()->getParent(); 3283 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3284 3285 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3286 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3287 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3288 3289 // Make sure the number of operands is correct. 3290 const MCInstrDesc &Desc = get(Opcode); 3291 if (!Desc.isVariadic() && 3292 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3293 ErrInfo = "Instruction has wrong number of operands."; 3294 return false; 3295 } 3296 3297 if (MI.isInlineAsm()) { 3298 // Verify register classes for inlineasm constraints. 3299 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3300 I != E; ++I) { 3301 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3302 if (!RC) 3303 continue; 3304 3305 const MachineOperand &Op = MI.getOperand(I); 3306 if (!Op.isReg()) 3307 continue; 3308 3309 Register Reg = Op.getReg(); 3310 if (!Register::isVirtualRegister(Reg) && !RC->contains(Reg)) { 3311 ErrInfo = "inlineasm operand has incorrect register class."; 3312 return false; 3313 } 3314 } 3315 3316 return true; 3317 } 3318 3319 // Make sure the register classes are correct. 3320 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3321 if (MI.getOperand(i).isFPImm()) { 3322 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast "
3323 "all fp values to integers.";
3324 return false;
3325 }
3326
3327 int RegClass = Desc.OpInfo[i].RegClass;
3328
3329 switch (Desc.OpInfo[i].OperandType) {
3330 case MCOI::OPERAND_REGISTER:
3331 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) {
3332 ErrInfo = "Illegal immediate value for operand.";
3333 return false;
3334 }
3335 break;
3336 case AMDGPU::OPERAND_REG_IMM_INT32:
3337 case AMDGPU::OPERAND_REG_IMM_FP32:
3338 break;
3339 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
3340 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
3341 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
3342 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
3343 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
3344 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
3345 case AMDGPU::OPERAND_REG_INLINE_AC_INT32:
3346 case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
3347 case AMDGPU::OPERAND_REG_INLINE_AC_INT16:
3348 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: {
3349 const MachineOperand &MO = MI.getOperand(i);
3350 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) {
3351 ErrInfo = "Illegal immediate value for operand.";
3352 return false;
3353 }
3354 break;
3355 }
3356 case MCOI::OPERAND_IMMEDIATE:
3357 case AMDGPU::OPERAND_KIMM32:
3358 // Check if this operand is an immediate.
3359 // FrameIndex operands will be replaced by immediates, so they are
3360 // allowed.
3361 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) {
3362 ErrInfo = "Expected immediate, but got non-immediate";
3363 return false;
3364 }
3365 LLVM_FALLTHROUGH;
3366 default:
3367 continue;
3368 }
3369
3370 if (!MI.getOperand(i).isReg())
3371 continue;
3372
3373 if (RegClass != -1) {
3374 Register Reg = MI.getOperand(i).getReg();
3375 if (Reg == AMDGPU::NoRegister || Register::isVirtualRegister(Reg))
3376 continue;
3377
3378 const TargetRegisterClass *RC = RI.getRegClass(RegClass);
3379 if (!RC->contains(Reg)) {
3380 ErrInfo = "Operand has incorrect register class.";
3381 return false;
3382 }
3383 }
3384 }
3385
3386 // Verify SDWA
3387 if (isSDWA(MI)) {
3388 if (!ST.hasSDWA()) {
3389 ErrInfo = "SDWA is not supported on this target";
3390 return false;
3391 }
3392
3393 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
3394
3395 const int OpIndices[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx };
3396
3397 for (int OpIdx : OpIndices) {
3398 if (OpIdx == -1)
3399 continue;
3400 const MachineOperand &MO = MI.getOperand(OpIdx);
3401
3402 if (!ST.hasSDWAScalar()) {
3403 // Only VGPRs on VI
3404 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) {
3405 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI";
3406 return false;
3407 }
3408 } else {
3409 // No immediates on GFX9
3410 if (!MO.isReg()) {
3411 ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9";
3412 return false;
3413 }
3414 }
3415 }
3416
3417 if (!ST.hasSDWAOmod()) {
3418 // No omod allowed on VI
3419 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3420 if (OMod != nullptr &&
3421 (!OMod->isImm() || OMod->getImm() != 0)) {
3422 ErrInfo = "OMod not allowed in SDWA instructions on VI";
3423 return false;
3424 }
3425 }
3426
3427 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode);
3428 if (isVOPC(BasicOpcode)) {
3429 if (!ST.hasSDWASdst() && DstIdx != -1) {
3430 // Only vcc allowed as dst on VI for VOPC
3431 const MachineOperand &Dst = MI.getOperand(DstIdx);
3432 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) {
3433 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI";
3434 return 
false;
3435 }
3436 } else if (!ST.hasSDWAOutModsVOPC()) {
3437 // No clamp allowed on GFX9 for VOPC
3438 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp);
3439 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) {
3440 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on GFX9";
3441 return false;
3442 }
3443
3444 // No omod allowed on GFX9 for VOPC
3445 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod);
3446 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) {
3447 ErrInfo = "OMod not allowed in VOPC SDWA instructions on GFX9";
3448 return false;
3449 }
3450 }
3451 }
3452
3453 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused);
3454 if (DstUnused && DstUnused->isImm() &&
3455 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) {
3456 const MachineOperand &Dst = MI.getOperand(DstIdx);
3457 if (!Dst.isReg() || !Dst.isTied()) {
3458 ErrInfo = "Dst register should have tied register";
3459 return false;
3460 }
3461
3462 const MachineOperand &TiedMO =
3463 MI.getOperand(MI.findTiedOperandIdx(DstIdx));
3464 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) {
3465 ErrInfo =
3466 "Dst register should be tied to implicit use of preserved register";
3467 return false;
3468 } else if (Register::isPhysicalRegister(TiedMO.getReg()) &&
3469 Dst.getReg() != TiedMO.getReg()) {
3470 ErrInfo = "Dst register should use same physical register as preserved";
3471 return false;
3472 }
3473 }
3474 }
3475
3476 // Verify MIMG
3477 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) {
3478 // Ensure that the return type used is large enough for all the options
3479 // being used; TFE/LWE require an extra result register.
3480 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask);
3481 if (DMask) {
3482 uint64_t DMaskImm = DMask->getImm();
3483 uint32_t RegCount =
3484 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm);
3485 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe);
3486 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe);
3487 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16);
3488
3489 // Adjust for packed 16 bit values
3490 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem())
3491 RegCount >>= 1;
3492
3493 // Adjust if using LWE or TFE
3494 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm()))
3495 RegCount += 1;
3496
3497 const uint32_t DstIdx =
3498 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata);
3499 const MachineOperand &Dst = MI.getOperand(DstIdx);
3500 if (Dst.isReg()) {
3501 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx);
3502 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32;
3503 if (RegCount > DstSize) {
3504 ErrInfo = "MIMG instruction returns too many registers for dst "
3505 "register class";
3506 return false;
3507 }
3508 }
3509 }
3510 }
3511
3512 // Verify VOP*. Ignore multiple sgpr operands on writelane.
3513 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32
3514 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) {
3515 // Only look at the true operands. Only a real operand can use the constant
3516 // bus, and we don't want to check pseudo-operands like the source modifier
3517 // flags.
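// An illustrative sketch of the restriction checked below (register numbers
// hypothetical): on a subtarget with a single constant-bus slot,
//   V_ADD_F32_e64 v0, s0, s1   ; two distinct SGPRs -> two bus reads -> rejected
//   V_ADD_F32_e64 v0, s0, v1   ; one SGPR -> one bus read -> accepted
// A 32-bit literal occupies a slot as well, so one SGPR combined with a
// literal is likewise over budget before GFX10.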
3518 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3519 3520 unsigned ConstantBusCount = 0; 3521 unsigned LiteralCount = 0; 3522 3523 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3524 ++ConstantBusCount; 3525 3526 SmallVector<unsigned, 2> SGPRsUsed; 3527 unsigned SGPRUsed = findImplicitSGPRRead(MI); 3528 if (SGPRUsed != AMDGPU::NoRegister) { 3529 ++ConstantBusCount; 3530 SGPRsUsed.push_back(SGPRUsed); 3531 } 3532 3533 for (int OpIdx : OpIndices) { 3534 if (OpIdx == -1) 3535 break; 3536 const MachineOperand &MO = MI.getOperand(OpIdx); 3537 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3538 if (MO.isReg()) { 3539 SGPRUsed = MO.getReg(); 3540 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 3541 return !RI.regsOverlap(SGPRUsed, SGPR); 3542 })) { 3543 ++ConstantBusCount; 3544 SGPRsUsed.push_back(SGPRUsed); 3545 } 3546 } else { 3547 ++ConstantBusCount; 3548 ++LiteralCount; 3549 } 3550 } 3551 } 3552 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 3553 // v_writelane_b32 is an exception from constant bus restriction: 3554 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 3555 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 3556 Opcode != AMDGPU::V_WRITELANE_B32) { 3557 ErrInfo = "VOP* instruction violates constant bus restriction"; 3558 return false; 3559 } 3560 3561 if (isVOP3(MI) && LiteralCount) { 3562 if (LiteralCount && !ST.hasVOP3Literal()) { 3563 ErrInfo = "VOP3 instruction uses literal"; 3564 return false; 3565 } 3566 if (LiteralCount > 1) { 3567 ErrInfo = "VOP3 instruction uses more than one literal"; 3568 return false; 3569 } 3570 } 3571 } 3572 3573 // Special case for writelane - this can break the multiple constant bus rule, 3574 // but still can't use more than one SGPR register 3575 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 3576 unsigned SGPRCount = 0; 3577 Register SGPRUsed = AMDGPU::NoRegister; 3578 3579 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 3580 if (OpIdx == -1) 3581 break; 3582 3583 const MachineOperand &MO = MI.getOperand(OpIdx); 3584 3585 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3586 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 3587 if (MO.getReg() != SGPRUsed) 3588 ++SGPRCount; 3589 SGPRUsed = MO.getReg(); 3590 } 3591 } 3592 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 3593 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 3594 return false; 3595 } 3596 } 3597 } 3598 3599 // Verify misc. restrictions on specific instructions. 
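// For example (hypothetical registers), the V_DIV_SCALE check below rejects
//   V_DIV_SCALE_F32 v0, vcc, v1, v2, v3
// because src0 repeats neither src1 nor src2; src0 must be a copy of the
// numerator or of the denominator operand.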
3600 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3601 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3602 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3603 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3604 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3605 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3606 if (!compareMachineOp(Src0, Src1) && 3607 !compareMachineOp(Src0, Src2)) { 3608 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3609 return false; 3610 } 3611 } 3612 } 3613 3614 if (isSOP2(MI) || isSOPC(MI)) { 3615 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3616 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3617 unsigned Immediates = 0; 3618 3619 if (!Src0.isReg() && 3620 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 3621 Immediates++; 3622 if (!Src1.isReg() && 3623 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 3624 Immediates++; 3625 3626 if (Immediates > 1) { 3627 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 3628 return false; 3629 } 3630 } 3631 3632 if (isSOPK(MI)) { 3633 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 3634 if (Desc.isBranch()) { 3635 if (!Op->isMBB()) { 3636 ErrInfo = "invalid branch target for SOPK instruction"; 3637 return false; 3638 } 3639 } else { 3640 uint64_t Imm = Op->getImm(); 3641 if (sopkIsZext(MI)) { 3642 if (!isUInt<16>(Imm)) { 3643 ErrInfo = "invalid immediate for SOPK instruction"; 3644 return false; 3645 } 3646 } else { 3647 if (!isInt<16>(Imm)) { 3648 ErrInfo = "invalid immediate for SOPK instruction"; 3649 return false; 3650 } 3651 } 3652 } 3653 } 3654 3655 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3656 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3657 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3658 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3659 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3660 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3661 3662 const unsigned StaticNumOps = Desc.getNumOperands() + 3663 Desc.getNumImplicitUses(); 3664 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3665 3666 // Allow additional implicit operands. This allows a fixup done by the post 3667 // RA scheduler where the main implicit operand is killed and implicit-defs 3668 // are added for sub-registers that remain live after this instruction. 3669 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3670 ErrInfo = "missing implicit register operands"; 3671 return false; 3672 } 3673 3674 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3675 if (IsDst) { 3676 if (!Dst->isUse()) { 3677 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3678 return false; 3679 } 3680 3681 unsigned UseOpIdx; 3682 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3683 UseOpIdx != StaticNumOps + 1) { 3684 ErrInfo = "movrel implicit operands should be tied"; 3685 return false; 3686 } 3687 } 3688 3689 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3690 const MachineOperand &ImpUse 3691 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3692 if (!ImpUse.isReg() || !ImpUse.isUse() || 3693 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 3694 ErrInfo = "src0 should be subreg of implicit vector use"; 3695 return false; 3696 } 3697 } 3698 3699 // Make sure we aren't losing exec uses in the td files. This mostly requires 3700 // being careful when using let Uses to try to add other use registers. 
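// For instance, V_MOV_B32_e32 carries an "implicit $exec" use; a TableGen
// change that accidentally dropped it (e.g. via an overriding "let Uses")
// would be diagnosed by the check below. Illustrative only.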
3701 if (shouldReadExec(MI)) { 3702 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3703 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3704 return false; 3705 } 3706 } 3707 3708 if (isSMRD(MI)) { 3709 if (MI.mayStore()) { 3710 // The register offset form of scalar stores may only use m0 as the 3711 // soffset register. 3712 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3713 if (Soff && Soff->getReg() != AMDGPU::M0) { 3714 ErrInfo = "scalar stores must use m0 as offset register"; 3715 return false; 3716 } 3717 } 3718 } 3719 3720 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { 3721 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3722 if (Offset->getImm() != 0) { 3723 ErrInfo = "subtarget does not support offsets in flat instructions"; 3724 return false; 3725 } 3726 } 3727 3728 if (isMIMG(MI)) { 3729 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 3730 if (DimOp) { 3731 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 3732 AMDGPU::OpName::vaddr0); 3733 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 3734 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 3735 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 3736 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 3737 const AMDGPU::MIMGDimInfo *Dim = 3738 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 3739 3740 if (!Dim) { 3741 ErrInfo = "dim is out of range"; 3742 return false; 3743 } 3744 3745 bool IsA16 = false; 3746 if (ST.hasR128A16()) { 3747 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 3748 IsA16 = R128A16->getImm() != 0; 3749 } else if (ST.hasGFX10A16()) { 3750 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 3751 IsA16 = A16->getImm() != 0; 3752 } 3753 3754 bool PackDerivatives = IsA16; // Either A16 or G16 3755 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 3756 3757 unsigned AddrWords = BaseOpcode->NumExtraArgs; 3758 unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) + 3759 (BaseOpcode->LodOrClampOrMip ? 1 : 0); 3760 if (IsA16) 3761 AddrWords += (AddrComponents + 1) / 2; 3762 else 3763 AddrWords += AddrComponents; 3764 3765 if (BaseOpcode->Gradients) { 3766 if (PackDerivatives) 3767 // There are two gradients per coordinate, we pack them separately. 
// For the 3d case, we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
3769 AddrWords += (Dim->NumGradients / 2 + 1) / 2 * 2;
3770 else
3771 AddrWords += Dim->NumGradients;
3772 }
3773
3774 unsigned VAddrWords;
3775 if (IsNSA) {
3776 VAddrWords = SRsrcIdx - VAddr0Idx;
3777 } else {
3778 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx);
3779 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32;
3780 if (AddrWords > 8)
3781 AddrWords = 16;
3782 else if (AddrWords > 4)
3783 AddrWords = 8;
3788 }
3789
3790 if (VAddrWords != AddrWords) {
3791 ErrInfo = "bad vaddr size";
3792 return false;
3793 }
3794 }
3795 }
3796
3797 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl);
3798 if (DppCt) {
3799 using namespace AMDGPU::DPP;
3800
3801 unsigned DC = DppCt->getImm();
3802 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 ||
3803 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST ||
3804 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) ||
3805 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) ||
3806 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) ||
3807 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) ||
3808 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) {
3809 ErrInfo = "Invalid dpp_ctrl value";
3810 return false;
3811 }
3812 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 &&
3813 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
3814 ErrInfo = "Invalid dpp_ctrl value: "
3815 "wavefront shifts are not supported on GFX10+";
3816 return false;
3817 }
3818 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 &&
3819 ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
3820 ErrInfo = "Invalid dpp_ctrl value: "
3821 "broadcasts are not supported on GFX10+";
3822 return false;
3823 }
3824 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST &&
3825 ST.getGeneration() < AMDGPUSubtarget::GFX10) {
3826 ErrInfo = "Invalid dpp_ctrl value: "
3827 "row_share and row_xmask are not supported before GFX10";
3828 return false;
3829 }
3830 }
3831
3832 return true;
3833 }
3834
3835 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const {
3836 switch (MI.getOpcode()) {
3837 default: return AMDGPU::INSTRUCTION_LIST_END;
3838 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE;
3839 case AMDGPU::COPY: return AMDGPU::COPY;
3840 case AMDGPU::PHI: return AMDGPU::PHI;
3841 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG;
3842 case AMDGPU::WQM: return AMDGPU::WQM;
3843 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM;
3844 case AMDGPU::WWM: return AMDGPU::WWM;
3845 case AMDGPU::S_MOV_B32: {
3846 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
3847 return MI.getOperand(1).isReg() ||
3848 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ?
3849 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32;
3850 }
3851 case AMDGPU::S_ADD_I32:
3852 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32;
3853 case AMDGPU::S_ADDC_U32:
3854 return AMDGPU::V_ADDC_U32_e32;
3855 case AMDGPU::S_SUB_I32:
3856 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32;
3857 // FIXME: These are not consistently handled, and selected when the carry is
3858 // used.
3859 case AMDGPU::S_ADD_U32: 3860 return AMDGPU::V_ADD_I32_e32; 3861 case AMDGPU::S_SUB_U32: 3862 return AMDGPU::V_SUB_I32_e32; 3863 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 3864 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32; 3865 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32; 3866 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32; 3867 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 3868 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 3869 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 3870 case AMDGPU::S_XNOR_B32: 3871 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 3872 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 3873 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 3874 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 3875 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 3876 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 3877 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 3878 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 3879 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 3880 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 3881 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 3882 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 3883 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 3884 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 3885 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 3886 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 3887 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 3888 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 3889 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 3890 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 3891 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 3892 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 3893 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 3894 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 3895 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 3896 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 3897 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 3898 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 3899 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 3900 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 3901 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 3902 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 3903 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 3904 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 3905 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 3906 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 3907 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 3908 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 3909 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 3910 } 3911 llvm_unreachable( 3912 "Unexpected scalar opcode without corresponding vector one!"); 3913 } 3914 3915 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 3916 unsigned OpNo) const { 3917 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3918 const MCInstrDesc &Desc = get(MI.getOpcode()); 3919 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 3920 Desc.OpInfo[OpNo].RegClass == -1) { 3921 Register Reg = MI.getOperand(OpNo).getReg(); 3922 3923 if 
(Register::isVirtualRegister(Reg)) 3924 return MRI.getRegClass(Reg); 3925 return RI.getPhysRegClass(Reg); 3926 } 3927 3928 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 3929 return RI.getRegClass(RCID); 3930 } 3931 3932 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 3933 MachineBasicBlock::iterator I = MI; 3934 MachineBasicBlock *MBB = MI.getParent(); 3935 MachineOperand &MO = MI.getOperand(OpIdx); 3936 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 3937 const SIRegisterInfo *TRI = 3938 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 3939 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 3940 const TargetRegisterClass *RC = RI.getRegClass(RCID); 3941 unsigned Size = TRI->getRegSizeInBits(*RC); 3942 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 3943 if (MO.isReg()) 3944 Opcode = AMDGPU::COPY; 3945 else if (RI.isSGPRClass(RC)) 3946 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 3947 3948 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 3949 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 3950 VRC = &AMDGPU::VReg_64RegClass; 3951 else 3952 VRC = &AMDGPU::VGPR_32RegClass; 3953 3954 Register Reg = MRI.createVirtualRegister(VRC); 3955 DebugLoc DL = MBB->findDebugLoc(I); 3956 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 3957 MO.ChangeToRegister(Reg, false); 3958 } 3959 3960 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 3961 MachineRegisterInfo &MRI, 3962 MachineOperand &SuperReg, 3963 const TargetRegisterClass *SuperRC, 3964 unsigned SubIdx, 3965 const TargetRegisterClass *SubRC) 3966 const { 3967 MachineBasicBlock *MBB = MI->getParent(); 3968 DebugLoc DL = MI->getDebugLoc(); 3969 Register SubReg = MRI.createVirtualRegister(SubRC); 3970 3971 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 3972 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3973 .addReg(SuperReg.getReg(), 0, SubIdx); 3974 return SubReg; 3975 } 3976 3977 // Just in case the super register is itself a sub-register, copy it to a new 3978 // value so we don't need to worry about merging its subreg index with the 3979 // SubIdx passed to this function. The register coalescer should be able to 3980 // eliminate this extra copy. 
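// A sketch of the copies emitted below, with hypothetical vregs, for a
// SuperReg operand %0:sub2_sub3 and SubIdx == sub0:
//   %1:super_rc = COPY %0:sub2_sub3  ; flatten the existing subreg index
//   %2:sub_rc   = COPY %1.sub0       ; net effect: %2 = %0.sub2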
3981 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 3982 3983 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 3984 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 3985 3986 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3987 .addReg(NewSuperReg, 0, SubIdx); 3988 3989 return SubReg; 3990 } 3991 3992 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 3993 MachineBasicBlock::iterator MII, 3994 MachineRegisterInfo &MRI, 3995 MachineOperand &Op, 3996 const TargetRegisterClass *SuperRC, 3997 unsigned SubIdx, 3998 const TargetRegisterClass *SubRC) const { 3999 if (Op.isImm()) { 4000 if (SubIdx == AMDGPU::sub0) 4001 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4002 if (SubIdx == AMDGPU::sub1) 4003 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4004 4005 llvm_unreachable("Unhandled register index for immediate"); 4006 } 4007 4008 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4009 SubIdx, SubRC); 4010 return MachineOperand::CreateReg(SubReg, false); 4011 } 4012 4013 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4014 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4015 assert(Inst.getNumExplicitOperands() == 3); 4016 MachineOperand Op1 = Inst.getOperand(1); 4017 Inst.RemoveOperand(1); 4018 Inst.addOperand(Op1); 4019 } 4020 4021 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4022 const MCOperandInfo &OpInfo, 4023 const MachineOperand &MO) const { 4024 if (!MO.isReg()) 4025 return false; 4026 4027 Register Reg = MO.getReg(); 4028 const TargetRegisterClass *RC = Register::isVirtualRegister(Reg) 4029 ? MRI.getRegClass(Reg) 4030 : RI.getPhysRegClass(Reg); 4031 4032 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4033 if (MO.getSubReg()) { 4034 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4035 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4036 if (!SuperRC) 4037 return false; 4038 4039 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4040 if (!DRC) 4041 return false; 4042 } 4043 return RC->hasSuperClassEq(DRC); 4044 } 4045 4046 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4047 const MCOperandInfo &OpInfo, 4048 const MachineOperand &MO) const { 4049 if (MO.isReg()) 4050 return isLegalRegOperand(MRI, OpInfo, MO); 4051 4052 // Handle non-register types that are treated like immediates. 4053 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4054 return true; 4055 } 4056 4057 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4058 const MachineOperand *MO) const { 4059 const MachineFunction &MF = *MI.getParent()->getParent(); 4060 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4061 const MCInstrDesc &InstDesc = MI.getDesc(); 4062 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4063 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4064 const TargetRegisterClass *DefinedRC = 4065 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4066 if (!MO) 4067 MO = &MI.getOperand(OpIdx); 4068 4069 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4070 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4071 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4072 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4073 return false; 4074 4075 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4076 if (MO->isReg()) 4077 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4078 4079 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4080 if (i == OpIdx) 4081 continue; 4082 const MachineOperand &Op = MI.getOperand(i); 4083 if (Op.isReg()) { 4084 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4085 if (!SGPRsUsed.count(SGPR) && 4086 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4087 if (--ConstantBusLimit <= 0) 4088 return false; 4089 SGPRsUsed.insert(SGPR); 4090 } 4091 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4092 if (--ConstantBusLimit <= 0) 4093 return false; 4094 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4095 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4096 if (!VOP3LiteralLimit--) 4097 return false; 4098 if (--ConstantBusLimit <= 0) 4099 return false; 4100 } 4101 } 4102 } 4103 4104 if (MO->isReg()) { 4105 assert(DefinedRC); 4106 return isLegalRegOperand(MRI, OpInfo, *MO); 4107 } 4108 4109 // Handle non-register types that are treated like immediates. 4110 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4111 4112 if (!DefinedRC) { 4113 // This operand expects an immediate. 4114 return true; 4115 } 4116 4117 return isImmOperandLegal(MI, OpIdx, *MO); 4118 } 4119 4120 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4121 MachineInstr &MI) const { 4122 unsigned Opc = MI.getOpcode(); 4123 const MCInstrDesc &InstrDesc = get(Opc); 4124 4125 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4126 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4127 4128 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4129 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4130 4131 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4132 // we need to only have one constant bus use before GFX10. 4133 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4134 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4135 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4136 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4137 legalizeOpWithMove(MI, Src0Idx); 4138 4139 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4140 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4141 // src0/src1 with V_READFIRSTLANE. 4142 if (Opc == AMDGPU::V_WRITELANE_B32) { 4143 const DebugLoc &DL = MI.getDebugLoc(); 4144 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4145 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4146 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4147 .add(Src0); 4148 Src0.ChangeToRegister(Reg, false); 4149 } 4150 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4151 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4152 const DebugLoc &DL = MI.getDebugLoc(); 4153 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4154 .add(Src1); 4155 Src1.ChangeToRegister(Reg, false); 4156 } 4157 return; 4158 } 4159 4160 // No VOP2 instructions support AGPRs. 
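// e.g. V_ADD_F32_e32 v0, a0, v1 is not encodable (hypothetical registers);
// legalizeOpWithMove below first inserts a COPY of the AGPR source into a
// fresh VGPR.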
4161 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4162 legalizeOpWithMove(MI, Src0Idx); 4163 4164 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4165 legalizeOpWithMove(MI, Src1Idx); 4166 4167 // VOP2 src0 instructions support all operand types, so we don't need to check 4168 // their legality. If src1 is already legal, we don't need to do anything. 4169 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4170 return; 4171 4172 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4173 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4174 // select is uniform. 4175 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4176 RI.isVGPR(MRI, Src1.getReg())) { 4177 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4178 const DebugLoc &DL = MI.getDebugLoc(); 4179 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4180 .add(Src1); 4181 Src1.ChangeToRegister(Reg, false); 4182 return; 4183 } 4184 4185 // We do not use commuteInstruction here because it is too aggressive and will 4186 // commute if it is possible. We only want to commute here if it improves 4187 // legality. This can be called a fairly large number of times so don't waste 4188 // compile time pointlessly swapping and checking legality again. 4189 if (HasImplicitSGPR || !MI.isCommutable()) { 4190 legalizeOpWithMove(MI, Src1Idx); 4191 return; 4192 } 4193 4194 // If src0 can be used as src1, commuting will make the operands legal. 4195 // Otherwise we have to give up and insert a move. 4196 // 4197 // TODO: Other immediate-like operand kinds could be commuted if there was a 4198 // MachineOperand::ChangeTo* for them. 4199 if ((!Src1.isImm() && !Src1.isReg()) || 4200 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4201 legalizeOpWithMove(MI, Src1Idx); 4202 return; 4203 } 4204 4205 int CommutedOpc = commuteOpcode(MI); 4206 if (CommutedOpc == -1) { 4207 legalizeOpWithMove(MI, Src1Idx); 4208 return; 4209 } 4210 4211 MI.setDesc(get(CommutedOpc)); 4212 4213 Register Src0Reg = Src0.getReg(); 4214 unsigned Src0SubReg = Src0.getSubReg(); 4215 bool Src0Kill = Src0.isKill(); 4216 4217 if (Src1.isImm()) 4218 Src0.ChangeToImmediate(Src1.getImm()); 4219 else if (Src1.isReg()) { 4220 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4221 Src0.setSubReg(Src1.getSubReg()); 4222 } else 4223 llvm_unreachable("Should only have register or immediate operands"); 4224 4225 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4226 Src1.setSubReg(Src0SubReg); 4227 fixImplicitOperands(MI); 4228 } 4229 4230 // Legalize VOP3 operands. All operand types are supported for any operand 4231 // but only one literal constant and only starting from GFX10. 
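// Illustrative example with a hypothetical literal operand: on GFX10,
//   V_FMA_F32 v0, 0x40490fdb, v1, v2
// is accepted (one VOP3 literal), while earlier targets must materialize the
// constant first:
//   V_MOV_B32_e32 v3, 0x40490fdb
//   V_FMA_F32 v0, v3, v1, v2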
4232 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4233 MachineInstr &MI) const { 4234 unsigned Opc = MI.getOpcode(); 4235 4236 int VOP3Idx[3] = { 4237 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4238 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4239 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4240 }; 4241 4242 if (Opc == AMDGPU::V_PERMLANE16_B32 || 4243 Opc == AMDGPU::V_PERMLANEX16_B32) { 4244 // src1 and src2 must be scalar 4245 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4246 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4247 const DebugLoc &DL = MI.getDebugLoc(); 4248 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4249 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4250 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4251 .add(Src1); 4252 Src1.ChangeToRegister(Reg, false); 4253 } 4254 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4255 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4256 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4257 .add(Src2); 4258 Src2.ChangeToRegister(Reg, false); 4259 } 4260 } 4261 4262 // Find the one SGPR operand we are allowed to use. 4263 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4264 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4265 SmallDenseSet<unsigned> SGPRsUsed; 4266 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 4267 if (SGPRReg != AMDGPU::NoRegister) { 4268 SGPRsUsed.insert(SGPRReg); 4269 --ConstantBusLimit; 4270 } 4271 4272 for (unsigned i = 0; i < 3; ++i) { 4273 int Idx = VOP3Idx[i]; 4274 if (Idx == -1) 4275 break; 4276 MachineOperand &MO = MI.getOperand(Idx); 4277 4278 if (!MO.isReg()) { 4279 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4280 continue; 4281 4282 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4283 --LiteralLimit; 4284 --ConstantBusLimit; 4285 continue; 4286 } 4287 4288 --LiteralLimit; 4289 --ConstantBusLimit; 4290 legalizeOpWithMove(MI, Idx); 4291 continue; 4292 } 4293 4294 if (RI.hasAGPRs(MRI.getRegClass(MO.getReg())) && 4295 !isOperandLegal(MI, Idx, &MO)) { 4296 legalizeOpWithMove(MI, Idx); 4297 continue; 4298 } 4299 4300 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 4301 continue; // VGPRs are legal 4302 4303 // We can use one SGPR in each VOP3 instruction prior to GFX10 4304 // and two starting from GFX10. 4305 if (SGPRsUsed.count(MO.getReg())) 4306 continue; 4307 if (ConstantBusLimit > 0) { 4308 SGPRsUsed.insert(MO.getReg()); 4309 --ConstantBusLimit; 4310 continue; 4311 } 4312 4313 // If we make it this far, then the operand is not legal and we must 4314 // legalize it. 
4315 legalizeOpWithMove(MI, Idx);
4316 }
4317 }
4318
4319 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI,
4320 MachineRegisterInfo &MRI) const {
4321 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
4322 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
4323 Register DstReg = MRI.createVirtualRegister(SRC);
4324 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
4325
4326 if (RI.hasAGPRs(VRC)) {
4327 VRC = RI.getEquivalentVGPRClass(VRC);
4328 Register NewSrcReg = MRI.createVirtualRegister(VRC);
4329 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4330 get(TargetOpcode::COPY), NewSrcReg)
4331 .addReg(SrcReg);
4332 SrcReg = NewSrcReg;
4333 }
4334
4335 if (SubRegs == 1) {
4336 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4337 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
4338 .addReg(SrcReg);
4339 return DstReg;
4340 }
4341
4342 SmallVector<unsigned, 8> SRegs;
4343 for (unsigned i = 0; i < SubRegs; ++i) {
4344 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4345 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4346 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
4347 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
4348 SRegs.push_back(SGPR);
4349 }
4350
4351 MachineInstrBuilder MIB =
4352 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
4353 get(AMDGPU::REG_SEQUENCE), DstReg);
4354 for (unsigned i = 0; i < SubRegs; ++i) {
4355 MIB.addReg(SRegs[i]);
4356 MIB.addImm(RI.getSubRegFromChannel(i));
4357 }
4358 return DstReg;
4359 }
4360
4361 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
4362 MachineInstr &MI) const {
4363
4364 // If the pointer is stored in VGPRs, then we need to move it to
4365 // SGPRs using v_readfirstlane. This is safe because we only select
4366 // loads with uniform pointers to SMRD instructions, so we know the
4367 // pointer value is uniform.
4368 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
4369 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
4370 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
4371 SBase->setReg(SGPR);
4372 }
4373 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
4374 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
4375 unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
4376 SOff->setReg(SGPR);
4377 }
4378 }
4379
4380 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB,
4381 MachineBasicBlock::iterator I,
4382 const TargetRegisterClass *DstRC,
4383 MachineOperand &Op,
4384 MachineRegisterInfo &MRI,
4385 const DebugLoc &DL) const {
4386 Register OpReg = Op.getReg();
4387 unsigned OpSubReg = Op.getSubReg();
4388
4389 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg(
4390 RI.getRegClassForReg(MRI, OpReg), OpSubReg);
4391
4392 // Check if operand is already the correct register class.
4393 if (DstRC == OpRC)
4394 return;
4395
4396 Register DstReg = MRI.createVirtualRegister(DstRC);
4397 MachineInstr *Copy =
4398 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op);
4399
4400 Op.setReg(DstReg);
4401 Op.setSubReg(0);
4402
4403 MachineInstr *Def = MRI.getVRegDef(OpReg);
4404 if (!Def)
4405 return;
4406
4407 // Try to eliminate the copy if it is copying an immediate value.
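// (e.g., with hypothetical vregs: %1 = V_MOV_B32 42; %2 = COPY %1 may fold to
// %2 = V_MOV_B32 42, after which the original mov can become dead.)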
4408 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 4409 FoldImmediate(*Copy, *Def, OpReg, &MRI); 4410 4411 bool ImpDef = Def->isImplicitDef(); 4412 while (!ImpDef && Def && Def->isCopy()) { 4413 if (Def->getOperand(1).getReg().isPhysical()) 4414 break; 4415 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 4416 ImpDef = Def && Def->isImplicitDef(); 4417 } 4418 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 4419 !ImpDef) 4420 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 4421 } 4422 4423 // Emit the actual waterfall loop, executing the wrapped instruction for each 4424 // unique value of \p Rsrc across all lanes. In the best case we execute 1 4425 // iteration, in the worst case we execute 64 (once per lane). 4426 static void 4427 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 4428 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 4429 const DebugLoc &DL, MachineOperand &Rsrc) { 4430 MachineFunction &MF = *OrigBB.getParent(); 4431 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4432 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4433 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4434 unsigned SaveExecOpc = 4435 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 4436 unsigned XorTermOpc = 4437 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 4438 unsigned AndOpc = 4439 ST.isWave32() ? AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 4440 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4441 4442 MachineBasicBlock::iterator I = LoopBB.begin(); 4443 4444 Register VRsrc = Rsrc.getReg(); 4445 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 4446 4447 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4448 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC); 4449 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC); 4450 Register AndCond = MRI.createVirtualRegister(BoolXExecRC); 4451 Register SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4452 Register SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4453 Register SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4454 Register SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 4455 Register SRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4456 4457 // Beginning of the loop, read the next Rsrc variant. 4458 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) 4459 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0); 4460 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1) 4461 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1); 4462 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2) 4463 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2); 4464 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3) 4465 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3); 4466 4467 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc) 4468 .addReg(SRsrcSub0) 4469 .addImm(AMDGPU::sub0) 4470 .addReg(SRsrcSub1) 4471 .addImm(AMDGPU::sub1) 4472 .addReg(SRsrcSub2) 4473 .addImm(AMDGPU::sub2) 4474 .addReg(SRsrcSub3) 4475 .addImm(AMDGPU::sub3); 4476 4477 // Update Rsrc operand to use the SGPR Rsrc. 4478 Rsrc.setReg(SRsrc); 4479 Rsrc.setIsKill(true); 4480 4481 // Identify all lanes with identical Rsrc operands in their VGPRs. 
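// The 128-bit descriptor is compared as two 64-bit halves (sub0_sub1 and
// sub2_sub3); a lane joins this iteration only if both halves match the
// value just read back with V_READFIRSTLANE. Roughly, per lane:
//   match = (vrsrc[63:0] == srsrc[63:0]) && (vrsrc[127:64] == srsrc[127:64])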
4482 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) 4483 .addReg(SRsrc, 0, AMDGPU::sub0_sub1) 4484 .addReg(VRsrc, 0, AMDGPU::sub0_sub1); 4485 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) 4486 .addReg(SRsrc, 0, AMDGPU::sub2_sub3) 4487 .addReg(VRsrc, 0, AMDGPU::sub2_sub3); 4488 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndCond) 4489 .addReg(CondReg0) 4490 .addReg(CondReg1); 4491 4492 MRI.setSimpleHint(SaveExec, AndCond); 4493 4494 // Update EXEC to matching lanes, saving original to SaveExec. 4495 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 4496 .addReg(AndCond, RegState::Kill); 4497 4498 // The original instruction is here; we insert the terminators after it. 4499 I = LoopBB.end(); 4500 4501 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 4502 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 4503 .addReg(Exec) 4504 .addReg(SaveExec); 4505 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 4506 } 4507 4508 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 4509 // with SGPRs by iterating over all unique values across all lanes. 4510 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 4511 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 4512 MachineBasicBlock &MBB = *MI.getParent(); 4513 MachineFunction &MF = *MBB.getParent(); 4514 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 4515 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 4516 MachineRegisterInfo &MRI = MF.getRegInfo(); 4517 MachineBasicBlock::iterator I(&MI); 4518 const DebugLoc &DL = MI.getDebugLoc(); 4519 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 4520 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 4521 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 4522 4523 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 4524 4525 // Save the EXEC mask 4526 BuildMI(MBB, I, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 4527 4528 // Killed uses in the instruction we are waterfalling around will be 4529 // incorrect due to the added control-flow. 4530 for (auto &MO : MI.uses()) { 4531 if (MO.isReg() && MO.isUse()) { 4532 MRI.clearKillFlags(MO.getReg()); 4533 } 4534 } 4535 4536 // To insert the loop we need to split the block. Move everything after this 4537 // point to a new block, and insert a new empty block between the two. 4538 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 4539 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 4540 MachineFunction::iterator MBBI(MBB); 4541 ++MBBI; 4542 4543 MF.insert(MBBI, LoopBB); 4544 MF.insert(MBBI, RemainderBB); 4545 4546 LoopBB->addSuccessor(LoopBB); 4547 LoopBB->addSuccessor(RemainderBB); 4548 4549 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 4550 MachineBasicBlock::iterator J = I++; 4551 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 4552 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 4553 LoopBB->splice(LoopBB->begin(), &MBB, J); 4554 4555 MBB.addSuccessor(LoopBB); 4556 4557 // Update dominators. We know that MBB immediately dominates LoopBB, that 4558 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 4559 // dominates all of the successors transferred to it from MBB that MBB used 4560 // to properly dominate. 
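// Resulting CFG (sketch):
//   MBB -> LoopBB -> RemainderBB -> (original successors of MBB)
//           ^    |
//           +----+  back edge taken while S_CBRANCH_EXECNZ sees live lanes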
4561 if (MDT) {
4562 MDT->addNewBlock(LoopBB, &MBB);
4563 MDT->addNewBlock(RemainderBB, LoopBB);
4564 for (auto &Succ : RemainderBB->successors()) {
4565 if (MDT->properlyDominates(&MBB, Succ)) {
4566 MDT->changeImmediateDominator(Succ, RemainderBB);
4567 }
4568 }
4569 }
4570
4571 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
4572
4573 // Restore the EXEC mask
4574 MachineBasicBlock::iterator First = RemainderBB->begin();
4575 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
4576 }
4577
4578 // Extract pointer from Rsrc and return a zero-value Rsrc replacement.
4579 static std::tuple<unsigned, unsigned>
4580 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
4581 MachineBasicBlock &MBB = *MI.getParent();
4582 MachineFunction &MF = *MBB.getParent();
4583 MachineRegisterInfo &MRI = MF.getRegInfo();
4584
4585 // Extract the ptr from the resource descriptor.
4586 unsigned RsrcPtr =
4587 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
4588 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
4589
4590 // Create an empty resource descriptor
4591 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4592 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4593 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
4594 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4595 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
4596
4597 // Zero64 = 0
4598 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
4599 .addImm(0);
4600
4601 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
4602 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
4603 .addImm(RsrcDataFormat & 0xFFFFFFFF);
4604
4605 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
4606 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
4607 .addImm(RsrcDataFormat >> 32);
4608
4609 // NewSRsrc = {Zero64, SRsrcFormat}
4610 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
4611 .addReg(Zero64)
4612 .addImm(AMDGPU::sub0_sub1)
4613 .addReg(SRsrcFormatLo)
4614 .addImm(AMDGPU::sub2)
4615 .addReg(SRsrcFormatHi)
4616 .addImm(AMDGPU::sub3);
4617
4618 return std::make_tuple(RsrcPtr, NewSRsrc);
4619 }
4620
4621 void SIInstrInfo::legalizeOperands(MachineInstr &MI,
4622 MachineDominatorTree *MDT) const {
4623 MachineFunction &MF = *MI.getParent()->getParent();
4624 MachineRegisterInfo &MRI = MF.getRegInfo();
4625
4626 // Legalize VOP2
4627 if (isVOP2(MI) || isVOPC(MI)) {
4628 legalizeOperandsVOP2(MRI, MI);
4629 return;
4630 }
4631
4632 // Legalize VOP3
4633 if (isVOP3(MI)) {
4634 legalizeOperandsVOP3(MRI, MI);
4635 return;
4636 }
4637
4638 // Legalize SMRD
4639 if (isSMRD(MI)) {
4640 legalizeOperandsSMRD(MRI, MI);
4641 return;
4642 }
4643
4644 // Legalize REG_SEQUENCE and PHI
4645 // The register class of the operands must be the same type as the register
4646 // class of the output.
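// e.g. a PHI joining an SGPR-defined value with a VGPR-defined value must
// have all of its inputs (and its result) moved to VGPRs, since moving the
// result back would require an illegal VGPR->SGPR copy. Hypothetical example:
//   %r:vgpr_32 = PHI %a:sgpr_32, %bb.0, %b:vgpr_32, %bb.1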
4647 if (MI.getOpcode() == AMDGPU::PHI) {
4648 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
4649 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
4650 if (!MI.getOperand(i).isReg() ||
4651 !Register::isVirtualRegister(MI.getOperand(i).getReg()))
4652 continue;
4653 const TargetRegisterClass *OpRC =
4654 MRI.getRegClass(MI.getOperand(i).getReg());
4655 if (RI.hasVectorRegisters(OpRC)) {
4656 VRC = OpRC;
4657 } else {
4658 SRC = OpRC;
4659 }
4660 }
4661
4662 // If any of the operands are VGPR registers, then they must all be
4663 // VGPRs; otherwise we will create illegal VGPR->SGPR copies when legalizing
4664 // them.
4665 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
4666 if (!VRC) {
4667 assert(SRC);
4668 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
4669 VRC = &AMDGPU::VReg_1RegClass;
4670 } else
4671 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4672 ? RI.getEquivalentAGPRClass(SRC)
4673 : RI.getEquivalentVGPRClass(SRC);
4674 } else {
4675 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
4676 ? RI.getEquivalentAGPRClass(VRC)
4677 : RI.getEquivalentVGPRClass(VRC);
4678 }
4679 RC = VRC;
4680 } else {
4681 RC = SRC;
4682 }
4683
4684 // Update all the operands so they have the same type.
4685 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
4686 MachineOperand &Op = MI.getOperand(I);
4687 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
4688 continue;
4689
4690 // MI is a PHI instruction.
4691 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
4692 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
4693
4694 // Avoid creating no-op copies with the same src and dst reg class. These
4695 // confuse some of the machine passes.
4696 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
4697 }
4698 }
4699
4700 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
4701 // VGPR dest type and SGPR sources, insert copies so all operands are
4702 // VGPRs. This seems to help operand folding / the register coalescer.
4703 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
4704 MachineBasicBlock *MBB = MI.getParent();
4705 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
4706 if (RI.hasVGPRs(DstRC)) {
4707 // Update all the operands so they are VGPR register classes. These may
4708 // not be the same register class because REG_SEQUENCE supports mixing
4709 // subregister index types e.g.
sub0_sub1 + sub2 + sub3
4710 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
4711 MachineOperand &Op = MI.getOperand(I);
4712 if (!Op.isReg() || !Register::isVirtualRegister(Op.getReg()))
4713 continue;
4714
4715 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg());
4716 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC);
4717 if (VRC == OpRC)
4718 continue;
4719
4720 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc());
4721 Op.setIsKill();
4722 }
4723 }
4724
4725 return;
4726 }
4727
4728 // Legalize INSERT_SUBREG
4729 // src0 must have the same register class as dst
4730 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) {
4731 Register Dst = MI.getOperand(0).getReg();
4732 Register Src0 = MI.getOperand(1).getReg();
4733 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst);
4734 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0);
4735 if (DstRC != Src0RC) {
4736 MachineBasicBlock *MBB = MI.getParent();
4737 MachineOperand &Op = MI.getOperand(1);
4738 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc());
4739 }
4740 return;
4741 }
4742
4743 // Legalize SI_INIT_M0
4744 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) {
4745 MachineOperand &Src = MI.getOperand(0);
4746 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg())))
4747 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI));
4748 return;
4749 }
4750
4751 // Legalize MIMG and MUBUF/MTBUF for shaders.
4752 //
4753 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via
4754 // scratch memory access. In both cases, the legalization never involves
4755 // conversion to the addr64 form.
4756 if (isMIMG(MI) ||
4757 (AMDGPU::isShader(MF.getFunction().getCallingConv()) &&
4758 (isMUBUF(MI) || isMTBUF(MI)))) {
4759 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc);
4760 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) {
4761 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI);
4762 SRsrc->setReg(SGPR);
4763 }
4764
4765 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp);
4766 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) {
4767 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI);
4768 SSamp->setReg(SGPR);
4769 }
4770 return;
4771 }
4772
4773 // Legalize MUBUF* instructions.
4774 int RsrcIdx =
4775 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
4776 if (RsrcIdx != -1) {
4777 // We have an MUBUF instruction
4778 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
4779 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
4780 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
4781 RI.getRegClass(RsrcRC))) {
4782 // The operands are legal.
4783 // FIXME: We may need to legalize operands besides srsrc.
4784 return;
4785 }
4786
4787 // Legalize a VGPR Rsrc.
4788 //
4789 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
4790 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
4791 // a zero-value SRsrc.
4792 //
4793 // If the instruction is _OFFSET (both idxen and offen disabled), and we
4794 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
4795 // above.
4796 //
4797 // Otherwise we are on non-ADDR64 hardware, and/or we have
4798 // idxen/offen/bothen and we fall back to a waterfall loop.
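// Sketch of the ADDR64 fix-up performed below for an instruction that
// already has a vaddr (names illustrative):
//   before: BUFFER_LOAD_DWORD_ADDR64 %vdata, %vaddr, %vrsrc(VGPR), ...
//   after:  %newvaddr = %vaddr + %vrsrc[63:0]   ; V_ADD_I32 / V_ADDC_U32 pair
//           %newsrsrc = {0, RSRC_DATA_FORMAT}   ; zero base, default format
//           BUFFER_LOAD_DWORD_ADDR64 %vdata, %newvaddr, %newsrsrc, ...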
4799
4800 MachineBasicBlock &MBB = *MI.getParent();
4801
4802 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
4803 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
4804 // This is already an ADDR64 instruction so we need to add the pointer
4805 // extracted from the resource descriptor to the current value of VAddr.
4806 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4807 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4808 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
4809
4810 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
4811 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
4812 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
4813
4814 unsigned RsrcPtr, NewSRsrc;
4815 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4816
4817 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
4818 const DebugLoc &DL = MI.getDebugLoc();
4819 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e64), NewVAddrLo)
4820 .addDef(CondReg0)
4821 .addReg(RsrcPtr, 0, AMDGPU::sub0)
4822 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
4823 .addImm(0);
4824
4825 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
4826 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
4827 .addDef(CondReg1, RegState::Dead)
4828 .addReg(RsrcPtr, 0, AMDGPU::sub1)
4829 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
4830 .addReg(CondReg0, RegState::Kill)
4831 .addImm(0);
4832
4833 // NewVaddr = {NewVaddrHi, NewVaddrLo}
4834 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
4835 .addReg(NewVAddrLo)
4836 .addImm(AMDGPU::sub0)
4837 .addReg(NewVAddrHi)
4838 .addImm(AMDGPU::sub1);
4839
4840 VAddr->setReg(NewVAddr);
4841 Rsrc->setReg(NewSRsrc);
4842 } else if (!VAddr && ST.hasAddr64()) {
4843 // This instruction is the _OFFSET variant, so we need to convert it to
4844 // ADDR64.
4845 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration()
4846 < AMDGPUSubtarget::VOLCANIC_ISLANDS &&
4847 "FIXME: Need to emit flat atomics here");
4848
4849 unsigned RsrcPtr, NewSRsrc;
4850 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
4851
4852 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
4853 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata);
4854 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset);
4855 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset);
4856 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode());
4857
4858 // Atomics with return have an additional tied operand and are
4859 // missing some of the special bits.
4860 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in);
4861 MachineInstr *Addr64;
4862
4863 if (!VDataIn) {
4864 // Regular buffer load / store.
4865 MachineInstrBuilder MIB =
4866 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
4867 .add(*VData)
4868 .addReg(NewVAddr)
4869 .addReg(NewSRsrc)
4870 .add(*SOffset)
4871 .add(*Offset);
4872
4873 // Atomics do not have this operand.
if (const MachineOperand *GLC =
4875 getNamedOperand(MI, AMDGPU::OpName::glc)) {
4876 MIB.addImm(GLC->getImm());
4877 }
4878 if (const MachineOperand *DLC =
4879 getNamedOperand(MI, AMDGPU::OpName::dlc)) {
4880 MIB.addImm(DLC->getImm());
4881 }
4882
4883 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc));
4884
4885 if (const MachineOperand *TFE =
4886 getNamedOperand(MI, AMDGPU::OpName::tfe)) {
4887 MIB.addImm(TFE->getImm());
4888 }
4889
4890 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz));
4891
4892 MIB.cloneMemRefs(MI);
4893 Addr64 = MIB;
4894 } else {
4895 // Atomics with return.
4896 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode))
4897 .add(*VData)
4898 .add(*VDataIn)
4899 .addReg(NewVAddr)
4900 .addReg(NewSRsrc)
4901 .add(*SOffset)
4902 .add(*Offset)
4903 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc))
4904 .cloneMemRefs(MI);
4905 }
4906
4907 MI.removeFromParent();
4908
4909 // NewVaddr = {RsrcPtr:sub1, RsrcPtr:sub0}, i.e. the pointer extracted from
// the original descriptor
4910 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE),
4911 NewVAddr)
4912 .addReg(RsrcPtr, 0, AMDGPU::sub0)
4913 .addImm(AMDGPU::sub0)
4914 .addReg(RsrcPtr, 0, AMDGPU::sub1)
4915 .addImm(AMDGPU::sub1);
4916 } else {
4917 // This is another variant; legalize Rsrc with waterfall loop from VGPRs
4918 // to SGPRs.
4919 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT);
4920 }
4921 }
4922 }
4923
4924 void SIInstrInfo::moveToVALU(MachineInstr &TopInst,
4925 MachineDominatorTree *MDT) const {
4926 SetVectorType Worklist;
4927 Worklist.insert(&TopInst);
4928
4929 while (!Worklist.empty()) {
4930 MachineInstr &Inst = *Worklist.pop_back_val();
4931 MachineBasicBlock *MBB = Inst.getParent();
4932 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
4933
4934 unsigned Opcode = Inst.getOpcode();
4935 unsigned NewOpcode = getVALUOp(Inst);
4936
4937 // Handle some special cases
4938 switch (Opcode) {
4939 default:
4940 break;
4941 case AMDGPU::S_ADD_U64_PSEUDO:
4942 case AMDGPU::S_SUB_U64_PSEUDO:
4943 splitScalar64BitAddSub(Worklist, Inst, MDT);
4944 Inst.eraseFromParent();
4945 continue;
4946 case AMDGPU::S_ADD_I32:
4947 case AMDGPU::S_SUB_I32:
4948 // FIXME: The u32 versions currently selected use the carry.
4949 if (moveScalarAddSub(Worklist, Inst, MDT)) 4950 continue; 4951 4952 // Default handling 4953 break; 4954 case AMDGPU::S_AND_B64: 4955 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 4956 Inst.eraseFromParent(); 4957 continue; 4958 4959 case AMDGPU::S_OR_B64: 4960 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 4961 Inst.eraseFromParent(); 4962 continue; 4963 4964 case AMDGPU::S_XOR_B64: 4965 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 4966 Inst.eraseFromParent(); 4967 continue; 4968 4969 case AMDGPU::S_NAND_B64: 4970 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 4971 Inst.eraseFromParent(); 4972 continue; 4973 4974 case AMDGPU::S_NOR_B64: 4975 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 4976 Inst.eraseFromParent(); 4977 continue; 4978 4979 case AMDGPU::S_XNOR_B64: 4980 if (ST.hasDLInsts()) 4981 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 4982 else 4983 splitScalar64BitXnor(Worklist, Inst, MDT); 4984 Inst.eraseFromParent(); 4985 continue; 4986 4987 case AMDGPU::S_ANDN2_B64: 4988 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 4989 Inst.eraseFromParent(); 4990 continue; 4991 4992 case AMDGPU::S_ORN2_B64: 4993 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 4994 Inst.eraseFromParent(); 4995 continue; 4996 4997 case AMDGPU::S_NOT_B64: 4998 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 4999 Inst.eraseFromParent(); 5000 continue; 5001 5002 case AMDGPU::S_BCNT1_I32_B64: 5003 splitScalar64BitBCNT(Worklist, Inst); 5004 Inst.eraseFromParent(); 5005 continue; 5006 5007 case AMDGPU::S_BFE_I64: 5008 splitScalar64BitBFE(Worklist, Inst); 5009 Inst.eraseFromParent(); 5010 continue; 5011 5012 case AMDGPU::S_LSHL_B32: 5013 if (ST.hasOnlyRevVALUShifts()) { 5014 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5015 swapOperands(Inst); 5016 } 5017 break; 5018 case AMDGPU::S_ASHR_I32: 5019 if (ST.hasOnlyRevVALUShifts()) { 5020 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5021 swapOperands(Inst); 5022 } 5023 break; 5024 case AMDGPU::S_LSHR_B32: 5025 if (ST.hasOnlyRevVALUShifts()) { 5026 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5027 swapOperands(Inst); 5028 } 5029 break; 5030 case AMDGPU::S_LSHL_B64: 5031 if (ST.hasOnlyRevVALUShifts()) { 5032 NewOpcode = AMDGPU::V_LSHLREV_B64; 5033 swapOperands(Inst); 5034 } 5035 break; 5036 case AMDGPU::S_ASHR_I64: 5037 if (ST.hasOnlyRevVALUShifts()) { 5038 NewOpcode = AMDGPU::V_ASHRREV_I64; 5039 swapOperands(Inst); 5040 } 5041 break; 5042 case AMDGPU::S_LSHR_B64: 5043 if (ST.hasOnlyRevVALUShifts()) { 5044 NewOpcode = AMDGPU::V_LSHRREV_B64; 5045 swapOperands(Inst); 5046 } 5047 break; 5048 5049 case AMDGPU::S_ABS_I32: 5050 lowerScalarAbs(Worklist, Inst); 5051 Inst.eraseFromParent(); 5052 continue; 5053 5054 case AMDGPU::S_CBRANCH_SCC0: 5055 case AMDGPU::S_CBRANCH_SCC1: 5056 // Clear unused bits of vcc 5057 if (ST.isWave32()) 5058 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B32), 5059 AMDGPU::VCC_LO) 5060 .addReg(AMDGPU::EXEC_LO) 5061 .addReg(AMDGPU::VCC_LO); 5062 else 5063 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 5064 AMDGPU::VCC) 5065 .addReg(AMDGPU::EXEC) 5066 .addReg(AMDGPU::VCC); 5067 break; 5068 5069 case AMDGPU::S_BFE_U64: 5070 case AMDGPU::S_BFM_B64: 5071 llvm_unreachable("Moving this op to VALU not implemented"); 5072 5073 case AMDGPU::S_PACK_LL_B32_B16: 5074 case AMDGPU::S_PACK_LH_B32_B16: 5075 case AMDGPU::S_PACK_HH_B32_B16: 5076 movePackToVALU(Worklist, MRI, Inst); 5077 
Inst.eraseFromParent();
5078 continue;
5079
5080 case AMDGPU::S_XNOR_B32:
5081 lowerScalarXnor(Worklist, Inst);
5082 Inst.eraseFromParent();
5083 continue;
5084
5085 case AMDGPU::S_NAND_B32:
5086 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32);
5087 Inst.eraseFromParent();
5088 continue;
5089
5090 case AMDGPU::S_NOR_B32:
5091 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32);
5092 Inst.eraseFromParent();
5093 continue;
5094
5095 case AMDGPU::S_ANDN2_B32:
5096 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32);
5097 Inst.eraseFromParent();
5098 continue;
5099
5100 case AMDGPU::S_ORN2_B32:
5101 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32);
5102 Inst.eraseFromParent();
5103 continue;
5104 }
5105
5106 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
5107 // We cannot move this instruction to the VALU, so we should try to
5108 // legalize its operands instead.
5109 legalizeOperands(Inst, MDT);
5110 continue;
5111 }
5112
5113 // Use the new VALU Opcode.
5114 const MCInstrDesc &NewDesc = get(NewOpcode);
5115 Inst.setDesc(NewDesc);
5116
5117 // Remove any references to SCC. Vector instructions can't read from it, and
5118 // we're just about to add the implicit use / defs of VCC; we don't want
5119 // both.
5120 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
5121 MachineOperand &Op = Inst.getOperand(i);
5122 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
5123 // Only propagate through live-def of SCC.
5124 if (Op.isDef() && !Op.isDead())
5125 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
5126 Inst.RemoveOperand(i);
5127 }
5128 }
5129
5130 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
5131 // We are converting these to a BFE, so we need to add the missing
5132 // operands for the size and offset.
5133 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
5134 Inst.addOperand(MachineOperand::CreateImm(0));
5135 Inst.addOperand(MachineOperand::CreateImm(Size));
5136
5137 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
5138 // The VALU version adds the second operand to the result, so insert an
5139 // extra 0 operand.
5140 Inst.addOperand(MachineOperand::CreateImm(0));
5141 }
5142
5143 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
5144 fixImplicitOperands(Inst);
5145
5146 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
5147 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
5148 // If we need to move this to VGPRs, we need to unpack the second operand
5149 // back into the 2 separate ones for bit offset and width.
5150 assert(OffsetWidthOp.isImm() &&
5151 "Scalar BFE is only implemented for constant width and offset");
5152 uint32_t Imm = OffsetWidthOp.getImm();
5153
5154 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5155 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5156 Inst.RemoveOperand(2); // Remove old immediate.
5157 Inst.addOperand(MachineOperand::CreateImm(Offset));
5158 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
5159 }
5160
5161 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
5162 unsigned NewDstReg = AMDGPU::NoRegister;
5163 if (HasDst) {
5164 Register DstReg = Inst.getOperand(0).getReg();
5165 if (Register::isPhysicalRegister(DstReg))
5166 continue;
5167
5168 // Update the destination register class.
5169 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
5170 if (!NewDstRC)
5171 continue;
5172
5173 if (Inst.isCopy() &&
5174 Register::isVirtualRegister(Inst.getOperand(1).getReg()) &&
5175 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
5176 // Instead of creating a copy where src and dst are the same register
5177 // class, we just replace all uses of dst with src. These kinds of
5178 // copies interfere with the heuristics MachineSink uses to decide
5179 // whether or not to split a critical edge, since the pass assumes
5180 // that copies will end up as machine instructions and not be
5181 // eliminated.
5182 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
5183 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
5184 MRI.clearKillFlags(Inst.getOperand(1).getReg());
5185 Inst.getOperand(0).setReg(DstReg);
5186
5187 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
5188 // these are deleted later, but at -O0 it would leave a suspicious
5189 // looking illegal copy of an undef register.
5190 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
5191 Inst.RemoveOperand(I);
5192 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
5193 continue;
5194 }
5195
5196 NewDstReg = MRI.createVirtualRegister(NewDstRC);
5197 MRI.replaceRegWith(DstReg, NewDstReg);
5198 }
5199
5200 // Legalize the operands
5201 legalizeOperands(Inst, MDT);
5202
5203 if (HasDst)
5204 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
5205 }
5206 }
5207
5208 // Add/sub require special handling to deal with carry outs.
5209 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
5210 MachineDominatorTree *MDT) const {
5211 if (ST.hasAddNoCarry()) {
5212 // Assume there is no user of scc since we don't select this in that case.
5213 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
5214 // is used.
5215
5216 MachineBasicBlock &MBB = *Inst.getParent();
5217 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5218
5219 Register OldDstReg = Inst.getOperand(0).getReg();
5220 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5221
5222 unsigned Opc = Inst.getOpcode();
5223 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32);
5224
5225 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ?
5226 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64;
5227
5228 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC);
5229 Inst.RemoveOperand(3);
5230
5231 Inst.setDesc(get(NewOpc));
5232 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit
5233 Inst.addImplicitDefUseOperands(*MBB.getParent());
5234 MRI.replaceRegWith(OldDstReg, ResultReg);
5235 legalizeOperands(Inst, MDT);
5236
5237 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5238 return true;
5239 }
5240
5241 return false;
5242 }
5243
5244 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
5245 MachineInstr &Inst) const {
5246 MachineBasicBlock &MBB = *Inst.getParent();
5247 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5248 MachineBasicBlock::iterator MII = Inst;
5249 DebugLoc DL = Inst.getDebugLoc();
5250
5251 MachineOperand &Dest = Inst.getOperand(0);
5252 MachineOperand &Src = Inst.getOperand(1);
5253 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5254 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5255
5256 unsigned SubOp = ST.hasAddNoCarry() ?
5257 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32;
5258
5259 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
5260 .addImm(0)
5261 .addReg(Src.getReg());
5262
5263 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
5264 .addReg(Src.getReg())
5265 .addReg(TmpReg);
5266
5267 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5268 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5269 }
5270
5271 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
5272 MachineInstr &Inst) const {
5273 MachineBasicBlock &MBB = *Inst.getParent();
5274 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5275 MachineBasicBlock::iterator MII = Inst;
5276 const DebugLoc &DL = Inst.getDebugLoc();
5277
5278 MachineOperand &Dest = Inst.getOperand(0);
5279 MachineOperand &Src0 = Inst.getOperand(1);
5280 MachineOperand &Src1 = Inst.getOperand(2);
5281
5282 if (ST.hasDLInsts()) {
5283 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5284 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
5285 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
5286
5287 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
5288 .add(Src0)
5289 .add(Src1);
5290
5291 MRI.replaceRegWith(Dest.getReg(), NewDest);
5292 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
5293 } else {
5294 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
5295 // invert either source and then perform the XOR. If either source is a
5296 // scalar register, then we can leave the inversion on the scalar unit to
5297 // achieve a better distribution of scalar and vector instructions.
5298 bool Src0IsSGPR = Src0.isReg() &&
5299 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
5300 bool Src1IsSGPR = Src1.isReg() &&
5301 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
5302 MachineInstr *Xor;
5303 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5304 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
5305
5306 // Build a pair of scalar instructions and add them to the work list.
5307 // The next iteration over the work list will lower these to the vector
5308 // unit as necessary.
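// E.g. lowering s_xnor_b32 %dst, %sgpr, %vgpr yields roughly (illustrative
// register names only):
//   %temp    = S_NOT_B32 %sgpr
//   %newdest = S_XOR_B32 %temp, %vgpr
// and the re-queued S_XOR_B32 becomes a V_XOR_B32 on the next pass once its
// VGPR operand is seen.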
5309 if (Src0IsSGPR) { 5310 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 5311 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5312 .addReg(Temp) 5313 .add(Src1); 5314 } else if (Src1IsSGPR) { 5315 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 5316 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 5317 .add(Src0) 5318 .addReg(Temp); 5319 } else { 5320 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 5321 .add(Src0) 5322 .add(Src1); 5323 MachineInstr *Not = 5324 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 5325 Worklist.insert(Not); 5326 } 5327 5328 MRI.replaceRegWith(Dest.getReg(), NewDest); 5329 5330 Worklist.insert(Xor); 5331 5332 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5333 } 5334 } 5335 5336 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 5337 MachineInstr &Inst, 5338 unsigned Opcode) const { 5339 MachineBasicBlock &MBB = *Inst.getParent(); 5340 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5341 MachineBasicBlock::iterator MII = Inst; 5342 const DebugLoc &DL = Inst.getDebugLoc(); 5343 5344 MachineOperand &Dest = Inst.getOperand(0); 5345 MachineOperand &Src0 = Inst.getOperand(1); 5346 MachineOperand &Src1 = Inst.getOperand(2); 5347 5348 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5349 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 5350 5351 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 5352 .add(Src0) 5353 .add(Src1); 5354 5355 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 5356 .addReg(Interm); 5357 5358 Worklist.insert(&Op); 5359 Worklist.insert(&Not); 5360 5361 MRI.replaceRegWith(Dest.getReg(), NewDest); 5362 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5363 } 5364 5365 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 5366 MachineInstr &Inst, 5367 unsigned Opcode) const { 5368 MachineBasicBlock &MBB = *Inst.getParent(); 5369 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5370 MachineBasicBlock::iterator MII = Inst; 5371 const DebugLoc &DL = Inst.getDebugLoc(); 5372 5373 MachineOperand &Dest = Inst.getOperand(0); 5374 MachineOperand &Src0 = Inst.getOperand(1); 5375 MachineOperand &Src1 = Inst.getOperand(2); 5376 5377 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5378 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5379 5380 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 5381 .add(Src1); 5382 5383 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 5384 .add(Src0) 5385 .addReg(Interm); 5386 5387 Worklist.insert(&Not); 5388 Worklist.insert(&Op); 5389 5390 MRI.replaceRegWith(Dest.getReg(), NewDest); 5391 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 5392 } 5393 5394 void SIInstrInfo::splitScalar64BitUnaryOp( 5395 SetVectorType &Worklist, MachineInstr &Inst, 5396 unsigned Opcode) const { 5397 MachineBasicBlock &MBB = *Inst.getParent(); 5398 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5399 5400 MachineOperand &Dest = Inst.getOperand(0); 5401 MachineOperand &Src0 = Inst.getOperand(1); 5402 DebugLoc DL = Inst.getDebugLoc(); 5403 5404 MachineBasicBlock::iterator MII = Inst; 5405 5406 const MCInstrDesc &InstDesc = get(Opcode); 5407 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
5408 MRI.getRegClass(Src0.getReg()) : 5409 &AMDGPU::SGPR_32RegClass; 5410 5411 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5412 5413 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5414 AMDGPU::sub0, Src0SubRC); 5415 5416 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 5417 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 5418 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 5419 5420 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 5421 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 5422 5423 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5424 AMDGPU::sub1, Src0SubRC); 5425 5426 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 5427 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 5428 5429 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 5430 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 5431 .addReg(DestSub0) 5432 .addImm(AMDGPU::sub0) 5433 .addReg(DestSub1) 5434 .addImm(AMDGPU::sub1); 5435 5436 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 5437 5438 Worklist.insert(&LoHalf); 5439 Worklist.insert(&HiHalf); 5440 5441 // We don't need to legalizeOperands here because for a single operand, src0 5442 // will support any kind of input. 5443 5444 // Move all users of this moved value. 5445 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 5446 } 5447 5448 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 5449 MachineInstr &Inst, 5450 MachineDominatorTree *MDT) const { 5451 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 5452 5453 MachineBasicBlock &MBB = *Inst.getParent(); 5454 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 5455 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5456 5457 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5458 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5459 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5460 5461 Register CarryReg = MRI.createVirtualRegister(CarryRC); 5462 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 5463 5464 MachineOperand &Dest = Inst.getOperand(0); 5465 MachineOperand &Src0 = Inst.getOperand(1); 5466 MachineOperand &Src1 = Inst.getOperand(2); 5467 const DebugLoc &DL = Inst.getDebugLoc(); 5468 MachineBasicBlock::iterator MII = Inst; 5469 5470 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 5471 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 5472 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 5473 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 5474 5475 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5476 AMDGPU::sub0, Src0SubRC); 5477 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5478 AMDGPU::sub0, Src1SubRC); 5479 5480 5481 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 5482 AMDGPU::sub1, Src0SubRC); 5483 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 5484 AMDGPU::sub1, Src1SubRC); 5485 5486 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
5487 MachineInstr *LoHalf =
5488 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
5489 .addReg(CarryReg, RegState::Define)
5490 .add(SrcReg0Sub0)
5491 .add(SrcReg1Sub0)
5492 .addImm(0); // clamp bit
5493
5494 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
5495 MachineInstr *HiHalf =
5496 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
5497 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
5498 .add(SrcReg0Sub1)
5499 .add(SrcReg1Sub1)
5500 .addReg(CarryReg, RegState::Kill)
5501 .addImm(0); // clamp bit
5502
5503 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5504 .addReg(DestSub0)
5505 .addImm(AMDGPU::sub0)
5506 .addReg(DestSub1)
5507 .addImm(AMDGPU::sub1);
5508
5509 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5510
5511 // Try to legalize the operands in case we need to swap the order to keep it
5512 // valid.
5513 legalizeOperands(*LoHalf, MDT);
5514 legalizeOperands(*HiHalf, MDT);
5515
5516 // Move all users of this moved value.
5517 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5518 }
5519
5520 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
5521 MachineInstr &Inst, unsigned Opcode,
5522 MachineDominatorTree *MDT) const {
5523 MachineBasicBlock &MBB = *Inst.getParent();
5524 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5525
5526 MachineOperand &Dest = Inst.getOperand(0);
5527 MachineOperand &Src0 = Inst.getOperand(1);
5528 MachineOperand &Src1 = Inst.getOperand(2);
5529 DebugLoc DL = Inst.getDebugLoc();
5530
5531 MachineBasicBlock::iterator MII = Inst;
5532
5533 const MCInstrDesc &InstDesc = get(Opcode);
5534 const TargetRegisterClass *Src0RC = Src0.isReg() ?
5535 MRI.getRegClass(Src0.getReg()) :
5536 &AMDGPU::SGPR_32RegClass;
5537
5538 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
5539 const TargetRegisterClass *Src1RC = Src1.isReg() ?
5540 MRI.getRegClass(Src1.getReg()) :
5541 &AMDGPU::SGPR_32RegClass;
5542
5543 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
5544
5545 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5546 AMDGPU::sub0, Src0SubRC);
5547 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5548 AMDGPU::sub0, Src1SubRC);
5549 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
5550 AMDGPU::sub1, Src0SubRC);
5551 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
5552 AMDGPU::sub1, Src1SubRC);
5553
5554 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5555 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
5556 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
5557
5558 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
5559 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
5560 .add(SrcReg0Sub0)
5561 .add(SrcReg1Sub0);
5562
5563 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
5564 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
5565 .add(SrcReg0Sub1)
5566 .add(SrcReg1Sub1);
5567
5568 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
5569 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
5570 .addReg(DestSub0)
5571 .addImm(AMDGPU::sub0)
5572 .addReg(DestSub1)
5573 .addImm(AMDGPU::sub1);
5574
5575 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
5576
5577 Worklist.insert(&LoHalf);
5578 Worklist.insert(&HiHalf);
5579
5580 // Move all users of this moved value.
5581 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
5582 }
5583
5584 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
5585 MachineInstr &Inst,
5586 MachineDominatorTree *MDT) const {
5587 MachineBasicBlock &MBB = *Inst.getParent();
5588 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5589
5590 MachineOperand &Dest = Inst.getOperand(0);
5591 MachineOperand &Src0 = Inst.getOperand(1);
5592 MachineOperand &Src1 = Inst.getOperand(2);
5593 const DebugLoc &DL = Inst.getDebugLoc();
5594
5595 MachineBasicBlock::iterator MII = Inst;
5596
5597 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
5598
5599 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5600
5601 MachineOperand* Op0;
5602 MachineOperand* Op1;
5603
5604 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
5605 Op0 = &Src0;
5606 Op1 = &Src1;
5607 } else {
5608 Op0 = &Src1;
5609 Op1 = &Src0;
5610 }
5611
5612 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
5613 .add(*Op0);
5614
5615 Register NewDest = MRI.createVirtualRegister(DestRC);
5616
5617 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
5618 .addReg(Interm)
5619 .add(*Op1);
5620
5621 MRI.replaceRegWith(Dest.getReg(), NewDest);
5622
5623 Worklist.insert(&Xor);
5624 }
5625
5626 void SIInstrInfo::splitScalar64BitBCNT(
5627 SetVectorType &Worklist, MachineInstr &Inst) const {
5628 MachineBasicBlock &MBB = *Inst.getParent();
5629 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5630
5631 MachineBasicBlock::iterator MII = Inst;
5632 const DebugLoc &DL = Inst.getDebugLoc();
5633
5634 MachineOperand &Dest = Inst.getOperand(0);
5635 MachineOperand &Src = Inst.getOperand(1);
5636
5637 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
5638 const TargetRegisterClass *SrcRC = Src.isReg() ?
5639 MRI.getRegClass(Src.getReg()) :
5640 &AMDGPU::SGPR_32RegClass;
5641
5642 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5643 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5644
5645 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
5646
5647 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
5648 AMDGPU::sub0, SrcSubRC);
5649 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
5650 AMDGPU::sub1, SrcSubRC);
5651
5652 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
5653
5654 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
5655
5656 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5657
5658 // We don't need to legalize operands here. src0 for either instruction can be
5659 // an SGPR, and the second input is unused or determined here.
5660 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5661 }
5662
5663 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
5664 MachineInstr &Inst) const {
5665 MachineBasicBlock &MBB = *Inst.getParent();
5666 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
5667 MachineBasicBlock::iterator MII = Inst;
5668 const DebugLoc &DL = Inst.getDebugLoc();
5669
5670 MachineOperand &Dest = Inst.getOperand(0);
5671 uint32_t Imm = Inst.getOperand(2).getImm();
5672 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
5673 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
5674
5675 (void) Offset;
5676
5677 // Only sext_inreg cases handled.
5678 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
5679 Offset == 0 && "Not implemented");
5680
5681 if (BitWidth < 32) {
5682 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5683 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5684 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5685
5686 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo)
5687 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
5688 .addImm(0)
5689 .addImm(BitWidth);
5690
5691 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
5692 .addImm(31)
5693 .addReg(MidRegLo);
5694
5695 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
5696 .addReg(MidRegLo)
5697 .addImm(AMDGPU::sub0)
5698 .addReg(MidRegHi)
5699 .addImm(AMDGPU::sub1);
5700
5701 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5702 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5703 return;
5704 }
5705
5706 MachineOperand &Src = Inst.getOperand(1);
5707 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5708 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5709
5710 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
5711 .addImm(31)
5712 .addReg(Src.getReg(), 0, AMDGPU::sub0);
5713
5714 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
5715 .addReg(Src.getReg(), 0, AMDGPU::sub0)
5716 .addImm(AMDGPU::sub0)
5717 .addReg(TmpReg)
5718 .addImm(AMDGPU::sub1);
5719
5720 MRI.replaceRegWith(Dest.getReg(), ResultReg);
5721 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
5722 }
5723
5724 void SIInstrInfo::addUsersToMoveToVALUWorklist(
5725 unsigned DstReg,
5726 MachineRegisterInfo &MRI,
5727 SetVectorType &Worklist) const {
5728 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
5729 E = MRI.use_end(); I != E;) {
5730 MachineInstr &UseMI = *I->getParent();
5731
5732 unsigned OpNo = 0;
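// The generic pseudos below impose no class constraint on their uses, so we
// inspect the def (operand 0) instead; for real instructions we check the
// register class required at the actual use index.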
5733 5734 switch (UseMI.getOpcode()) { 5735 case AMDGPU::COPY: 5736 case AMDGPU::WQM: 5737 case AMDGPU::SOFT_WQM: 5738 case AMDGPU::WWM: 5739 case AMDGPU::REG_SEQUENCE: 5740 case AMDGPU::PHI: 5741 case AMDGPU::INSERT_SUBREG: 5742 break; 5743 default: 5744 OpNo = I.getOperandNo(); 5745 break; 5746 } 5747 5748 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 5749 Worklist.insert(&UseMI); 5750 5751 do { 5752 ++I; 5753 } while (I != E && I->getParent() == &UseMI); 5754 } else { 5755 ++I; 5756 } 5757 } 5758 } 5759 5760 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 5761 MachineRegisterInfo &MRI, 5762 MachineInstr &Inst) const { 5763 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5764 MachineBasicBlock *MBB = Inst.getParent(); 5765 MachineOperand &Src0 = Inst.getOperand(1); 5766 MachineOperand &Src1 = Inst.getOperand(2); 5767 const DebugLoc &DL = Inst.getDebugLoc(); 5768 5769 switch (Inst.getOpcode()) { 5770 case AMDGPU::S_PACK_LL_B32_B16: { 5771 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5772 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5773 5774 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 5775 // 0. 5776 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5777 .addImm(0xffff); 5778 5779 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 5780 .addReg(ImmReg, RegState::Kill) 5781 .add(Src0); 5782 5783 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 5784 .add(Src1) 5785 .addImm(16) 5786 .addReg(TmpReg, RegState::Kill); 5787 break; 5788 } 5789 case AMDGPU::S_PACK_LH_B32_B16: { 5790 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5791 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5792 .addImm(0xffff); 5793 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 5794 .addReg(ImmReg, RegState::Kill) 5795 .add(Src0) 5796 .add(Src1); 5797 break; 5798 } 5799 case AMDGPU::S_PACK_HH_B32_B16: { 5800 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5801 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5802 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 5803 .addImm(16) 5804 .add(Src0); 5805 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5806 .addImm(0xffff0000); 5807 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 5808 .add(Src1) 5809 .addReg(ImmReg, RegState::Kill) 5810 .addReg(TmpReg, RegState::Kill); 5811 break; 5812 } 5813 default: 5814 llvm_unreachable("unhandled s_pack_* instruction"); 5815 } 5816 5817 MachineOperand &Dest = Inst.getOperand(0); 5818 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5819 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5820 } 5821 5822 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 5823 MachineInstr &SCCDefInst, 5824 SetVectorType &Worklist) const { 5825 // Ensure that def inst defines SCC, which is still live. 5826 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 5827 !Op.isDead() && Op.getParent() == &SCCDefInst); 5828 // This assumes that all the users of SCC are in the same block 5829 // as the SCC def. 5830 for (MachineInstr &MI : // Skip the def inst itself. 5831 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 5832 SCCDefInst.getParent()->end())) { 5833 // Check if SCC is used first. 5834 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) 5835 Worklist.insert(&MI); 5836 // Exit if we find another SCC def. 
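// (e.g. a following S_CMP_* starts a new SCC live range, and users past it
// belong to that def rather than to SCCDefInst).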
5837 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
5838 return;
5839 }
5840 }
5841
5842 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
5843 const MachineInstr &Inst) const {
5844 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
5845
5846 switch (Inst.getOpcode()) {
5847 // For target instructions, getOpRegClass just returns the virtual register
5848 // class associated with the operand, so we need to find an equivalent VGPR
5849 // register class in order to move the instruction to the VALU.
5850 case AMDGPU::COPY:
5851 case AMDGPU::PHI:
5852 case AMDGPU::REG_SEQUENCE:
5853 case AMDGPU::INSERT_SUBREG:
5854 case AMDGPU::WQM:
5855 case AMDGPU::SOFT_WQM:
5856 case AMDGPU::WWM: {
5857 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
5858 if (RI.hasAGPRs(SrcRC)) {
5859 if (RI.hasAGPRs(NewDstRC))
5860 return nullptr;
5861
5862 switch (Inst.getOpcode()) {
5863 case AMDGPU::PHI:
5864 case AMDGPU::REG_SEQUENCE:
5865 case AMDGPU::INSERT_SUBREG:
5866 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
5867 break;
5868 default:
5869 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5870 }
5871
5872 if (!NewDstRC)
5873 return nullptr;
5874 } else {
5875 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
5876 return nullptr;
5877
5878 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
5879 if (!NewDstRC)
5880 return nullptr;
5881 }
5882
5883 return NewDstRC;
5884 }
5885 default:
5886 return NewDstRC;
5887 }
5888 }
5889
5890 // Find the one SGPR operand we are allowed to use.
5891 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
5892 int OpIndices[3]) const {
5893 const MCInstrDesc &Desc = MI.getDesc();
5894
5895 // Find the one SGPR operand we are allowed to use.
5896 //
5897 // First we need to consider the instruction's operand requirements before
5898 // legalizing. Some operands are required to be SGPRs, such as implicit uses
5899 // of VCC, but we are still bound by the constant bus requirement to only use
5900 // one.
5901 //
5902 // If the operand's class is an SGPR, we can never move it.
5903
5904 unsigned SGPRReg = findImplicitSGPRRead(MI);
5905 if (SGPRReg != AMDGPU::NoRegister)
5906 return SGPRReg;
5907
5908 unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
5909 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
5910
5911 for (unsigned i = 0; i < 3; ++i) {
5912 int Idx = OpIndices[i];
5913 if (Idx == -1)
5914 break;
5915
5916 const MachineOperand &MO = MI.getOperand(Idx);
5917 if (!MO.isReg())
5918 continue;
5919
5920 // Is this operand statically required to be an SGPR based on the operand
5921 // constraints?
5922 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
5923 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
5924 if (IsRequiredSGPR)
5925 return MO.getReg();
5926
5927 // If this could be a VGPR or an SGPR, check the dynamic register class.
5928 Register Reg = MO.getReg();
5929 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
5930 if (RI.isSGPRClass(RegRC))
5931 UsedSGPRs[i] = Reg;
5932 }
5933
5934 // We don't have a required SGPR operand, so we have a bit more freedom in
5935 // selecting operands to move.
5936
5937 // Try to select the most used SGPR. If an SGPR is equal to one of the
5938 // others, we choose that.
5939 //
5940 // e.g.
5941 // V_FMA_F32 v0, s0, s0, s0 -> No moves
5942 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
5943
5944 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
5945 // prefer those.
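// (Presumably because displacing a 64-bit SGPR costs two v_mov's rather
// than one, making it the more expensive operand to move.)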
5946
5947 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
5948 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
5949 SGPRReg = UsedSGPRs[0];
5950 }
5951
5952 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
5953 if (UsedSGPRs[1] == UsedSGPRs[2])
5954 SGPRReg = UsedSGPRs[1];
5955 }
5956
5957 return SGPRReg;
5958 }
5959
5960 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
5961 unsigned OperandName) const {
5962 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
5963 if (Idx == -1)
5964 return nullptr;
5965
5966 return &MI.getOperand(Idx);
5967 }
5968
5969 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
5970 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
5971 return (22ULL << 44) | // IMG_FORMAT_32_FLOAT
5972 (1ULL << 56) | // RESOURCE_LEVEL = 1
5973 (3ULL << 60); // OOB_SELECT = 3
5974 }
5975
5976 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
5977 if (ST.isAmdHsaOS()) {
5978 // Set ATC = 1. GFX9 doesn't have this bit.
5979 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
5980 RsrcDataFormat |= (1ULL << 56);
5981
5982 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
5983 // Note: it disables TC L2 and therefore decreases performance.
5984 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
5985 RsrcDataFormat |= (2ULL << 59);
5986 }
5987
5988 return RsrcDataFormat;
5989 }
5990
5991 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
5992 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
5993 AMDGPU::RSRC_TID_ENABLE |
5994 0xffffffff; // Size
5995
5996 // GFX9 doesn't have ELEMENT_SIZE.
5997 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
5998 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
5999 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
6000 }
6001
6002 // IndexStride = 64 or 32 elements, encoded as 3 or 2 respectively.
6003 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2;
6004 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;
6005
6006 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
6007 // Clear them unless we want a huge stride.
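// (Stale DATA_FORMAT bits would otherwise be read back as stride bits
// [14:17] and silently inflate the per-thread scratch stride.)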
6008 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
6009 ST.getGeneration() <= AMDGPUSubtarget::GFX9)
6010 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;
6011
6012 return Rsrc23;
6013 }
6014
6015 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
6016 unsigned Opc = MI.getOpcode();
6017
6018 return isSMRD(Opc);
6019 }
6020
6021 bool SIInstrInfo::isHighLatencyDef(int Opc) const {
6022 return get(Opc).mayLoad() &&
6023 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc));
6024 }
6025
6026 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
6027 int &FrameIndex) const {
6028 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
6029 if (!Addr || !Addr->isFI())
6030 return AMDGPU::NoRegister;
6031
6032 assert(!MI.memoperands_empty() &&
6033 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);
6034
6035 FrameIndex = Addr->getIndex();
6036 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
6037 }
6038
6039 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
6040 int &FrameIndex) const {
6041 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
6042 assert(Addr && Addr->isFI());
6043 FrameIndex = Addr->getIndex();
6044 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
6045 }
6046
6047 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
6048 int &FrameIndex) const {
6049 if (!MI.mayLoad())
6050 return AMDGPU::NoRegister;
6051
6052 if (isMUBUF(MI) || isVGPRSpill(MI))
6053 return isStackAccess(MI, FrameIndex);
6054
6055 if (isSGPRSpill(MI))
6056 return isSGPRStackAccess(MI, FrameIndex);
6057
6058 return AMDGPU::NoRegister;
6059 }
6060
6061 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
6062 int &FrameIndex) const {
6063 if (!MI.mayStore())
6064 return AMDGPU::NoRegister;
6065
6066 if (isMUBUF(MI) || isVGPRSpill(MI))
6067 return isStackAccess(MI, FrameIndex);
6068
6069 if (isSGPRSpill(MI))
6070 return isSGPRStackAccess(MI, FrameIndex);
6071
6072 return AMDGPU::NoRegister;
6073 }
6074
6075 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
6076 unsigned Size = 0;
6077 MachineBasicBlock::const_instr_iterator I = MI.getIterator();
6078 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
6079 while (++I != E && I->isInsideBundle()) {
6080 assert(!I->isBundle() && "No nested bundle!");
6081 Size += getInstSizeInBytes(*I);
6082 }
6083
6084 return Size;
6085 }
6086
6087 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
6088 unsigned Opc = MI.getOpcode();
6089 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
6090 unsigned DescSize = Desc.getSize();
6091
6092 // If we have a definitive size, we can use it. Otherwise we need to inspect
6093 // the operands to know the size.
6094 if (isFixedSize(MI))
6095 return DescSize;
6096
6097 // 4-byte instructions may have a 32-bit literal encoded after them. Check
6098 // operands that could ever be literals.
6099 if (isVALU(MI) || isSALU(MI)) {
6100 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
6101 if (Src0Idx == -1)
6102 return DescSize; // No operands.
6103
6104 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
6105 return isVOP3(MI) ? 12 : (DescSize + 4);
6106
6107 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
6108 if (Src1Idx == -1)
6109 return DescSize;
6110
6111 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
6112 return isVOP3(MI) ?
12 : (DescSize + 4); 6113 6114 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2); 6115 if (Src2Idx == -1) 6116 return DescSize; 6117 6118 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx])) 6119 return isVOP3(MI) ? 12 : (DescSize + 4); 6120 6121 return DescSize; 6122 } 6123 6124 // Check whether we have extra NSA words. 6125 if (isMIMG(MI)) { 6126 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 6127 if (VAddr0Idx < 0) 6128 return 8; 6129 6130 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 6131 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 6132 } 6133 6134 switch (Opc) { 6135 case TargetOpcode::IMPLICIT_DEF: 6136 case TargetOpcode::KILL: 6137 case TargetOpcode::DBG_VALUE: 6138 case TargetOpcode::EH_LABEL: 6139 return 0; 6140 case TargetOpcode::BUNDLE: 6141 return getInstBundleSize(MI); 6142 case TargetOpcode::INLINEASM: 6143 case TargetOpcode::INLINEASM_BR: { 6144 const MachineFunction *MF = MI.getParent()->getParent(); 6145 const char *AsmStr = MI.getOperand(0).getSymbolName(); 6146 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), 6147 &MF->getSubtarget()); 6148 } 6149 default: 6150 return DescSize; 6151 } 6152 } 6153 6154 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 6155 if (!isFLAT(MI)) 6156 return false; 6157 6158 if (MI.memoperands_empty()) 6159 return true; 6160 6161 for (const MachineMemOperand *MMO : MI.memoperands()) { 6162 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 6163 return true; 6164 } 6165 return false; 6166 } 6167 6168 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 6169 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 6170 } 6171 6172 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 6173 MachineBasicBlock *IfEnd) const { 6174 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 6175 assert(TI != IfEntry->end()); 6176 6177 MachineInstr *Branch = &(*TI); 6178 MachineFunction *MF = IfEntry->getParent(); 6179 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 6180 6181 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6182 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6183 MachineInstr *SIIF = 6184 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 6185 .add(Branch->getOperand(0)) 6186 .add(Branch->getOperand(1)); 6187 MachineInstr *SIEND = 6188 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 6189 .addReg(DstReg); 6190 6191 IfEntry->erase(TI); 6192 IfEntry->insert(IfEntry->end(), SIIF); 6193 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 6194 } 6195 } 6196 6197 void SIInstrInfo::convertNonUniformLoopRegion( 6198 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 6199 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 6200 // We expect 2 terminators, one conditional and one unconditional. 
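// The rewrite below turns (sketch, illustrative names):
//   SI_NON_UNIFORM_BRCOND_PSEUDO %cond, %header
// into
//   %backedge = SI_IF_BREAK %maskphi, %cond
//   SI_LOOP %backedge, %header
// where %maskphi is a PHI inserted in the loop header, merging %backedge
// from this block with zero from every other predecessor.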
6201 assert(TI != LoopEnd->end()); 6202 6203 MachineInstr *Branch = &(*TI); 6204 MachineFunction *MF = LoopEnd->getParent(); 6205 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 6206 6207 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 6208 6209 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 6210 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 6211 MachineInstrBuilder HeaderPHIBuilder = 6212 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 6213 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 6214 E = LoopEntry->pred_end(); 6215 PI != E; ++PI) { 6216 if (*PI == LoopEnd) { 6217 HeaderPHIBuilder.addReg(BackEdgeReg); 6218 } else { 6219 MachineBasicBlock *PMBB = *PI; 6220 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 6221 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 6222 ZeroReg, 0); 6223 HeaderPHIBuilder.addReg(ZeroReg); 6224 } 6225 HeaderPHIBuilder.addMBB(*PI); 6226 } 6227 MachineInstr *HeaderPhi = HeaderPHIBuilder; 6228 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 6229 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 6230 .addReg(DstReg) 6231 .add(Branch->getOperand(0)); 6232 MachineInstr *SILOOP = 6233 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 6234 .addReg(BackEdgeReg) 6235 .addMBB(LoopEntry); 6236 6237 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 6238 LoopEnd->erase(TI); 6239 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 6240 LoopEnd->insert(LoopEnd->end(), SILOOP); 6241 } 6242 } 6243 6244 ArrayRef<std::pair<int, const char *>> 6245 SIInstrInfo::getSerializableTargetIndices() const { 6246 static const std::pair<int, const char *> TargetIndices[] = { 6247 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 6248 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 6249 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 6250 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 6251 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 6252 return makeArrayRef(TargetIndices); 6253 } 6254 6255 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 6256 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 6257 ScheduleHazardRecognizer * 6258 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 6259 const ScheduleDAG *DAG) const { 6260 return new GCNHazardRecognizer(DAG->MF); 6261 } 6262 6263 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 6264 /// pass. 
6265 ScheduleHazardRecognizer * 6266 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 6267 return new GCNHazardRecognizer(MF); 6268 } 6269 6270 std::pair<unsigned, unsigned> 6271 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 6272 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 6273 } 6274 6275 ArrayRef<std::pair<unsigned, const char *>> 6276 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 6277 static const std::pair<unsigned, const char *> TargetFlags[] = { 6278 { MO_GOTPCREL, "amdgpu-gotprel" }, 6279 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 6280 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 6281 { MO_REL32_LO, "amdgpu-rel32-lo" }, 6282 { MO_REL32_HI, "amdgpu-rel32-hi" }, 6283 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 6284 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 6285 }; 6286 6287 return makeArrayRef(TargetFlags); 6288 } 6289 6290 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 6291 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 6292 MI.modifiesRegister(AMDGPU::EXEC, &RI); 6293 } 6294 6295 MachineInstrBuilder 6296 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6297 MachineBasicBlock::iterator I, 6298 const DebugLoc &DL, 6299 unsigned DestReg) const { 6300 if (ST.hasAddNoCarry()) 6301 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 6302 6303 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6304 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 6305 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 6306 6307 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6308 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6309 } 6310 6311 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 6312 MachineBasicBlock::iterator I, 6313 const DebugLoc &DL, 6314 Register DestReg, 6315 RegScavenger &RS) const { 6316 if (ST.hasAddNoCarry()) 6317 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 6318 6319 // If available, prefer to use vcc. 6320 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 6321 ? Register(RI.getVCC()) 6322 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 6323 6324 // TODO: Users need to deal with this. 
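// (Scavenging can fail in a fully occupied block; callers currently get a
// null MachineInstrBuilder back and must not blindly add operands to it.)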
6325 if (!UnusedCarry.isValid()) 6326 return MachineInstrBuilder(); 6327 6328 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg) 6329 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 6330 } 6331 6332 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 6333 switch (Opcode) { 6334 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 6335 case AMDGPU::SI_KILL_I1_TERMINATOR: 6336 return true; 6337 default: 6338 return false; 6339 } 6340 } 6341 6342 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 6343 switch (Opcode) { 6344 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 6345 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 6346 case AMDGPU::SI_KILL_I1_PSEUDO: 6347 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 6348 default: 6349 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 6350 } 6351 } 6352 6353 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 6354 MachineBasicBlock *MBB = MI.getParent(); 6355 MachineFunction *MF = MBB->getParent(); 6356 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 6357 6358 if (!ST.isWave32()) 6359 return; 6360 6361 for (auto &Op : MI.implicit_operands()) { 6362 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 6363 Op.setReg(AMDGPU::VCC_LO); 6364 } 6365 } 6366 6367 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 6368 if (!isSMRD(MI)) 6369 return false; 6370 6371 // Check that it is using a buffer resource. 6372 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 6373 if (Idx == -1) // e.g. s_memtime 6374 return false; 6375 6376 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 6377 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 6378 } 6379 6380 unsigned SIInstrInfo::getNumFlatOffsetBits(unsigned AddrSpace, 6381 bool Signed) const { 6382 if (!ST.hasFlatInstOffsets()) 6383 return 0; 6384 6385 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS) 6386 return 0; 6387 6388 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) 6389 return Signed ? 12 : 11; 6390 6391 return Signed ? 13 : 12; 6392 } 6393 6394 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 6395 bool Signed) const { 6396 // TODO: Should 0 be special cased? 
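// For reference, from the checks below: pre-GFX10 targets with FLAT offsets
// (e.g. GFX9) accept signed 13-bit offsets, i.e. [-4096, 4095]; GFX10
// narrows this to signed 12-bit, i.e. [-2048, 2047], and unsigned 11-bit.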
6397 if (!ST.hasFlatInstOffsets())
6398 return false;
6399
6400 if (ST.hasFlatSegmentOffsetBug() && AddrSpace == AMDGPUAS::FLAT_ADDRESS)
6401 return false;
6402
6403 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
6404 return (Signed && isInt<12>(Offset)) ||
6405 (!Signed && isUInt<11>(Offset));
6406 }
6407
6408 return (Signed && isInt<13>(Offset)) ||
6409 (!Signed && isUInt<12>(Offset));
6410 }
6411
6412
6413 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
6414 enum SIEncodingFamily {
6415 SI = 0,
6416 VI = 1,
6417 SDWA = 2,
6418 SDWA9 = 3,
6419 GFX80 = 4,
6420 GFX9 = 5,
6421 GFX10 = 6,
6422 SDWA10 = 7
6423 };
6424
6425 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
6426 switch (ST.getGeneration()) {
6427 default:
6428 break;
6429 case AMDGPUSubtarget::SOUTHERN_ISLANDS:
6430 case AMDGPUSubtarget::SEA_ISLANDS:
6431 return SIEncodingFamily::SI;
6432 case AMDGPUSubtarget::VOLCANIC_ISLANDS:
6433 case AMDGPUSubtarget::GFX9:
6434 return SIEncodingFamily::VI;
6435 case AMDGPUSubtarget::GFX10:
6436 return SIEncodingFamily::GFX10;
6437 }
6438 llvm_unreachable("Unknown subtarget generation!");
6439 }
6440
6441 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const {
6442 switch(MCOp) {
6443 // These opcodes use indirect register addressing so
6444 // they need special handling by codegen (currently missing).
6445 // Therefore it is too risky to allow these opcodes
6446 // to be selected by the DPP combiner or the SDWA peephole pass.
6447 case AMDGPU::V_MOVRELS_B32_dpp_gfx10:
6448 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10:
6449 case AMDGPU::V_MOVRELD_B32_dpp_gfx10:
6450 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10:
6451 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10:
6452 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10:
6453 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10:
6454 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10:
6455 return true;
6456 default:
6457 return false;
6458 }
6459 }
6460
6461 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
6462 SIEncodingFamily Gen = subtargetEncodingFamily(ST);
6463
6464 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
6465 ST.getGeneration() == AMDGPUSubtarget::GFX9)
6466 Gen = SIEncodingFamily::GFX9;
6467
6468 // Adjust the encoding family to GFX80 for D16 buffer instructions when the
6469 // subtarget has the UnpackedD16VMem feature.
6470 // TODO: remove this when we discard GFX80 encoding.
6471 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
6472 Gen = SIEncodingFamily::GFX80;
6473
6474 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) {
6475 switch (ST.getGeneration()) {
6476 default:
6477 Gen = SIEncodingFamily::SDWA;
6478 break;
6479 case AMDGPUSubtarget::GFX9:
6480 Gen = SIEncodingFamily::SDWA9;
6481 break;
6482 case AMDGPUSubtarget::GFX10:
6483 Gen = SIEncodingFamily::SDWA10;
6484 break;
6485 }
6486 }
6487
6488 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);
6489
6490 // -1 means that Opcode is already a native instruction.
6491 if (MCOp == -1)
6492 return Opcode;
6493
6494 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
6495 // no encoding in the given subtarget generation.
6496 if (MCOp == (uint16_t)-1)
6497 return -1;
6498
6499 if (isAsmOnlyOpcode(MCOp))
6500 return -1;
6501
6502 return MCOp;
6503 }
6504
6505 static
6506 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
6507 assert(RegOpnd.isReg());
6508 return RegOpnd.isUndef() ?
TargetInstrInfo::RegSubRegPair() :
6509 getRegSubRegPair(RegOpnd);
6510 }
6511
6512 TargetInstrInfo::RegSubRegPair
6513 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
6514 assert(MI.isRegSequence());
6515 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
6516 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
6517 auto &RegOp = MI.getOperand(1 + 2 * I);
6518 return getRegOrUndef(RegOp);
6519 }
6520 return TargetInstrInfo::RegSubRegPair();
6521 }
6522
6523 // Try to find the definition of reg:subreg in subreg-manipulation pseudos.
6524 // Following a subreg of reg:subreg isn't supported.
6525 static bool followSubRegDef(MachineInstr &MI,
6526 TargetInstrInfo::RegSubRegPair &RSR) {
6527 if (!RSR.SubReg)
6528 return false;
6529 switch (MI.getOpcode()) {
6530 default: break;
6531 case AMDGPU::REG_SEQUENCE:
6532 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
6533 return true;
6534 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
6535 case AMDGPU::INSERT_SUBREG:
6536 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
6537 // inserted the subreg we're looking for
6538 RSR = getRegOrUndef(MI.getOperand(2));
6539 else { // the subreg in the rest of the reg
6540 auto R1 = getRegOrUndef(MI.getOperand(1));
6541 if (R1.SubReg) // subreg of subreg isn't supported
6542 return false;
6543 RSR.Reg = R1.Reg;
6544 }
6545 return true;
6546 }
6547 return false;
6548 }
6549
6550 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
6551 MachineRegisterInfo &MRI) {
6552 assert(MRI.isSSA());
6553 if (!Register::isVirtualRegister(P.Reg))
6554 return nullptr;
6555
6556 auto RSR = P;
6557 auto *DefInst = MRI.getVRegDef(RSR.Reg);
6558 while (auto *MI = DefInst) {
6559 DefInst = nullptr;
6560 switch (MI->getOpcode()) {
6561 case AMDGPU::COPY:
6562 case AMDGPU::V_MOV_B32_e32: {
6563 auto &Op1 = MI->getOperand(1);
6564 if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
6565 if (Op1.isUndef())
6566 return nullptr;
6567 RSR = getRegSubRegPair(Op1);
6568 DefInst = MRI.getVRegDef(RSR.Reg);
6569 }
6570 break;
6571 }
6572 default:
6573 if (followSubRegDef(*MI, RSR)) {
6574 if (!RSR.Reg)
6575 return nullptr;
6576 DefInst = MRI.getVRegDef(RSR.Reg);
6577 }
6578 }
6579 if (!DefInst)
6580 return MI;
6581 }
6582 return nullptr;
6583 }
6584
6585 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
6586 Register VReg,
6587 const MachineInstr &DefMI,
6588 const MachineInstr &UseMI) {
6589 assert(MRI.isSSA() && "Must be run on SSA");
6590
6591 auto *TRI = MRI.getTargetRegisterInfo();
6592 auto *DefBB = DefMI.getParent();
6593
6594 // Don't bother searching between blocks, although it is possible this block
6595 // doesn't modify exec.
6596 if (UseMI.getParent() != DefBB)
6597 return true;
6598
6599 const int MaxInstScan = 20;
6600 int NumInst = 0;
6601
6602 // Stop scan at the use.
MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!Register::isVirtualRegister(P.Reg))
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg())) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}

bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
                                      Register VReg,
                                      const MachineInstr &DefMI,
                                      const MachineInstr &UseMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  // Don't bother searching between blocks, although it is possible this block
  // doesn't modify exec.
  if (UseMI.getParent() != DefBB)
    return true;

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan at the use.
  auto E = UseMI.getIterator();
  for (auto I = std::next(DefMI.getIterator()); I != E; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }

  return false;
}

bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI,
                                         Register VReg,
                                         const MachineInstr &DefMI) {
  assert(MRI.isSSA() && "Must be run on SSA");

  auto *TRI = MRI.getTargetRegisterInfo();
  auto *DefBB = DefMI.getParent();

  const int MaxUseInstScan = 10;
  int NumUseInst = 0;

  for (auto &UseInst : MRI.use_nodbg_instructions(VReg)) {
    // Don't bother searching between blocks, although it is possible this
    // block doesn't modify exec.
    if (UseInst.getParent() != DefBB)
      return true;

    if (++NumUseInst > MaxUseInstScan)
      return true;
  }

  const int MaxInstScan = 20;
  int NumInst = 0;

  // Stop scan when we have seen all the uses.
  for (auto I = std::next(DefMI.getIterator()); ; ++I) {
    if (I->isDebugInstr())
      continue;

    if (++NumInst > MaxInstScan)
      return true;

    if (I->readsRegister(VReg))
      if (--NumUseInst == 0)
        return false;

    if (I->modifiesRegister(AMDGPU::EXEC, TRI))
      return true;
  }
}
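// Illustration for the two helpers above (hypothetical MIR):
//
//   %v:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
//   $exec = S_ANDN2_B64_term $exec, %mask
//   %u:vgpr_32 = V_ADD_F32_e32 %v, %v, implicit $exec
//
// The exec write between the def of %v and its use makes both queries
// conservatively return true, so a combine that wants to move or fold %v
// across that point must give up. The scans are also capped by
// MaxInstScan/MaxUseInstScan, so an overly long block returns true without
// proving anything.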
MachineInstr *SIInstrInfo::createPHIDestinationCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt,
    const DebugLoc &DL, Register Src, Register Dst) const {
  auto Cur = MBB.begin();
  if (Cur != MBB.end())
    do {
      if (!Cur->isPHI() && Cur->readsRegister(Dst))
        return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src);
      ++Cur;
    } while (Cur != MBB.end() && Cur != LastPHIIt);

  return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src,
                                                   Dst);
}

MachineInstr *SIInstrInfo::createPHISourceCopy(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt,
    const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const {
  if (InsPt != MBB.end() &&
      (InsPt->getOpcode() == AMDGPU::SI_IF ||
       InsPt->getOpcode() == AMDGPU::SI_ELSE ||
       InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) &&
      InsPt->definesRegister(Src)) {
    InsPt++;
    return BuildMI(MBB, InsPt, DL,
                   get(ST.isWave32() ? AMDGPU::S_MOV_B32_term
                                     : AMDGPU::S_MOV_B64_term),
                   Dst)
        .addReg(Src, 0, SrcSubReg)
        .addReg(AMDGPU::EXEC, RegState::Implicit);
  }
  return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg,
                                              Dst);
}

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible,
  // and %0 may even spill. We can't spill $m0 normally (it would require
  // copying to a numbered SGPR anyway), and since it is in the SReg_32
  // register class, TargetInstrInfo::foldMemoryOperand() is going to try.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();

    if (DstReg == AMDGPU::M0 && SrcReg.isVirtual()) {
      MF.getRegInfo().constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
      return nullptr;
    }

    if (SrcReg == AMDGPU::M0 && DstReg.isVirtual()) {
      MF.getRegInfo().constrainRegClass(DstReg, &AMDGPU::SReg_32_XM0RegClass);
      return nullptr;
    }
  }

  return nullptr;
}

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}
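// Worked example for the bundle path above (hypothetical latencies): for a
// BUNDLE of three instructions with latencies {4, 1, 2}, the loop computes
// Lat = max(4, 1, 2) = 4 and Count = 3, so the reported latency is
// 4 + 3 - 1 = 6: the longest member latency plus one issue cycle for each
// additional bundled instruction.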