//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "GCNHazardRecognizer.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {
namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
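// The default of 16 matches the signed 16-bit immediate (SIMM16) used by the
// hardware branch instructions; tests shrink it to force branch relaxation.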
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

static SDValue findChainOperand(SDNode *Load) {
  SDValue LastOp = Load->getOperand(getNumOperandsNoGlue(Load) - 1);
  assert(LastOp.getValueType() == MVT::Other && "Chain missing from load node");
  return LastOp;
}

/// Returns true if both nodes have the same value for the given
/// operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                   AliasAnalysis *AA) const {
  // TODO: The generic check fails for VALU instructions that should be
  // rematerializable due to implicit reads of exec. We really want all of the
  // generic logic for this except for this.
  switch (MI.getOpcode()) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
    // No implicit operands.
    return MI.getNumOperands() == MI.getDesc().getNumOperands();
  default:
    return false;
  }
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(1) != Load1->getOperand(1))
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
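    // The read2/write2 forms are identified by the presence of a second data
    // operand (data1), which the single-offset DS instructions do not have.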
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::data1) != -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::data1) != -1)
      return false;

    Offset0 = cast<ConstantSDNode>(Load0->getOperand(2))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(2))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    // Check chain.
    if (findChainOperand(Load0) != findChainOperand(Load1))
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        findChainOperand(Load0) != findChainOperand(Load1) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract the index by one.
    --OffIdx0;
    --OffIdx1;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandWithOffset(MachineInstr &LdSt,
                                          MachineOperand *&BaseOp,
                                          int64_t &Offset,
                                          const TargetRegisterInfo *TRI) const {
  unsigned Opc = LdSt.getOpcode();

  if (isDS(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetImm) {
      // Normal, single offset LDS instruction.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      // TODO: ds_consume/ds_append use M0 for the base address. Is it safe to
      // report that here?
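      // For now, give up when there is no explicit address operand (e.g.
      // ds_consume/ds_append, whose base lives in M0).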
      if (!BaseOp)
        return false;

      Offset = OffsetImm->getImm();
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    // The 2 offset instructions use offset0 and offset1 instead. We can treat
    // these as a load with a single offset if the 2 offsets are consecutive.
    // We will use this for some partially aligned loads.
    const MachineOperand *Offset0Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset0);
    const MachineOperand *Offset1Imm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset1);

    uint8_t Offset0 = Offset0Imm->getImm();
    uint8_t Offset1 = Offset1Imm->getImm();

    if (Offset1 > Offset0 && Offset1 - Offset0 == 1) {
      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        // For a read2, the destination register holds both elements, so half
        // its size in bytes (bits / 16) is the size of one element.
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
      Offset = EltSize * Offset0;
      assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                                "operands of type register.");
      return true;
    }

    return false;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset && SOffset->isReg())
      return false;

    MachineOperand *AddrReg = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (!AddrReg)
      return false;

    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    BaseOp = AddrReg;
    Offset = OffsetImm->getImm();

    if (SOffset) // soffset can be an inline immediate.
      Offset += SOffset->getImm();

    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isSMRD(LdSt)) {
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (!OffsetImm)
      return false;

    MachineOperand *SBaseReg = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    BaseOp = SBaseReg;
    Offset = OffsetImm->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  if (isFLAT(LdSt)) {
    MachineOperand *VAddr = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (VAddr) {
      // Can't analyze 2 offsets.
      if (getNamedOperand(LdSt, AMDGPU::OpName::saddr))
        return false;

      BaseOp = VAddr;
    } else {
      // scratch instructions have either vaddr or saddr.
      BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    }

    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    assert(BaseOp->isReg() && "getMemOperandWithOffset only supports base "
                              "operands of type register.");
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  const MachineOperand &BaseOp1,
                                  const MachineInstr &MI2,
                                  const MachineOperand &BaseOp2) {
  // Support only base operands with base registers.
  // Note: this could be extended to support FI operands.
  if (!BaseOp1.isReg() || !BaseOp2.isReg())
    return false;

  if (BaseOp1.isIdenticalTo(BaseOp2))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  const MachineFunction &MF = *MI1.getParent()->getParent();
  const DataLayout &DL = MF.getFunction().getParent()->getDataLayout();
  Base1 = GetUnderlyingObject(Base1, DL);
  Base2 = GetUnderlyingObject(Base2, DL);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(MachineOperand &BaseOp1,
                                      MachineOperand &BaseOp2,
                                      unsigned NumLoads) const {
  MachineInstr &FirstLdSt = *BaseOp1.getParent();
  MachineInstr &SecondLdSt = *BaseOp2.getParent();

  if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOp1, SecondLdSt, BaseOp2))
    return false;

  const MachineOperand *FirstDst = nullptr;
  const MachineOperand *SecondDst = nullptr;

  if ((isMUBUF(FirstLdSt) && isMUBUF(SecondLdSt)) ||
      (isMTBUF(FirstLdSt) && isMTBUF(SecondLdSt)) ||
      (isFLAT(FirstLdSt) && isFLAT(SecondLdSt))) {
    const unsigned MaxGlobalLoadCluster = 6;
    if (NumLoads > MaxGlobalLoadCluster)
      return false;

    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdata);
    if (!FirstDst)
      FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdata);
    if (!SecondDst)
      SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  } else if (isSMRD(FirstLdSt) && isSMRD(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::sdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::sdst);
  } else if (isDS(FirstLdSt) && isDS(SecondLdSt)) {
    FirstDst = getNamedOperand(FirstLdSt, AMDGPU::OpName::vdst);
    SecondDst = getNamedOperand(SecondLdSt, AMDGPU::OpName::vdst);
  }

  if (!FirstDst || !SecondDst)
    return false;

  // Try to limit clustering based on the total number of bytes loaded
  // rather than the number of instructions. This is done to help reduce
  // register pressure. The method used is somewhat inexact, though,
  // because it assumes that all loads in the cluster will load the
  // same number of bytes as FirstLdSt.

  // The unit of this value is bytes.
  // FIXME: This needs finer tuning.
  unsigned LoadClusterThreshold = 16;

  const MachineRegisterInfo &MRI =
      FirstLdSt.getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *DstRC = MRI.getRegClass(FirstDst->getReg());

  return (NumLoads * (RI.getRegSizeInBits(*DstRC) / 8)) <= LoadClusterThreshold;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 16 store batches.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(),
                                        "illegal SGPR to VGPR copy",
                                        DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, unsigned DestReg,
                              unsigned SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(-1)
          .addImm(0);
      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
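        // The source is a VGPR holding a boolean; materialize it into VCC by
        // comparing against zero (V_CMP_NE_U32_e32 implicitly defines VCC).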
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
            .addImm(0)
            .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RC)) {
    if (RI.getRegSizeInBits(*RC) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }

    if (!RI.isSGPRClass(RI.getPhysRegClass(SrcReg))) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);
  // Copy in ascending or descending subregister order depending on whether
  // the source and destination ranges overlap, so no element is clobbered
  // before it is read.
  bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIdx));

    Builder.addReg(RI.getSubReg(SrcReg, SubIdx));

    if (Idx == 0)
      Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

    bool UseKill = KillSrc && Idx == SubIndices.size() - 1;
    Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
        .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
        .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
        .addImm(Value);
    return;
  }
  if (RegClass == &AMDGPU::VReg_64RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
        .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    // Only the lowest element receives the value; the rest are zeroed.
    int64_t IdxValue = Idx == 0 ? Value : 0;

    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, unsigned DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     unsigned TrueReg,
                                     unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addReg(FalseReg)
        .addReg(TrueReg)
        .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
          .addImm(-1)
          .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(FalseReg)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
          .addImm(0)
          .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(FalseReg)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
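      // Cond[1] was saved as the branch's implicit condition register operand;
      // clearing the implicit flag lets it be copied as an explicit source.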
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(FalseReg)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
          .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(TrueReg)
          .addReg(FalseReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
          .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
          .addImm(-1)
          .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(FalseReg)
          .addReg(TrueReg)
          .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      unsigned SReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass);
      unsigned SReg2 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
      BuildMI(MBB, I, DL, get(AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
          .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), SReg)
          .addImm(0)
          .addImm(-1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
          .addReg(FalseReg)
          .addReg(TrueReg)
          .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

unsigned SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
      .addImm(Value)
      .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               unsigned SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
      .addImm(Value)
      .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO
    = MF->getMachineMemOperand(PtrInfo, MachineMemOperand::MOStore,
                               Size, Align);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (TargetRegisterInfo::isVirtualRegister(SrcReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc)
        .addReg(SrcReg, getKillRegState(isKill)) // data
        .addFrameIndex(FrameIndex)               // addr
        .addMemOperand(MMO)
        .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
        .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);
    // Add the scratch resource registers as implicit uses because we may end up
    // needing them, and need to ensure that the reserved registers are
    // correctly handled.

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();
  BuildMI(MBB, MI, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addReg(MFI->getScratchRSrcReg())        // scratch_rsrc
      .addReg(MFI->getFrameOffsetReg())        // scratch_offset
      .addImm(0)                               // offset
      .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned Align = FrameInfo.getObjectAlignment(FrameIndex);
  unsigned Size = FrameInfo.getObjectSize(FrameIndex);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, Size, Align);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (TargetRegisterInfo::isVirtualRegister(DestReg) && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0RegClass);
    }

    FrameInfo.setStackID(FrameIndex, SIStackID::SGPR_SPILL);
    MachineInstrBuilder Spill = BuildMI(MBB, MI, DL, OpDesc, DestReg)
        .addFrameIndex(FrameIndex) // addr
        .addMemOperand(MMO)
        .addReg(MFI->getScratchRSrcReg(), RegState::Implicit)
        .addReg(MFI->getFrameOffsetReg(), RegState::Implicit);

    if (ST.hasScalarStores()) {
      // m0 is used for offset to scalar stores if used to spill.
      Spill.addReg(AMDGPU::M0, RegState::ImplicitDefine | RegState::Dead);
    }

    return;
  }

  assert(RI.hasVGPRs(RC) && "Only VGPR spilling expected");

  unsigned Opcode = getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex)        // vaddr
      .addReg(MFI->getScratchRSrcReg()) // scratch_rsrc
      .addReg(MFI->getFrameOffsetReg()) // scratch_offset
      .addImm(0)                        // offset
      .addMemOperand(MMO);
}

/// \param @Offset Offset in bytes of the FrameIndex being spilled
unsigned SIInstrInfo::calculateLDSSpillAddress(
    MachineBasicBlock &MBB, MachineInstr &MI, RegScavenger *RS, unsigned TmpReg,
    unsigned FrameOffset, unsigned Size) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned WorkGroupSize = MFI->getMaxFlatWorkGroupSize();
  unsigned WavefrontSize = ST.getWavefrontSize();

  unsigned TIDReg = MFI->getTIDReg();
  if (!MFI->hasCalculatedTID()) {
    MachineBasicBlock &Entry = MBB.getParent()->front();
    MachineBasicBlock::iterator Insert = Entry.front();
    const DebugLoc &DL = Insert->getDebugLoc();

    TIDReg = RI.findUnusedRegister(MF->getRegInfo(), &AMDGPU::VGPR_32RegClass,
                                   *MF);
    if (TIDReg == AMDGPU::NoRegister)
      return TIDReg;

    if (!AMDGPU::isShader(MF->getFunction().getCallingConv()) &&
        WorkGroupSize > WavefrontSize) {
      unsigned TIDIGXReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_X);
      unsigned TIDIGYReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Y);
      unsigned TIDIGZReg
        = MFI->getPreloadedReg(AMDGPUFunctionArgInfo::WORKGROUP_ID_Z);
      unsigned InputPtrReg =
          MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
      for (unsigned Reg : {TIDIGXReg, TIDIGYReg, TIDIGZReg}) {
        if (!Entry.isLiveIn(Reg))
          Entry.addLiveIn(Reg);
      }

      RS->enterBasicBlock(Entry);
      // FIXME: Can we scavenge an SReg_64 and access the subregs?
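      // Scavenge two SGPRs to hold NGROUPS.Z and NGROUPS.Y, loaded from the
      // kernel argument segment below.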
      unsigned STmp0 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      unsigned STmp1 = RS->scavengeRegister(&AMDGPU::SGPR_32RegClass, 0);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp0)
          .addReg(InputPtrReg)
          .addImm(SI::KernelInputOffsets::NGROUPS_Z);
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_LOAD_DWORD_IMM), STmp1)
          .addReg(InputPtrReg)
          .addImm(SI::KernelInputOffsets::NGROUPS_Y);

      // NGROUPS.X * NGROUPS.Y
      BuildMI(Entry, Insert, DL, get(AMDGPU::S_MUL_I32), STmp1)
          .addReg(STmp1)
          .addReg(STmp0);
      // (NGROUPS.X * NGROUPS.Y) * TIDIG.X
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MUL_U32_U24_e32), TIDReg)
          .addReg(STmp1)
          .addReg(TIDIGXReg);
      // NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MAD_U32_U24), TIDReg)
          .addReg(STmp0)
          .addReg(TIDIGYReg)
          .addReg(TIDReg);
      // (NGROUPS.Z * TIDIG.Y + (NGROUPS.X * NGROUPS.Y * TIDIG.X)) + TIDIG.Z
      getAddNoCarry(Entry, Insert, DL, TIDReg)
          .addReg(TIDReg)
          .addReg(TIDIGZReg);
    } else {
      // Get the wave id
      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_LO_U32_B32_e64),
              TIDReg)
          .addImm(-1)
          .addImm(0);

      BuildMI(Entry, Insert, DL, get(AMDGPU::V_MBCNT_HI_U32_B32_e64),
              TIDReg)
          .addImm(-1)
          .addReg(TIDReg);
    }

    BuildMI(Entry, Insert, DL, get(AMDGPU::V_LSHLREV_B32_e32),
            TIDReg)
        .addImm(2)
        .addReg(TIDReg);
    MFI->setTIDReg(TIDReg);
  }

  // Add FrameIndex to LDS offset
  unsigned LDSOffset = MFI->getLDSSize() + (FrameOffset * WorkGroupSize);
  getAddNoCarry(MBB, MI, DL, TmpReg)
      .addImm(LDSOffset)
      .addReg(TIDReg);

  return TmpReg;
}

void SIInstrInfo::insertWaitStates(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MI,
                                   int Count) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Count > 0) {
    int Arg;
    if (Count >= 8)
      Arg = 7;
    else
      Arg = Count - 1;
    Count -= 8;
    // Each S_NOP waits for Arg + 1 wait states, so up to 8 per instruction.
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP))
        .addImm(Arg);
  }
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertWaitStates(MBB, MI, 1);
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator)
      BuildMI(MBB, MBB.end(), DebugLoc(),
              get(Info->returnsVoid() ? AMDGPU::S_ENDPGM : AMDGPU::SI_RETURN_TO_EPILOG));
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default: return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_XOR_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_XOR_B64));
    break;

  case AMDGPU::S_ANDN2_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_ANDN2_B64));
    break;

  case AMDGPU::V_MOV_B64_PSEUDO: {
    unsigned Dst = MI.getOperand(0).getReg();
    unsigned DstLo = RI.getSubReg(Dst, AMDGPU::sub0);
    unsigned DstHi = RI.getSubReg(Dst, AMDGPU::sub1);

    const MachineOperand &SrcOp = MI.getOperand(1);
    // FIXME: Will this work for 64-bit floating point immediates?
    assert(!SrcOp.isFPImm());
    if (SrcOp.isImm()) {
      APInt Imm(64, SrcOp.getImm());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
          .addImm(Imm.getLoBits(32).getZExtValue())
          .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
          .addImm(Imm.getHiBits(32).getZExtValue())
          .addReg(Dst, RegState::Implicit | RegState::Define);
    } else {
      assert(SrcOp.isReg());
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo)
          .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0))
          .addReg(Dst, RegState::Implicit | RegState::Define);
      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi)
          .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1))
          .addReg(Dst, RegState::Implicit | RegState::Define);
    }
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B32: {
    // Invert exec, write the value to the formerly inactive lanes, then
    // invert exec back.
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg())
        .add(MI.getOperand(2));
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_SET_INACTIVE_B64: {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
    MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO),
                                 MI.getOperand(0).getReg())
        .add(MI.getOperand(2));
    expandPostRAPseudo(*Copy);
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOT_B64), AMDGPU::EXEC)
        .addReg(AMDGPU::EXEC);
    MI.eraseFromParent();
    break;
  }
  case AMDGPU::V_MOVRELD_B32_V1:
  case AMDGPU::V_MOVRELD_B32_V2:
  case AMDGPU::V_MOVRELD_B32_V4:
  case AMDGPU::V_MOVRELD_B32_V8:
  case AMDGPU::V_MOVRELD_B32_V16: {
    const MCInstrDesc &MovRelDesc = get(AMDGPU::V_MOVRELD_B32_e32);
    unsigned VecReg = MI.getOperand(0).getReg();
    bool IsUndef = MI.getOperand(1).isUndef();
    unsigned SubReg = AMDGPU::sub0 + MI.getOperand(3).getImm();
    assert(VecReg == MI.getOperand(1).getReg());

    MachineInstr *MovRel =
        BuildMI(MBB, MI, DL, MovRelDesc)
            .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef)
            .add(MI.getOperand(2))
            .addReg(VecReg, RegState::ImplicitDefine)
            .addReg(VecReg,
                    RegState::Implicit | (IsUndef ? RegState::Undef : 0));

    const int ImpDefIdx =
        MovRelDesc.getNumOperands() + MovRelDesc.getNumImplicitUses();
    const int ImpUseIdx = ImpDefIdx + 1;
    MovRel->tieOperands(ImpDefIdx, ImpUseIdx);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    unsigned Reg = MI.getOperand(0).getReg();
    unsigned RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    unsigned RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    if (MI.getOperand(2).getTargetFlags() == SIInstrInfo::MO_NONE)
      MIB.addImm(0);
    else
      MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::EXIT_WWM: {
    // This only gets its own opcode so that SIFixWWMLiveness can tell when WWM
    // is exited.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;
  }
  case TargetOpcode::BUNDLE: {
    if (!MI.mayLoad())
      return false;

    // If it is a load it must be a memory clause
    for (MachineBasicBlock::instr_iterator I = MI.getIterator();
         I->isBundledWithSucc(); ++I) {
      I->unbundleFromSucc();
      for (MachineOperand &MO : I->operands())
        if (MO.isReg())
          MO.setIsInternalRead(false);
    }

    MI.eraseFromParent();
    break;
  }
  }
  return true;
}

bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI,
                                      MachineOperand &Src0,
                                      unsigned Src0OpName,
                                      MachineOperand &Src1,
                                      unsigned Src1OpName) const {
  MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName);
  if (!Src0Mods)
    return false;

  MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName);
  assert(Src1Mods &&
         "All commutable instructions have both src0 and src1 modifiers");

  int Src0ModsVal = Src0Mods->getImm();
  int Src1ModsVal = Src1Mods->getImm();

  Src1Mods->setImm(Src0ModsVal);
  Src0Mods->setImm(Src1ModsVal);
  return true;
}

static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI,
                                             MachineOperand &RegOp,
                                             MachineOperand &NonRegOp) {
  unsigned Reg = RegOp.getReg();
  unsigned SubReg = RegOp.getSubReg();
  bool IsKill = RegOp.isKill();
  bool IsDead = RegOp.isDead();
  bool IsUndef = RegOp.isUndef();
  bool IsDebug = RegOp.isDebug();

  if (NonRegOp.isImm())
    RegOp.ChangeToImmediate(NonRegOp.getImm());
  else if (NonRegOp.isFI())
    RegOp.ChangeToFrameIndex(NonRegOp.getIndex());
  else
    return nullptr;

  NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef,
                            IsDebug);
  NonRegOp.setSubReg(SubReg);

  return &MI;
}

MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
                                                  unsigned Src0Idx,
                                                  unsigned Src1Idx) const {
  assert(!NewMI && "this should never be used");

  unsigned Opc = MI.getOpcode();
  int CommutedOpcode = commuteOpcode(Opc);
  if (CommutedOpcode == -1)
    return nullptr;

  assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) ==
             static_cast<int>(Src0Idx) &&
         AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) ==
             static_cast<int>(Src1Idx) &&
         "inconsistency with findCommutedOpIndices");

  MachineOperand &Src0 = MI.getOperand(Src0Idx);
  MachineOperand &Src1 = MI.getOperand(Src1Idx);

  MachineInstr *CommutedMI = nullptr;
  if (Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0)) {
      // Be sure to copy the source modifiers to the right place.
      CommutedMI
        = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx);
    }

  } else if (Src0.isReg() && !Src1.isReg()) {
    // src0 should always be able to support any operand type, so no need to
    // check operand legality.
    CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1);
  } else if (!Src0.isReg() && Src1.isReg()) {
    if (isOperandLegal(MI, Src1Idx, &Src0))
      CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0);
  } else {
    // FIXME: Found two non registers to commute. This does happen.
    return nullptr;
  }

  if (CommutedMI) {
    swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers,
                        Src1, AMDGPU::OpName::src1_modifiers);

    CommutedMI->setDesc(get(CommutedOpcode));
  }

  return CommutedMI;
}

// This needs to be implemented because the source modifiers may be inserted
// between the true commutable operands, and the base
// TargetInstrInfo::commuteInstruction uses it.
bool SIInstrInfo::findCommutedOpIndices(MachineInstr &MI, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1);
}

bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0,
                                        unsigned &SrcOpIdx1) const {
  if (!Desc.isCommutable())
    return false;

  unsigned Opc = Desc.getOpcode();
  int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
  if (Src0Idx == -1)
    return false;

  int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
  if (Src1Idx == -1)
    return false;

  return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx);
}

bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                        int64_t BrOffset) const {
  // BranchRelaxation should never have to check s_setpc_b64 because its dest
  // block is unanalyzable.
  assert(BranchOp != AMDGPU::S_SETPC_B64);

  // Convert to dwords.
  BrOffset /= 4;

  // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is
  // from the next instruction.
  BrOffset -= 1;

  return isIntN(BranchOffsetBits, BrOffset);
}

MachineBasicBlock *SIInstrInfo::getBranchDestBlock(
  const MachineInstr &MI) const {
  if (MI.getOpcode() == AMDGPU::S_SETPC_B64) {
    // This would be a difficult analysis to perform, but can always be legal so
    // there's no need to analyze it.
    return nullptr;
  }

  return MI.getOperand(0).getMBB();
}

unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                           MachineBasicBlock &DestBB,
                                           const DebugLoc &DL,
                                           int64_t BrOffset,
                                           RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(MBB.pred_size() == 1);

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // FIXME: Virtual register workaround for RegScavenger not working with empty
  // blocks.
  unsigned PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);

  auto I = MBB.end();

  // We need to compute the offset relative to the instruction immediately
  // after s_getpc_b64. Insert pc arithmetic code before last terminator.
  MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg);

  // TODO: Handle > 32-bit block address.
  if (BrOffset >= 0) {
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32))
        .addReg(PCReg, RegState::Define, AMDGPU::sub0)
        .addReg(PCReg, 0, AMDGPU::sub0)
        .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_FORWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32))
        .addReg(PCReg, RegState::Define, AMDGPU::sub1)
        .addReg(PCReg, 0, AMDGPU::sub1)
        .addImm(0);
  } else {
    // Backwards branch.
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUB_U32))
        .addReg(PCReg, RegState::Define, AMDGPU::sub0)
        .addReg(PCReg, 0, AMDGPU::sub0)
        .addMBB(&DestBB, AMDGPU::TF_LONG_BRANCH_BACKWARD);
    BuildMI(MBB, I, DL, get(AMDGPU::S_SUBB_U32))
        .addReg(PCReg, RegState::Define, AMDGPU::sub1)
        .addReg(PCReg, 0, AMDGPU::sub1)
        .addImm(0);
  }

  // Insert the indirect branch after the other terminator.
  BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64))
      .addReg(PCReg);

  // FIXME: If spilling is necessary, this will fail because this scavenger has
  // no emergency stack slots. It is non-trivial to spill in this situation,
  // because the restore code needs to be specially placed after the
  // jump. BranchRelaxation then needs to be made aware of the newly inserted
  // block.
  //
  // If a spill is needed for the pc register pair, we need to insert a spill
  // restore block right before the destination block, and insert a short branch
  // into the old destination block's fallthrough predecessor.
  // e.g.:
  //
  // s_cbranch_scc0 skip_long_branch:
  //
  // long_branch_bb:
  //   spill s[8:9]
  //   s_getpc_b64 s[8:9]
  //   s_add_u32 s8, s8, restore_bb
  //   s_addc_u32 s9, s9, 0
  //   s_setpc_b64 s[8:9]
  //
  // skip_long_branch:
  //   foo;
  //
  // .....
  //
  // dest_bb_fallthrough_predecessor:
  //   bar;
  //   s_branch dest_bb
  //
  // restore_bb:
  //   restore s[8:9]
  //   fallthrough dest_bb
  //
  // dest_bb:
  //   buzz;

  RS->enterBasicBlockEnd(MBB);
  unsigned Scav = RS->scavengeRegisterBackwards(
      AMDGPU::SReg_64RegClass,
      MachineBasicBlock::iterator(GetPC), false, 0);
  MRI.replaceRegWith(PCReg, Scav);
  MRI.clearVirtRegs();
  RS->setRegUsed(Scav);

  // Size in bytes: s_getpc_b64 (4) + s_add/s_sub with a literal operand (8) +
  // the carry op (4) + s_setpc_b64 (4).
  return 4 + 8 + 4 + 4;
}

unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) {
  switch (Cond) {
  case SIInstrInfo::SCC_TRUE:
    return AMDGPU::S_CBRANCH_SCC1;
  case SIInstrInfo::SCC_FALSE:
    return AMDGPU::S_CBRANCH_SCC0;
  case SIInstrInfo::VCCNZ:
    return AMDGPU::S_CBRANCH_VCCNZ;
  case SIInstrInfo::VCCZ:
    return AMDGPU::S_CBRANCH_VCCZ;
  case SIInstrInfo::EXECNZ:
    return AMDGPU::S_CBRANCH_EXECNZ;
  case SIInstrInfo::EXECZ:
    return AMDGPU::S_CBRANCH_EXECZ;
  default:
    llvm_unreachable("invalid branch predicate");
  }
}

SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::S_CBRANCH_SCC0:
    return SCC_FALSE;
  case AMDGPU::S_CBRANCH_SCC1:
    return SCC_TRUE;
  case AMDGPU::S_CBRANCH_VCCNZ:
    return VCCNZ;
  case AMDGPU::S_CBRANCH_VCCZ:
    return VCCZ;
  case AMDGPU::S_CBRANCH_EXECNZ:
    return EXECNZ;
  case AMDGPU::S_CBRANCH_EXECZ:
    return EXECZ;
  default:
    return INVALID_BR;
  }
}

bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    MachineBasicBlock *&TBB,
                                    MachineBasicBlock *&FBB,
                                    SmallVectorImpl<MachineOperand> &Cond,
                                    bool AllowModify) const {
  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    // Unconditional Branch
    TBB = I->getOperand(0).getMBB();
    return false;
  }

  MachineBasicBlock *CondBB = nullptr;

  if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    CondBB = I->getOperand(1).getMBB();
    Cond.push_back(I->getOperand(0));
  } else {
    BranchPredicate Pred = getBranchPredicate(I->getOpcode());
    if (Pred == INVALID_BR)
      return true;

    CondBB = I->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(Pred));
    Cond.push_back(I->getOperand(1)); // Save the branch register.
  }
  ++I;

  if (I == MBB.end()) {
    // Conditional branch followed by fall-through.
    TBB = CondBB;
    return false;
  }

  if (I->getOpcode() == AMDGPU::S_BRANCH) {
    TBB = CondBB;
    FBB = I->getOperand(0).getMBB();
    return false;
  }

  return true;
}

bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                                MachineBasicBlock *&FBB,
                                SmallVectorImpl<MachineOperand> &Cond,
                                bool AllowModify) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();
  auto E = MBB.end();
  if (I == E)
    return false;

  // Skip over the instructions that are artificially terminators for special
  // exec management.
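  // The *_term opcodes are ordinary scalar ALU ops that are marked as
  // terminators only so spill code is placed correctly; they never transfer
  // control, so it is safe to scan past them.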
  while (I != E && !I->isBranch() && !I->isReturn() &&
         I->getOpcode() != AMDGPU::SI_MASK_BRANCH) {
    switch (I->getOpcode()) {
    case AMDGPU::SI_MASK_BRANCH:
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
      break;
    case AMDGPU::SI_IF:
    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      // FIXME: It's messy that these need to be considered here at all.
      return true;
    default:
      llvm_unreachable("unexpected non-branch terminator inst");
    }

    ++I;
  }

  if (I == E)
    return false;

  if (I->getOpcode() != AMDGPU::SI_MASK_BRANCH)
    return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);

  ++I;

  // TODO: Should be able to treat as fallthrough?
  if (I == MBB.end())
    return true;

  if (analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify))
    return true;

  MachineBasicBlock *MaskBrDest = I->getOperand(0).getMBB();

  // Specifically handle the case where the conditional branch is to the same
  // destination as the mask branch. e.g.
  //
  // si_mask_branch BB8
  // s_cbranch_execz BB8
  // s_cbranch BB9
  //
  // This is required to understand divergent loops which may need the branches
  // to be relaxed.
  if (TBB != MaskBrDest || Cond.empty())
    return true;

  auto Pred = Cond[0].getImm();
  return (Pred != EXECZ && Pred != EXECNZ);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  MachineBasicBlock::iterator I = MBB.getFirstTerminator();

  unsigned Count = 0;
  unsigned RemovedSize = 0;
  while (I != MBB.end()) {
    MachineBasicBlock::iterator Next = std::next(I);
    if (I->getOpcode() == AMDGPU::SI_MASK_BRANCH) {
      I = Next;
      continue;
    }

    RemovedSize += getInstSizeInBytes(*I);
    I->eraseFromParent();
    ++Count;
    I = Next;
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

// Copy the flags onto the implicit condition register operand.
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
        .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
        .add(Cond[0])
        .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
          .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
1778 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 1779 1780 if (BytesAdded) 1781 *BytesAdded = 4; 1782 return 1; 1783 } 1784 1785 assert(TBB && FBB); 1786 1787 MachineInstr *CondBr = 1788 BuildMI(&MBB, DL, get(Opcode)) 1789 .addMBB(TBB); 1790 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 1791 .addMBB(FBB); 1792 1793 MachineOperand &CondReg = CondBr->getOperand(1); 1794 CondReg.setIsUndef(Cond[1].isUndef()); 1795 CondReg.setIsKill(Cond[1].isKill()); 1796 1797 if (BytesAdded) 1798 *BytesAdded = 8; 1799 1800 return 2; 1801 } 1802 1803 bool SIInstrInfo::reverseBranchCondition( 1804 SmallVectorImpl<MachineOperand> &Cond) const { 1805 if (Cond.size() != 2) { 1806 return true; 1807 } 1808 1809 if (Cond[0].isImm()) { 1810 Cond[0].setImm(-Cond[0].getImm()); 1811 return false; 1812 } 1813 1814 return true; 1815 } 1816 1817 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 1818 ArrayRef<MachineOperand> Cond, 1819 unsigned TrueReg, unsigned FalseReg, 1820 int &CondCycles, 1821 int &TrueCycles, int &FalseCycles) const { 1822 switch (Cond[0].getImm()) { 1823 case VCCNZ: 1824 case VCCZ: { 1825 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1826 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 1827 assert(MRI.getRegClass(FalseReg) == RC); 1828 1829 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 1830 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 1831 1832 // Limit to equal cost for branch vs. N v_cndmask_b32s. 1833 return !RI.isSGPRClass(RC) && NumInsts <= 6; 1834 } 1835 case SCC_TRUE: 1836 case SCC_FALSE: { 1837 // FIXME: We could insert for VGPRs if we could replace the original compare 1838 // with a vector one. 1839 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1840 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 1841 assert(MRI.getRegClass(FalseReg) == RC); 1842 1843 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 1844 1845 // Multiples of 8 can do s_cselect_b64 1846 if (NumInsts % 2 == 0) 1847 NumInsts /= 2; 1848 1849 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 1850 return RI.isSGPRClass(RC); 1851 } 1852 default: 1853 return false; 1854 } 1855 } 1856 1857 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 1858 MachineBasicBlock::iterator I, const DebugLoc &DL, 1859 unsigned DstReg, ArrayRef<MachineOperand> Cond, 1860 unsigned TrueReg, unsigned FalseReg) const { 1861 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 1862 if (Pred == VCCZ || Pred == SCC_FALSE) { 1863 Pred = static_cast<BranchPredicate>(-Pred); 1864 std::swap(TrueReg, FalseReg); 1865 } 1866 1867 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1868 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 1869 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 1870 1871 if (DstSize == 32) { 1872 unsigned SelOp = Pred == SCC_TRUE ? 1873 AMDGPU::S_CSELECT_B32 : AMDGPU::V_CNDMASK_B32_e32; 1874 1875 // Instruction's operands are backwards from what is expected. 
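// (Concretely, for the VALU form: v_cndmask_b32 dst, src0, src1 takes src1 when the corresponding VCC bit is set, so the "false" value goes in src0 and the "true" value in src1, which is why FalseReg is added first below.)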
1876 MachineInstr *Select = 1877 BuildMI(MBB, I, DL, get(SelOp), DstReg) 1878 .addReg(FalseReg) 1879 .addReg(TrueReg); 1880 1881 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1882 return; 1883 } 1884 1885 if (DstSize == 64 && Pred == SCC_TRUE) { 1886 MachineInstr *Select = 1887 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 1888 .addReg(FalseReg) 1889 .addReg(TrueReg); 1890 1891 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1892 return; 1893 } 1894 1895 static const int16_t Sub0_15[] = { 1896 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 1897 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 1898 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 1899 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 1900 }; 1901 1902 static const int16_t Sub0_15_64[] = { 1903 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 1904 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 1905 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 1906 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 1907 }; 1908 1909 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 1910 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 1911 const int16_t *SubIndices = Sub0_15; 1912 int NElts = DstSize / 32; 1913 1914 // 64-bit select is only available for SALU. 1915 if (Pred == SCC_TRUE) { 1916 SelOp = AMDGPU::S_CSELECT_B64; 1917 EltRC = &AMDGPU::SGPR_64RegClass; 1918 SubIndices = Sub0_15_64; 1919 1920 assert(NElts % 2 == 0); 1921 NElts /= 2; 1922 } 1923 1924 MachineInstrBuilder MIB = BuildMI( 1925 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 1926 1927 I = MIB->getIterator(); 1928 1929 SmallVector<unsigned, 8> Regs; 1930 for (int Idx = 0; Idx != NElts; ++Idx) { 1931 unsigned DstElt = MRI.createVirtualRegister(EltRC); 1932 Regs.push_back(DstElt); 1933 1934 unsigned SubIdx = SubIndices[Idx]; 1935 1936 MachineInstr *Select = 1937 BuildMI(MBB, I, DL, get(SelOp), DstElt) 1938 .addReg(FalseReg, 0, SubIdx) 1939 .addReg(TrueReg, 0, SubIdx); 1940 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 1941 1942 MIB.addReg(DstElt) 1943 .addImm(SubIdx); 1944 } 1945 } 1946 1947 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) const { 1948 switch (MI.getOpcode()) { 1949 case AMDGPU::V_MOV_B32_e32: 1950 case AMDGPU::V_MOV_B32_e64: 1951 case AMDGPU::V_MOV_B64_PSEUDO: { 1952 // If there are additional implicit register operands, this may be used for 1953 // register indexing so the source register operand isn't simply copied.
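// (For instance, a v_mov_b32 that is part of an indirect-indexing expansion may carry an extra implicit use of the index register such as M0; such a mov no longer behaves as a plain copy of its source operand.)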
1954 unsigned NumOps = MI.getDesc().getNumOperands() + 1955 MI.getDesc().getNumImplicitUses(); 1956 1957 return MI.getNumOperands() == NumOps; 1958 } 1959 case AMDGPU::S_MOV_B32: 1960 case AMDGPU::S_MOV_B64: 1961 case AMDGPU::COPY: 1962 return true; 1963 default: 1964 return false; 1965 } 1966 } 1967 1968 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 1969 unsigned Kind) const { 1970 switch(Kind) { 1971 case PseudoSourceValue::Stack: 1972 case PseudoSourceValue::FixedStack: 1973 return AMDGPUAS::PRIVATE_ADDRESS; 1974 case PseudoSourceValue::ConstantPool: 1975 case PseudoSourceValue::GOT: 1976 case PseudoSourceValue::JumpTable: 1977 case PseudoSourceValue::GlobalValueCallEntry: 1978 case PseudoSourceValue::ExternalSymbolCallEntry: 1979 case PseudoSourceValue::TargetCustom: 1980 return AMDGPUAS::CONSTANT_ADDRESS; 1981 } 1982 return AMDGPUAS::FLAT_ADDRESS; 1983 } 1984 1985 static void removeModOperands(MachineInstr &MI) { 1986 unsigned Opc = MI.getOpcode(); 1987 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1988 AMDGPU::OpName::src0_modifiers); 1989 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1990 AMDGPU::OpName::src1_modifiers); 1991 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 1992 AMDGPU::OpName::src2_modifiers); 1993 1994 MI.RemoveOperand(Src2ModIdx); 1995 MI.RemoveOperand(Src1ModIdx); 1996 MI.RemoveOperand(Src0ModIdx); 1997 } 1998 1999 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2000 unsigned Reg, MachineRegisterInfo *MRI) const { 2001 if (!MRI->hasOneNonDBGUse(Reg)) 2002 return false; 2003 2004 switch (DefMI.getOpcode()) { 2005 default: 2006 return false; 2007 case AMDGPU::S_MOV_B64: 2008 // TODO: We could fold 64-bit immediates, but this gets complicated 2009 // when there are sub-registers. 2010 return false; 2011 2012 case AMDGPU::V_MOV_B32_e32: 2013 case AMDGPU::S_MOV_B32: 2014 break; 2015 } 2016 2017 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2018 assert(ImmOp); 2019 // FIXME: We could handle FrameIndex values here. 2020 if (!ImmOp->isImm()) 2021 return false; 2022 2023 unsigned Opc = UseMI.getOpcode(); 2024 if (Opc == AMDGPU::COPY) { 2025 bool isVGPRCopy = RI.isVGPR(*MRI, UseMI.getOperand(0).getReg()); 2026 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2027 UseMI.setDesc(get(NewOpc)); 2028 UseMI.getOperand(1).ChangeToImmediate(ImmOp->getImm()); 2029 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2030 return true; 2031 } 2032 2033 if (Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64 || 2034 Opc == AMDGPU::V_MAD_F16 || Opc == AMDGPU::V_MAC_F16_e64) { 2035 // Don't fold if we are using source or output modifiers. The new VOP2 2036 // instructions don't have them. 2037 if (hasAnyModifiersSet(UseMI)) 2038 return false; 2039 2040 // If this is a free constant, there's no reason to do this. 2041 // TODO: We could fold this here instead of letting SIFoldOperands do it 2042 // later. 2043 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2044 2045 // Any src operand can be used for the legality check. 2046 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2047 return false; 2048 2049 bool IsF32 = Opc == AMDGPU::V_MAD_F32 || Opc == AMDGPU::V_MAC_F32_e64; 2050 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2051 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2052 2053 // Multiplied part is the constant: Use v_madmk_{f16, f32}.
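// (For reference: v_madmk_f32 dst, src0, K, src1 computes dst = src0 * K + src1, with K a 32-bit literal, so the folded immediate takes the place of one of the multiplied sources.)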
2054 // We should only expect these to be on src0 due to canonicalizations. 2055 if (Src0->isReg() && Src0->getReg() == Reg) { 2056 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2057 return false; 2058 2059 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2060 return false; 2061 2062 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2063 2064 const int64_t Imm = ImmOp->getImm(); 2065 2066 // FIXME: This would be a lot easier if we could return a new instruction 2067 // instead of having to modify in place. 2068 2069 // Remove these first since they are at the end. 2070 UseMI.RemoveOperand( 2071 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2072 UseMI.RemoveOperand( 2073 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2074 2075 unsigned Src1Reg = Src1->getReg(); 2076 unsigned Src1SubReg = Src1->getSubReg(); 2077 Src0->setReg(Src1Reg); 2078 Src0->setSubReg(Src1SubReg); 2079 Src0->setIsKill(Src1->isKill()); 2080 2081 if (Opc == AMDGPU::V_MAC_F32_e64 || 2082 Opc == AMDGPU::V_MAC_F16_e64) 2083 UseMI.untieRegOperand( 2084 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2085 2086 Src1->ChangeToImmediate(Imm); 2087 2088 removeModOperands(UseMI); 2089 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16)); 2090 2091 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2092 if (DeleteDef) 2093 DefMI.eraseFromParent(); 2094 2095 return true; 2096 } 2097 2098 // Added part is the constant: Use v_madak_{f16, f32}. 2099 if (Src2->isReg() && Src2->getReg() == Reg) { 2100 // Not allowed to use constant bus for another operand. 2101 // We can however allow an inline immediate as src0. 2102 bool Src0Inlined = false; 2103 if (Src0->isReg()) { 2104 // Try to inline constant if possible. 2105 // If the Def moves immediate and the use is single 2106 // We are saving VGPR here. 2107 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2108 if (Def && Def->isMoveImmediate() && 2109 isInlineConstant(Def->getOperand(1)) && 2110 MRI->hasOneUse(Src0->getReg())) { 2111 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2112 Src0Inlined = true; 2113 } else if ((RI.isPhysicalRegister(Src0->getReg()) && 2114 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg()))) || 2115 (RI.isVirtualRegister(Src0->getReg()) && 2116 RI.isSGPRClass(MRI->getRegClass(Src0->getReg())))) 2117 return false; 2118 // VGPR is okay as Src0 - fallthrough 2119 } 2120 2121 if (Src1->isReg() && !Src0Inlined ) { 2122 // We have one slot for inlinable constant so far - try to fill it 2123 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2124 if (Def && Def->isMoveImmediate() && 2125 isInlineConstant(Def->getOperand(1)) && 2126 MRI->hasOneUse(Src1->getReg()) && 2127 commuteInstruction(UseMI)) { 2128 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2129 } else if ((RI.isPhysicalRegister(Src1->getReg()) && 2130 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2131 (RI.isVirtualRegister(Src1->getReg()) && 2132 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2133 return false; 2134 // VGPR is okay as Src1 - fallthrough 2135 } 2136 2137 const int64_t Imm = ImmOp->getImm(); 2138 2139 // FIXME: This would be a lot easier if we could return a new instruction 2140 // instead of having to modify in place. 2141 2142 // Remove these first since they are at the end. 
2143 UseMI.RemoveOperand( 2144 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2145 UseMI.RemoveOperand( 2146 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2147 2148 if (Opc == AMDGPU::V_MAC_F32_e64 || 2149 Opc == AMDGPU::V_MAC_F16_e64) 2150 UseMI.untieRegOperand( 2151 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2152 2153 // ChangingToImmediate adds Src2 back to the instruction. 2154 Src2->ChangeToImmediate(Imm); 2155 2156 // These come before src2. 2157 removeModOperands(UseMI); 2158 UseMI.setDesc(get(IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16)); 2159 2160 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2161 if (DeleteDef) 2162 DefMI.eraseFromParent(); 2163 2164 return true; 2165 } 2166 } 2167 2168 return false; 2169 } 2170 2171 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 2172 int WidthB, int OffsetB) { 2173 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 2174 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 2175 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 2176 return LowOffset + LowWidth <= HighOffset; 2177 } 2178 2179 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(MachineInstr &MIa, 2180 MachineInstr &MIb) const { 2181 MachineOperand *BaseOp0, *BaseOp1; 2182 int64_t Offset0, Offset1; 2183 2184 if (getMemOperandWithOffset(MIa, BaseOp0, Offset0, &RI) && 2185 getMemOperandWithOffset(MIb, BaseOp1, Offset1, &RI)) { 2186 if (!BaseOp0->isIdenticalTo(*BaseOp1)) 2187 return false; 2188 2189 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 2190 // FIXME: Handle ds_read2 / ds_write2. 2191 return false; 2192 } 2193 unsigned Width0 = (*MIa.memoperands_begin())->getSize(); 2194 unsigned Width1 = (*MIb.memoperands_begin())->getSize(); 2195 if (offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1)) { 2196 return true; 2197 } 2198 } 2199 2200 return false; 2201 } 2202 2203 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(MachineInstr &MIa, 2204 MachineInstr &MIb, 2205 AliasAnalysis *AA) const { 2206 assert((MIa.mayLoad() || MIa.mayStore()) && 2207 "MIa must load from or modify a memory location"); 2208 assert((MIb.mayLoad() || MIb.mayStore()) && 2209 "MIb must load from or modify a memory location"); 2210 2211 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 2212 return false; 2213 2214 // XXX - Can we relax this between address spaces? 2215 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 2216 return false; 2217 2218 // TODO: Should we check the address space from the MachineMemOperand? That 2219 // would allow us to distinguish objects we know don't alias based on the 2220 // underlying address space, even if it was lowered to a different one, 2221 // e.g. private accesses lowered to use MUBUF instructions on a scratch 2222 // buffer. 
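// (Informally, the checks below treat accesses that reach memory through different hardware paths (DS/LDS, buffer MUBUF/MTBUF, scalar SMRD) as disjoint only where that is known to be safe; generic FLAT accesses are conservatively assumed to alias the others, and accesses on the same path fall back to comparing base operands and offsets via checkInstOffsetsDoNotOverlap.)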
2223 if (isDS(MIa)) { 2224 if (isDS(MIb)) 2225 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2226 2227 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 2228 } 2229 2230 if (isMUBUF(MIa) || isMTBUF(MIa)) { 2231 if (isMUBUF(MIb) || isMTBUF(MIb)) 2232 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2233 2234 return !isFLAT(MIb) && !isSMRD(MIb); 2235 } 2236 2237 if (isSMRD(MIa)) { 2238 if (isSMRD(MIb)) 2239 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2240 2241 return !isFLAT(MIb) && !isMUBUF(MIa) && !isMTBUF(MIa); 2242 } 2243 2244 if (isFLAT(MIa)) { 2245 if (isFLAT(MIb)) 2246 return checkInstOffsetsDoNotOverlap(MIa, MIb); 2247 2248 return false; 2249 } 2250 2251 return false; 2252 } 2253 2254 static int64_t getFoldableImm(const MachineOperand* MO) { 2255 if (!MO->isReg()) 2256 return false; 2257 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 2258 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2259 auto Def = MRI.getUniqueVRegDef(MO->getReg()); 2260 if (Def && Def->getOpcode() == AMDGPU::V_MOV_B32_e32 && 2261 Def->getOperand(1).isImm()) 2262 return Def->getOperand(1).getImm(); 2263 return AMDGPU::NoRegister; 2264 } 2265 2266 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 2267 MachineInstr &MI, 2268 LiveVariables *LV) const { 2269 unsigned Opc = MI.getOpcode(); 2270 bool IsF16 = false; 2271 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64; 2272 2273 switch (Opc) { 2274 default: 2275 return nullptr; 2276 case AMDGPU::V_MAC_F16_e64: 2277 IsF16 = true; 2278 LLVM_FALLTHROUGH; 2279 case AMDGPU::V_MAC_F32_e64: 2280 case AMDGPU::V_FMAC_F32_e64: 2281 break; 2282 case AMDGPU::V_MAC_F16_e32: 2283 IsF16 = true; 2284 LLVM_FALLTHROUGH; 2285 case AMDGPU::V_MAC_F32_e32: 2286 case AMDGPU::V_FMAC_F32_e32: { 2287 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 2288 AMDGPU::OpName::src0); 2289 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 2290 if (!Src0->isReg() && !Src0->isImm()) 2291 return nullptr; 2292 2293 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 2294 return nullptr; 2295 2296 break; 2297 } 2298 } 2299 2300 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 2301 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 2302 const MachineOperand *Src0Mods = 2303 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 2304 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2305 const MachineOperand *Src1Mods = 2306 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 2307 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2308 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2309 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 2310 2311 if (!IsFMA && !Src0Mods && !Src1Mods && !Clamp && !Omod && 2312 // If we have an SGPR input, we will violate the constant bus restriction. 2313 (!Src0->isReg() || !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 2314 if (auto Imm = getFoldableImm(Src2)) { 2315 return BuildMI(*MBB, MI, MI.getDebugLoc(), 2316 get(IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32)) 2317 .add(*Dst) 2318 .add(*Src0) 2319 .add(*Src1) 2320 .addImm(Imm); 2321 } 2322 if (auto Imm = getFoldableImm(Src1)) { 2323 return BuildMI(*MBB, MI, MI.getDebugLoc(), 2324 get(IsF16 ? 
AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32)) 2325 .add(*Dst) 2326 .add(*Src0) 2327 .addImm(Imm) 2328 .add(*Src2); 2329 } 2330 if (auto Imm = getFoldableImm(Src0)) { 2331 if (isOperandLegal(MI, AMDGPU::getNamedOperandIdx(AMDGPU::V_MADMK_F32, 2332 AMDGPU::OpName::src0), Src1)) 2333 return BuildMI(*MBB, MI, MI.getDebugLoc(), 2334 get(IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32)) 2335 .add(*Dst) 2336 .add(*Src1) 2337 .addImm(Imm) 2338 .add(*Src2); 2339 } 2340 } 2341 2342 assert((!IsFMA || !IsF16) && "fmac only expected with f32"); 2343 unsigned NewOpc = IsFMA ? AMDGPU::V_FMA_F32 : 2344 (IsF16 ? AMDGPU::V_MAD_F16 : AMDGPU::V_MAD_F32); 2345 return BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 2346 .add(*Dst) 2347 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 2348 .add(*Src0) 2349 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 2350 .add(*Src1) 2351 .addImm(0) // Src mods 2352 .add(*Src2) 2353 .addImm(Clamp ? Clamp->getImm() : 0) 2354 .addImm(Omod ? Omod->getImm() : 0); 2355 } 2356 2357 // It's not generally safe to move VALU instructions across these since it will 2358 // start using the register as a base index rather than directly. 2359 // XXX - Why isn't hasSideEffects sufficient for these? 2360 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 2361 switch (MI.getOpcode()) { 2362 case AMDGPU::S_SET_GPR_IDX_ON: 2363 case AMDGPU::S_SET_GPR_IDX_MODE: 2364 case AMDGPU::S_SET_GPR_IDX_OFF: 2365 return true; 2366 default: 2367 return false; 2368 } 2369 } 2370 2371 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 2372 const MachineBasicBlock *MBB, 2373 const MachineFunction &MF) const { 2374 // XXX - Do we want the SP check in the base implementation? 2375 2376 // Target-independent instructions do not have an implicit-use of EXEC, even 2377 // when they operate on VGPRs. Treating EXEC modifications as scheduling 2378 // boundaries prevents incorrect movements of such instructions. 2379 return TargetInstrInfo::isSchedulingBoundary(MI, MBB, MF) || 2380 MI.modifiesRegister(AMDGPU::EXEC, &RI) || 2381 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 2382 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 2383 changesVGPRIndexingMode(MI); 2384 } 2385 2386 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 2387 return Opcode == AMDGPU::DS_ORDERED_COUNT || 2388 Opcode == AMDGPU::DS_GWS_INIT || 2389 Opcode == AMDGPU::DS_GWS_SEMA_V || 2390 Opcode == AMDGPU::DS_GWS_SEMA_BR || 2391 Opcode == AMDGPU::DS_GWS_SEMA_P || 2392 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 2393 Opcode == AMDGPU::DS_GWS_BARRIER; 2394 } 2395 2396 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 2397 unsigned Opcode = MI.getOpcode(); 2398 2399 if (MI.mayStore() && isSMRD(MI)) 2400 return true; // scalar store or atomic 2401 2402 // These instructions cause shader I/O that may cause hardware lockups 2403 // when executed with an empty EXEC mask. 2404 // 2405 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 2406 // EXEC = 0, but checking for that case here seems not worth it 2407 // given the typical code patterns. 2408 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 2409 Opcode == AMDGPU::EXP || Opcode == AMDGPU::EXP_DONE || 2410 Opcode == AMDGPU::DS_ORDERED_COUNT) 2411 return true; 2412 2413 if (MI.isInlineAsm()) 2414 return true; // conservative assumption 2415 2416 // These are like SALU instructions in terms of effects, so it's questionable 2417 // whether we should return true for those. 
2418 // 2419 // However, executing them with EXEC = 0 causes them to operate on undefined 2420 // data, which we avoid by returning true here. 2421 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || Opcode == AMDGPU::V_READLANE_B32) 2422 return true; 2423 2424 return false; 2425 } 2426 2427 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 2428 switch (Imm.getBitWidth()) { 2429 case 32: 2430 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 2431 ST.hasInv2PiInlineImm()); 2432 case 64: 2433 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 2434 ST.hasInv2PiInlineImm()); 2435 case 16: 2436 return ST.has16BitInsts() && 2437 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 2438 ST.hasInv2PiInlineImm()); 2439 default: 2440 llvm_unreachable("invalid bitwidth"); 2441 } 2442 } 2443 2444 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 2445 uint8_t OperandType) const { 2446 if (!MO.isImm() || 2447 OperandType < AMDGPU::OPERAND_SRC_FIRST || 2448 OperandType > AMDGPU::OPERAND_SRC_LAST) 2449 return false; 2450 2451 // MachineOperand provides no way to tell the true operand size, since it only 2452 // records a 64-bit value. We need to know the size to determine if a 32-bit 2453 // floating point immediate bit pattern is legal for an integer immediate. It 2454 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 2455 2456 int64_t Imm = MO.getImm(); 2457 switch (OperandType) { 2458 case AMDGPU::OPERAND_REG_IMM_INT32: 2459 case AMDGPU::OPERAND_REG_IMM_FP32: 2460 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2461 case AMDGPU::OPERAND_REG_INLINE_C_FP32: { 2462 int32_t Trunc = static_cast<int32_t>(Imm); 2463 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 2464 } 2465 case AMDGPU::OPERAND_REG_IMM_INT64: 2466 case AMDGPU::OPERAND_REG_IMM_FP64: 2467 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2468 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2469 return AMDGPU::isInlinableLiteral64(MO.getImm(), 2470 ST.hasInv2PiInlineImm()); 2471 case AMDGPU::OPERAND_REG_IMM_INT16: 2472 case AMDGPU::OPERAND_REG_IMM_FP16: 2473 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2474 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2475 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 2476 // A few special case instructions have 16-bit operands on subtargets 2477 // where 16-bit instructions are not legal. 2478 // TODO: Do the 32-bit immediates work? 
We shouldn't really need to handle 2479 // constants in these cases 2480 int16_t Trunc = static_cast<int16_t>(Imm); 2481 return ST.has16BitInsts() && 2482 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2483 } 2484 2485 return false; 2486 } 2487 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 2488 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: { 2489 if (isUInt<16>(Imm)) { 2490 int16_t Trunc = static_cast<int16_t>(Imm); 2491 return ST.has16BitInsts() && 2492 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 2493 } 2494 if (!(Imm & 0xffff)) { 2495 return ST.has16BitInsts() && 2496 AMDGPU::isInlinableLiteral16(Imm >> 16, ST.hasInv2PiInlineImm()); 2497 } 2498 uint32_t Trunc = static_cast<uint32_t>(Imm); 2499 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 2500 } 2501 default: 2502 llvm_unreachable("invalid bitwidth"); 2503 } 2504 } 2505 2506 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 2507 const MCOperandInfo &OpInfo) const { 2508 switch (MO.getType()) { 2509 case MachineOperand::MO_Register: 2510 return false; 2511 case MachineOperand::MO_Immediate: 2512 return !isInlineConstant(MO, OpInfo); 2513 case MachineOperand::MO_FrameIndex: 2514 case MachineOperand::MO_MachineBasicBlock: 2515 case MachineOperand::MO_ExternalSymbol: 2516 case MachineOperand::MO_GlobalAddress: 2517 case MachineOperand::MO_MCSymbol: 2518 return true; 2519 default: 2520 llvm_unreachable("unexpected operand type"); 2521 } 2522 } 2523 2524 static bool compareMachineOp(const MachineOperand &Op0, 2525 const MachineOperand &Op1) { 2526 if (Op0.getType() != Op1.getType()) 2527 return false; 2528 2529 switch (Op0.getType()) { 2530 case MachineOperand::MO_Register: 2531 return Op0.getReg() == Op1.getReg(); 2532 case MachineOperand::MO_Immediate: 2533 return Op0.getImm() == Op1.getImm(); 2534 default: 2535 llvm_unreachable("Didn't expect to be comparing these operand types"); 2536 } 2537 } 2538 2539 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 2540 const MachineOperand &MO) const { 2541 const MCOperandInfo &OpInfo = get(MI.getOpcode()).OpInfo[OpNo]; 2542 2543 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 2544 2545 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 2546 return true; 2547 2548 if (OpInfo.RegClass < 0) 2549 return false; 2550 2551 if (MO.isImm() && isInlineConstant(MO, OpInfo)) 2552 return RI.opCanUseInlineConstant(OpInfo.OperandType); 2553 2554 return RI.opCanUseLiteralConstant(OpInfo.OperandType); 2555 } 2556 2557 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 2558 int Op32 = AMDGPU::getVOPe32(Opcode); 2559 if (Op32 == -1) 2560 return false; 2561 2562 return pseudoToMCOpcode(Op32) != -1; 2563 } 2564 2565 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 2566 // The src0_modifier operand is present on all instructions 2567 // that have modifiers. 
2568 2569 return AMDGPU::getNamedOperandIdx(Opcode, 2570 AMDGPU::OpName::src0_modifiers) != -1; 2571 } 2572 2573 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 2574 unsigned OpName) const { 2575 const MachineOperand *Mods = getNamedOperand(MI, OpName); 2576 return Mods && Mods->getImm(); 2577 } 2578 2579 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 2580 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 2581 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 2582 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 2583 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 2584 hasModifiersSet(MI, AMDGPU::OpName::omod); 2585 } 2586 2587 bool SIInstrInfo::canShrink(const MachineInstr &MI, 2588 const MachineRegisterInfo &MRI) const { 2589 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2590 // Can't shrink instruction with three operands. 2591 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 2592 // a special case for it. It can only be shrunk if the third operand 2593 // is vcc. We should handle this the same way we handle vopc, by adding 2594 // a register allocation hint pre-regalloc and then doing the shrinking 2595 // post-regalloc. 2596 if (Src2) { 2597 switch (MI.getOpcode()) { 2598 default: return false; 2599 2600 case AMDGPU::V_ADDC_U32_e64: 2601 case AMDGPU::V_SUBB_U32_e64: 2602 case AMDGPU::V_SUBBREV_U32_e64: { 2603 const MachineOperand *Src1 2604 = getNamedOperand(MI, AMDGPU::OpName::src1); 2605 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 2606 return false; 2607 // Additional verification is needed for sdst/src2. 2608 return true; 2609 } 2610 case AMDGPU::V_MAC_F32_e64: 2611 case AMDGPU::V_MAC_F16_e64: 2612 case AMDGPU::V_FMAC_F32_e64: 2613 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 2614 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 2615 return false; 2616 break; 2617 2618 case AMDGPU::V_CNDMASK_B32_e64: 2619 break; 2620 } 2621 } 2622 2623 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2624 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 2625 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 2626 return false; 2627 2628 // We don't need to check src0, all input types are legal, so just make sure 2629 // src0 isn't using any modifiers. 2630 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 2631 return false; 2632 2633 // Can it be shrunk to a valid 32 bit opcode? 2634 if (!hasVALU32BitEncoding(MI.getOpcode())) 2635 return false; 2636 2637 // Check output modifiers 2638 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 2639 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 2640 } 2641 2642 // Set VCC operand with all flags from \p Orig, except for setting it as 2643 // implicit. 2644 static void copyFlagsToImplicitVCC(MachineInstr &MI, 2645 const MachineOperand &Orig) { 2646 2647 for (MachineOperand &Use : MI.implicit_operands()) { 2648 if (Use.isUse() && Use.getReg() == AMDGPU::VCC) { 2649 Use.setIsUndef(Orig.isUndef()); 2650 Use.setIsKill(Orig.isKill()); 2651 return; 2652 } 2653 } 2654 } 2655 2656 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 2657 unsigned Op32) const { 2658 MachineBasicBlock *MBB = MI.getParent(); 2659 MachineInstrBuilder Inst32 = 2660 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)); 2661 2662 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 2663 // For VOPC instructions, this is replaced by an implicit def of vcc.
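// (For example, the VOP3 form v_cmp_lt_f32_e64 writes an explicit SGPR-pair result, while the shrunk v_cmp_lt_f32_e32 form has no explicit vdst and defines vcc implicitly instead.)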
2664 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 2665 if (Op32DstIdx != -1) { 2666 // dst 2667 Inst32.add(MI.getOperand(0)); 2668 } else { 2669 assert(MI.getOperand(0).getReg() == AMDGPU::VCC && 2670 "Unexpected case"); 2671 } 2672 2673 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 2674 2675 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 2676 if (Src1) 2677 Inst32.add(*Src1); 2678 2679 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 2680 2681 if (Src2) { 2682 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 2683 if (Op32Src2Idx != -1) { 2684 Inst32.add(*Src2); 2685 } else { 2686 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 2687 // replaced with an implicit read of vcc. This was already added 2688 // during the initial BuildMI, so find it to preserve the flags. 2689 copyFlagsToImplicitVCC(*Inst32, *Src2); 2690 } 2691 } 2692 2693 return Inst32; 2694 } 2695 2696 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 2697 const MachineOperand &MO, 2698 const MCOperandInfo &OpInfo) const { 2699 // Literal constants use the constant bus. 2700 //if (isLiteralConstantLike(MO, OpInfo)) 2701 // return true; 2702 if (MO.isImm()) 2703 return !isInlineConstant(MO, OpInfo); 2704 2705 if (!MO.isReg()) 2706 return true; // Misc other operands like FrameIndex 2707 2708 if (!MO.isUse()) 2709 return false; 2710 2711 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 2712 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 2713 2714 // FLAT_SCR is just an SGPR pair. 2715 if (!MO.isImplicit() && (MO.getReg() == AMDGPU::FLAT_SCR)) 2716 return true; 2717 2718 // EXEC register uses the constant bus. 2719 if (!MO.isImplicit() && MO.getReg() == AMDGPU::EXEC) 2720 return true; 2721 2722 // SGPRs use the constant bus 2723 return (MO.getReg() == AMDGPU::VCC || MO.getReg() == AMDGPU::M0 || 2724 (!MO.isImplicit() && 2725 (AMDGPU::SGPR_32RegClass.contains(MO.getReg()) || 2726 AMDGPU::SGPR_64RegClass.contains(MO.getReg())))); 2727 } 2728 2729 static unsigned findImplicitSGPRRead(const MachineInstr &MI) { 2730 for (const MachineOperand &MO : MI.implicit_operands()) { 2731 // We only care about reads. 
2732 if (MO.isDef()) 2733 continue; 2734 2735 switch (MO.getReg()) { 2736 case AMDGPU::VCC: 2737 case AMDGPU::M0: 2738 case AMDGPU::FLAT_SCR: 2739 return MO.getReg(); 2740 2741 default: 2742 break; 2743 } 2744 } 2745 2746 return AMDGPU::NoRegister; 2747 } 2748 2749 static bool shouldReadExec(const MachineInstr &MI) { 2750 if (SIInstrInfo::isVALU(MI)) { 2751 switch (MI.getOpcode()) { 2752 case AMDGPU::V_READLANE_B32: 2753 case AMDGPU::V_READLANE_B32_si: 2754 case AMDGPU::V_READLANE_B32_vi: 2755 case AMDGPU::V_WRITELANE_B32: 2756 case AMDGPU::V_WRITELANE_B32_si: 2757 case AMDGPU::V_WRITELANE_B32_vi: 2758 return false; 2759 } 2760 2761 return true; 2762 } 2763 2764 if (SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 2765 SIInstrInfo::isSALU(MI) || 2766 SIInstrInfo::isSMRD(MI)) 2767 return false; 2768 2769 return true; 2770 } 2771 2772 static bool isSubRegOf(const SIRegisterInfo &TRI, 2773 const MachineOperand &SuperVec, 2774 const MachineOperand &SubReg) { 2775 if (TargetRegisterInfo::isPhysicalRegister(SubReg.getReg())) 2776 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 2777 2778 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 2779 SubReg.getReg() == SuperVec.getReg(); 2780 } 2781 2782 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 2783 StringRef &ErrInfo) const { 2784 uint16_t Opcode = MI.getOpcode(); 2785 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 2786 return true; 2787 2788 const MachineFunction *MF = MI.getParent()->getParent(); 2789 const MachineRegisterInfo &MRI = MF->getRegInfo(); 2790 2791 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 2792 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 2793 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 2794 2795 // Make sure the number of operands is correct. 2796 const MCInstrDesc &Desc = get(Opcode); 2797 if (!Desc.isVariadic() && 2798 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 2799 ErrInfo = "Instruction has wrong number of operands."; 2800 return false; 2801 } 2802 2803 if (MI.isInlineAsm()) { 2804 // Verify register classes for inlineasm constraints. 2805 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 2806 I != E; ++I) { 2807 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 2808 if (!RC) 2809 continue; 2810 2811 const MachineOperand &Op = MI.getOperand(I); 2812 if (!Op.isReg()) 2813 continue; 2814 2815 unsigned Reg = Op.getReg(); 2816 if (!TargetRegisterInfo::isVirtualRegister(Reg) && !RC->contains(Reg)) { 2817 ErrInfo = "inlineasm operand has incorrect register class."; 2818 return false; 2819 } 2820 } 2821 2822 return true; 2823 } 2824 2825 // Make sure the register classes are correct. 2826 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 2827 if (MI.getOperand(i).isFPImm()) { 2828 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 2829 "all fp values to integers."; 2830 return false; 2831 } 2832 2833 int RegClass = Desc.OpInfo[i].RegClass; 2834 2835 switch (Desc.OpInfo[i].OperandType) { 2836 case MCOI::OPERAND_REGISTER: 2837 if (MI.getOperand(i).isImm()) { 2838 ErrInfo = "Illegal immediate value for operand."; 2839 return false; 2840 } 2841 break; 2842 case AMDGPU::OPERAND_REG_IMM_INT32: 2843 case AMDGPU::OPERAND_REG_IMM_FP32: 2844 break; 2845 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 2846 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 2847 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 2848 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 2849 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 2850 case AMDGPU::OPERAND_REG_INLINE_C_FP16: { 2851 const MachineOperand &MO = MI.getOperand(i); 2852 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 2853 ErrInfo = "Illegal immediate value for operand."; 2854 return false; 2855 } 2856 break; 2857 } 2858 case MCOI::OPERAND_IMMEDIATE: 2859 case AMDGPU::OPERAND_KIMM32: 2860 // Check if this operand is an immediate. 2861 // FrameIndex operands will be replaced by immediates, so they are 2862 // allowed. 2863 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 2864 ErrInfo = "Expected immediate, but got non-immediate"; 2865 return false; 2866 } 2867 LLVM_FALLTHROUGH; 2868 default: 2869 continue; 2870 } 2871 2872 if (!MI.getOperand(i).isReg()) 2873 continue; 2874 2875 if (RegClass != -1) { 2876 unsigned Reg = MI.getOperand(i).getReg(); 2877 if (Reg == AMDGPU::NoRegister || 2878 TargetRegisterInfo::isVirtualRegister(Reg)) 2879 continue; 2880 2881 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 2882 if (!RC->contains(Reg)) { 2883 ErrInfo = "Operand has incorrect register class."; 2884 return false; 2885 } 2886 } 2887 } 2888 2889 // Verify SDWA 2890 if (isSDWA(MI)) { 2891 if (!ST.hasSDWA()) { 2892 ErrInfo = "SDWA is not supported on this target"; 2893 return false; 2894 } 2895 2896 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 2897 2898 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 2899 2900 for (int OpIdx: OpIndicies) { 2901 if (OpIdx == -1) 2902 continue; 2903 const MachineOperand &MO = MI.getOperand(OpIdx); 2904 2905 if (!ST.hasSDWAScalar()) { 2906 // Only VGPRS on VI 2907 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 2908 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 2909 return false; 2910 } 2911 } else { 2912 // No immediates on GFX9 2913 if (!MO.isReg()) { 2914 ErrInfo = "Only reg allowed as operands in SDWA instructions on GFX9"; 2915 return false; 2916 } 2917 } 2918 } 2919 2920 if (!ST.hasSDWAOmod()) { 2921 // No omod allowed on VI 2922 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 2923 if (OMod != nullptr && 2924 (!OMod->isImm() || OMod->getImm() != 0)) { 2925 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 2926 return false; 2927 } 2928 } 2929 2930 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 2931 if (isVOPC(BasicOpcode)) { 2932 if (!ST.hasSDWASdst() && DstIdx != -1) { 2933 // Only vcc allowed as dst on VI for VOPC 2934 const MachineOperand &Dst = MI.getOperand(DstIdx); 2935 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 2936 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 2937 return false; 2938 } 2939 } else if (!ST.hasSDWAOutModsVOPC()) { 2940 // No clamp allowed on GFX9 for VOPC 2941 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 2942 if (Clamp && 
(!Clamp->isImm() || Clamp->getImm() != 0)) { 2943 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 2944 return false; 2945 } 2946 2947 // No omod allowed on GFX9 for VOPC 2948 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 2949 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 2950 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 2951 return false; 2952 } 2953 } 2954 } 2955 2956 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 2957 if (DstUnused && DstUnused->isImm() && 2958 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 2959 const MachineOperand &Dst = MI.getOperand(DstIdx); 2960 if (!Dst.isReg() || !Dst.isTied()) { 2961 ErrInfo = "Dst register should have tied register"; 2962 return false; 2963 } 2964 2965 const MachineOperand &TiedMO = 2966 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 2967 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 2968 ErrInfo = 2969 "Dst register should be tied to implicit use of preserved register"; 2970 return false; 2971 } else if (TargetRegisterInfo::isPhysicalRegister(TiedMO.getReg()) && 2972 Dst.getReg() != TiedMO.getReg()) { 2973 ErrInfo = "Dst register should use same physical register as preserved"; 2974 return false; 2975 } 2976 } 2977 } 2978 2979 // Verify MIMG 2980 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 2981 // Ensure that the return type used is large enough for all the options 2982 // being used TFE/LWE require an extra result register. 2983 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 2984 if (DMask) { 2985 uint64_t DMaskImm = DMask->getImm(); 2986 uint32_t RegCount = 2987 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 2988 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 2989 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 2990 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 2991 2992 // Adjust for packed 16 bit values 2993 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 2994 RegCount >>= 1; 2995 2996 // Adjust if using LWE or TFE 2997 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 2998 RegCount += 1; 2999 3000 const uint32_t DstIdx = 3001 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 3002 const MachineOperand &Dst = MI.getOperand(DstIdx); 3003 if (Dst.isReg()) { 3004 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 3005 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 3006 if (RegCount > DstSize) { 3007 ErrInfo = "MIMG instruction returns too many registers for dst " 3008 "register class"; 3009 return false; 3010 } 3011 } 3012 } 3013 } 3014 3015 // Verify VOP*. Ignore multiple sgpr operands on writelane. 3016 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 3017 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 3018 // Only look at the true operands. Only a real operand can use the constant 3019 // bus, and we don't want to check pseudo-operands like the source modifier 3020 // flags. 
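// (The property being verified: on these subtargets a single VALU instruction may read at most one scalar source, an SGPR, a literal constant, or a register like M0 or VCC, over the constant bus, so the loop below counts every such read and reports an error when more than one is found.)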
3021 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 3022 3023 unsigned ConstantBusCount = 0; 3024 unsigned LiteralCount = 0; 3025 3026 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 3027 ++ConstantBusCount; 3028 3029 unsigned SGPRUsed = findImplicitSGPRRead(MI); 3030 if (SGPRUsed != AMDGPU::NoRegister) 3031 ++ConstantBusCount; 3032 3033 for (int OpIdx : OpIndices) { 3034 if (OpIdx == -1) 3035 break; 3036 const MachineOperand &MO = MI.getOperand(OpIdx); 3037 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 3038 if (MO.isReg()) { 3039 if (MO.getReg() != SGPRUsed) 3040 ++ConstantBusCount; 3041 SGPRUsed = MO.getReg(); 3042 } else { 3043 ++ConstantBusCount; 3044 ++LiteralCount; 3045 } 3046 } 3047 } 3048 if (ConstantBusCount > 1) { 3049 ErrInfo = "VOP* instruction uses the constant bus more than once"; 3050 return false; 3051 } 3052 3053 if (isVOP3(MI) && LiteralCount) { 3054 ErrInfo = "VOP3 instruction uses literal"; 3055 return false; 3056 } 3057 } 3058 3059 // Verify misc. restrictions on specific instructions. 3060 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32 || 3061 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64) { 3062 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3063 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 3064 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 3065 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 3066 if (!compareMachineOp(Src0, Src1) && 3067 !compareMachineOp(Src0, Src2)) { 3068 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 3069 return false; 3070 } 3071 } 3072 } 3073 3074 if (isSOPK(MI)) { 3075 int64_t Imm = getNamedOperand(MI, AMDGPU::OpName::simm16)->getImm(); 3076 if (sopkIsZext(MI)) { 3077 if (!isUInt<16>(Imm)) { 3078 ErrInfo = "invalid immediate for SOPK instruction"; 3079 return false; 3080 } 3081 } else { 3082 if (!isInt<16>(Imm)) { 3083 ErrInfo = "invalid immediate for SOPK instruction"; 3084 return false; 3085 } 3086 } 3087 } 3088 3089 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 3090 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 3091 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3092 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 3093 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 3094 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 3095 3096 const unsigned StaticNumOps = Desc.getNumOperands() + 3097 Desc.getNumImplicitUses(); 3098 const unsigned NumImplicitOps = IsDst ? 2 : 1; 3099 3100 // Allow additional implicit operands. This allows a fixup done by the post 3101 // RA scheduler where the main implicit operand is killed and implicit-defs 3102 // are added for sub-registers that remain live after this instruction. 3103 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 3104 ErrInfo = "missing implicit register operands"; 3105 return false; 3106 } 3107 3108 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3109 if (IsDst) { 3110 if (!Dst->isUse()) { 3111 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 3112 return false; 3113 } 3114 3115 unsigned UseOpIdx; 3116 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 3117 UseOpIdx != StaticNumOps + 1) { 3118 ErrInfo = "movrel implicit operands should be tied"; 3119 return false; 3120 } 3121 } 3122 3123 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 3124 const MachineOperand &ImpUse 3125 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 3126 if (!ImpUse.isReg() || !ImpUse.isUse() || 3127 !isSubRegOf(RI, ImpUse, IsDst ? 
*Dst : Src0)) { 3128 ErrInfo = "src0 should be subreg of implicit vector use"; 3129 return false; 3130 } 3131 } 3132 3133 // Make sure we aren't losing exec uses in the td files. This mostly requires 3134 // being careful when using let Uses to try to add other use registers. 3135 if (shouldReadExec(MI)) { 3136 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 3137 ErrInfo = "VALU instruction does not implicitly read exec mask"; 3138 return false; 3139 } 3140 } 3141 3142 if (isSMRD(MI)) { 3143 if (MI.mayStore()) { 3144 // The register offset form of scalar stores may only use m0 as the 3145 // soffset register. 3146 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 3147 if (Soff && Soff->getReg() != AMDGPU::M0) { 3148 ErrInfo = "scalar stores must use m0 as offset register"; 3149 return false; 3150 } 3151 } 3152 } 3153 3154 if (isFLAT(MI) && !MF->getSubtarget<GCNSubtarget>().hasFlatInstOffsets()) { 3155 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 3156 if (Offset->getImm() != 0) { 3157 ErrInfo = "subtarget does not support offsets in flat instructions"; 3158 return false; 3159 } 3160 } 3161 3162 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 3163 if (DppCt) { 3164 using namespace AMDGPU::DPP; 3165 3166 unsigned DC = DppCt->getImm(); 3167 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 3168 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 3169 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 3170 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 3171 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 3172 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST)) { 3173 ErrInfo = "Invalid dpp_ctrl value"; 3174 return false; 3175 } 3176 } 3177 3178 return true; 3179 } 3180 3181 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 3182 switch (MI.getOpcode()) { 3183 default: return AMDGPU::INSTRUCTION_LIST_END; 3184 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 3185 case AMDGPU::COPY: return AMDGPU::COPY; 3186 case AMDGPU::PHI: return AMDGPU::PHI; 3187 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 3188 case AMDGPU::WQM: return AMDGPU::WQM; 3189 case AMDGPU::WWM: return AMDGPU::WWM; 3190 case AMDGPU::S_MOV_B32: 3191 return MI.getOperand(1).isReg() ? 3192 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 3193 case AMDGPU::S_ADD_I32: 3194 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_I32_e32; 3195 case AMDGPU::S_ADDC_U32: 3196 return AMDGPU::V_ADDC_U32_e32; 3197 case AMDGPU::S_SUB_I32: 3198 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_I32_e32; 3199 // FIXME: These are not consistently handled, and selected when the carry is 3200 // used. 3201 case AMDGPU::S_ADD_U32: 3202 return AMDGPU::V_ADD_I32_e32; 3203 case AMDGPU::S_SUB_U32: 3204 return AMDGPU::V_SUB_I32_e32; 3205 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 3206 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32; 3207 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 3208 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 3209 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 3210 case AMDGPU::S_XNOR_B32: 3211 return ST.hasDLInsts() ? 
AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 3212 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 3213 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 3214 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 3215 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 3216 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 3217 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64; 3218 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 3219 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64; 3220 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 3221 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64; 3222 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32; 3223 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32; 3224 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32; 3225 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32; 3226 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 3227 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 3228 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 3229 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 3230 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e32; 3231 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e32; 3232 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e32; 3233 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e32; 3234 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e32; 3235 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e32; 3236 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e32; 3237 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e32; 3238 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e32; 3239 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e32; 3240 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e32; 3241 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e32; 3242 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e32; 3243 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e32; 3244 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 3245 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 3246 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 3247 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 3248 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 3249 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 3250 } 3251 } 3252 3253 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 3254 unsigned OpNo) const { 3255 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3256 const MCInstrDesc &Desc = get(MI.getOpcode()); 3257 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 3258 Desc.OpInfo[OpNo].RegClass == -1) { 3259 unsigned Reg = MI.getOperand(OpNo).getReg(); 3260 3261 if (TargetRegisterInfo::isVirtualRegister(Reg)) 3262 return MRI.getRegClass(Reg); 3263 return RI.getPhysRegClass(Reg); 3264 } 3265 3266 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 3267 return RI.getRegClass(RCID); 3268 } 3269 3270 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 3271 MachineBasicBlock::iterator I = MI; 3272 MachineBasicBlock *MBB = MI.getParent(); 3273 MachineOperand &MO = MI.getOperand(OpIdx); 3274 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 3275 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 3276 const TargetRegisterClass *RC = RI.getRegClass(RCID); 3277 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 3278 if (MO.isReg()) 3279 Opcode = 
AMDGPU::COPY; 3280 else if (RI.isSGPRClass(RC)) 3281 Opcode = AMDGPU::S_MOV_B32; 3282 3283 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 3284 if (RI.getCommonSubClass(&AMDGPU::VReg_64RegClass, VRC)) 3285 VRC = &AMDGPU::VReg_64RegClass; 3286 else 3287 VRC = &AMDGPU::VGPR_32RegClass; 3288 3289 unsigned Reg = MRI.createVirtualRegister(VRC); 3290 DebugLoc DL = MBB->findDebugLoc(I); 3291 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 3292 MO.ChangeToRegister(Reg, false); 3293 } 3294 3295 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 3296 MachineRegisterInfo &MRI, 3297 MachineOperand &SuperReg, 3298 const TargetRegisterClass *SuperRC, 3299 unsigned SubIdx, 3300 const TargetRegisterClass *SubRC) 3301 const { 3302 MachineBasicBlock *MBB = MI->getParent(); 3303 DebugLoc DL = MI->getDebugLoc(); 3304 unsigned SubReg = MRI.createVirtualRegister(SubRC); 3305 3306 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 3307 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3308 .addReg(SuperReg.getReg(), 0, SubIdx); 3309 return SubReg; 3310 } 3311 3312 // Just in case the super register is itself a sub-register, copy it to a new 3313 // value so we don't need to worry about merging its subreg index with the 3314 // SubIdx passed to this function. The register coalescer should be able to 3315 // eliminate this extra copy. 3316 unsigned NewSuperReg = MRI.createVirtualRegister(SuperRC); 3317 3318 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 3319 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 3320 3321 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 3322 .addReg(NewSuperReg, 0, SubIdx); 3323 3324 return SubReg; 3325 } 3326 3327 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 3328 MachineBasicBlock::iterator MII, 3329 MachineRegisterInfo &MRI, 3330 MachineOperand &Op, 3331 const TargetRegisterClass *SuperRC, 3332 unsigned SubIdx, 3333 const TargetRegisterClass *SubRC) const { 3334 if (Op.isImm()) { 3335 if (SubIdx == AMDGPU::sub0) 3336 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 3337 if (SubIdx == AMDGPU::sub1) 3338 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 3339 3340 llvm_unreachable("Unhandled register index for immediate"); 3341 } 3342 3343 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 3344 SubIdx, SubRC); 3345 return MachineOperand::CreateReg(SubReg, false); 3346 } 3347 3348 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 3349 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 3350 assert(Inst.getNumExplicitOperands() == 3); 3351 MachineOperand Op1 = Inst.getOperand(1); 3352 Inst.RemoveOperand(1); 3353 Inst.addOperand(Op1); 3354 } 3355 3356 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 3357 const MCOperandInfo &OpInfo, 3358 const MachineOperand &MO) const { 3359 if (!MO.isReg()) 3360 return false; 3361 3362 unsigned Reg = MO.getReg(); 3363 const TargetRegisterClass *RC = 3364 TargetRegisterInfo::isVirtualRegister(Reg) ? 3365 MRI.getRegClass(Reg) : 3366 RI.getPhysRegClass(Reg); 3367 3368 const SIRegisterInfo *TRI = 3369 static_cast<const SIRegisterInfo*>(MRI.getTargetRegisterInfo()); 3370 RC = TRI->getSubRegClass(RC, MO.getSubReg()); 3371 3372 // In order to be legal, the common sub-class must be equal to the 3373 // class of the current operand. 
For example: 3374 // 3375 // v_mov_b32 s0 ; Operand defined as vsrc_b32 3376 // ; RI.getCommonSubClass(s0,vsrc_b32) = sgpr ; LEGAL 3377 // 3378 // s_sendmsg 0, s0 ; Operand defined as m0reg 3379 // ; RI.getCommonSubClass(s0,m0reg) = m0reg ; NOT LEGAL 3380 3381 return RI.getCommonSubClass(RC, RI.getRegClass(OpInfo.RegClass)) == RC; 3382 } 3383 3384 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 3385 const MCOperandInfo &OpInfo, 3386 const MachineOperand &MO) const { 3387 if (MO.isReg()) 3388 return isLegalRegOperand(MRI, OpInfo, MO); 3389 3390 // Handle non-register types that are treated like immediates. 3391 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI()); 3392 return true; 3393 } 3394 3395 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 3396 const MachineOperand *MO) const { 3397 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 3398 const MCInstrDesc &InstDesc = MI.getDesc(); 3399 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 3400 const TargetRegisterClass *DefinedRC = 3401 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 3402 if (!MO) 3403 MO = &MI.getOperand(OpIdx); 3404 3405 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 3406 3407 RegSubRegPair SGPRUsed; 3408 if (MO->isReg()) 3409 SGPRUsed = RegSubRegPair(MO->getReg(), MO->getSubReg()); 3410 3411 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 3412 if (i == OpIdx) 3413 continue; 3414 const MachineOperand &Op = MI.getOperand(i); 3415 if (Op.isReg()) { 3416 if ((Op.getReg() != SGPRUsed.Reg || Op.getSubReg() != SGPRUsed.SubReg) && 3417 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 3418 return false; 3419 } 3420 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 3421 return false; 3422 } 3423 } 3424 } 3425 3426 if (MO->isReg()) { 3427 assert(DefinedRC); 3428 return isLegalRegOperand(MRI, OpInfo, *MO); 3429 } 3430 3431 // Handle non-register types that are treated like immediates. 3432 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI()); 3433 3434 if (!DefinedRC) { 3435 // This operand expects an immediate. 3436 return true; 3437 } 3438 3439 return isImmOperandLegal(MI, OpIdx, *MO); 3440 } 3441 3442 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 3443 MachineInstr &MI) const { 3444 unsigned Opc = MI.getOpcode(); 3445 const MCInstrDesc &InstrDesc = get(Opc); 3446 3447 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 3448 MachineOperand &Src1 = MI.getOperand(Src1Idx); 3449 3450 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 3451 // we need to only have one constant bus use. 3452 // 3453 // Note we do not need to worry about literal constants here. They are 3454 // disabled for the operand type for instructions because they will always 3455 // violate the one constant bus use rule. 3456 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 3457 if (HasImplicitSGPR) { 3458 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3459 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3460 3461 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) 3462 legalizeOpWithMove(MI, Src0Idx); 3463 } 3464 3465 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 3466 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 3467 // src0/src1 with V_READFIRSTLANE. 
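// (A sketch of the rewrite performed below, with made-up virtual registers: "v_writelane_b32 %vdst, %vgpr_val, %vgpr_lane" becomes "%sval = v_readfirstlane_b32 %vgpr_val; %slane = v_readfirstlane_b32 %vgpr_lane; v_writelane_b32 %vdst, %sval, %slane".)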
3468 if (Opc == AMDGPU::V_WRITELANE_B32) { 3469 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3470 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3471 const DebugLoc &DL = MI.getDebugLoc(); 3472 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 3473 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3474 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3475 .add(Src0); 3476 Src0.ChangeToRegister(Reg, false); 3477 } 3478 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 3479 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3480 const DebugLoc &DL = MI.getDebugLoc(); 3481 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3482 .add(Src1); 3483 Src1.ChangeToRegister(Reg, false); 3484 } 3485 return; 3486 } 3487 3488 // VOP2 src0 instructions support all operand types, so we don't need to check 3489 // their legality. If src1 is already legal, we don't need to do anything. 3490 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 3491 return; 3492 3493 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 3494 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 3495 // select is uniform. 3496 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 3497 RI.isVGPR(MRI, Src1.getReg())) { 3498 unsigned Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 3499 const DebugLoc &DL = MI.getDebugLoc(); 3500 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 3501 .add(Src1); 3502 Src1.ChangeToRegister(Reg, false); 3503 return; 3504 } 3505 3506 // We do not use commuteInstruction here because it is too aggressive and will 3507 // commute if it is possible. We only want to commute here if it improves 3508 // legality. This can be called a fairly large number of times so don't waste 3509 // compile time pointlessly swapping and checking legality again. 3510 if (HasImplicitSGPR || !MI.isCommutable()) { 3511 legalizeOpWithMove(MI, Src1Idx); 3512 return; 3513 } 3514 3515 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 3516 MachineOperand &Src0 = MI.getOperand(Src0Idx); 3517 3518 // If src0 can be used as src1, commuting will make the operands legal. 3519 // Otherwise we have to give up and insert a move. 3520 // 3521 // TODO: Other immediate-like operand kinds could be commuted if there was a 3522 // MachineOperand::ChangeTo* for them. 3523 if ((!Src1.isImm() && !Src1.isReg()) || 3524 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 3525 legalizeOpWithMove(MI, Src1Idx); 3526 return; 3527 } 3528 3529 int CommutedOpc = commuteOpcode(MI); 3530 if (CommutedOpc == -1) { 3531 legalizeOpWithMove(MI, Src1Idx); 3532 return; 3533 } 3534 3535 MI.setDesc(get(CommutedOpc)); 3536 3537 unsigned Src0Reg = Src0.getReg(); 3538 unsigned Src0SubReg = Src0.getSubReg(); 3539 bool Src0Kill = Src0.isKill(); 3540 3541 if (Src1.isImm()) 3542 Src0.ChangeToImmediate(Src1.getImm()); 3543 else if (Src1.isReg()) { 3544 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 3545 Src0.setSubReg(Src1.getSubReg()); 3546 } else 3547 llvm_unreachable("Should only have register or immediate operands"); 3548 3549 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 3550 Src1.setSubReg(Src0SubReg); 3551 } 3552 3553 // Legalize VOP3 operands. 
Because all operand types are supported for any 3554 // operand, and since literal constants are not allowed and should never be 3555 // seen, we only need to worry about inserting copies if we use multiple SGPR 3556 // operands. 3557 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 3558 MachineInstr &MI) const { 3559 unsigned Opc = MI.getOpcode(); 3560 3561 int VOP3Idx[3] = { 3562 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 3563 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 3564 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 3565 }; 3566 3567 // Find the one SGPR operand we are allowed to use. 3568 unsigned SGPRReg = findUsedSGPR(MI, VOP3Idx); 3569 3570 for (unsigned i = 0; i < 3; ++i) { 3571 int Idx = VOP3Idx[i]; 3572 if (Idx == -1) 3573 break; 3574 MachineOperand &MO = MI.getOperand(Idx); 3575 3576 // We should never see a VOP3 instruction with an illegal immediate operand. 3577 if (!MO.isReg()) 3578 continue; 3579 3580 if (!RI.isSGPRClass(MRI.getRegClass(MO.getReg()))) 3581 continue; // VGPRs are legal 3582 3583 if (SGPRReg == AMDGPU::NoRegister || SGPRReg == MO.getReg()) { 3584 SGPRReg = MO.getReg(); 3585 // We can use one SGPR in each VOP3 instruction. 3586 continue; 3587 } 3588 3589 // If we make it this far, then the operand is not legal and we must 3590 // legalize it. 3591 legalizeOpWithMove(MI, Idx); 3592 } 3593 } 3594 3595 unsigned SIInstrInfo::readlaneVGPRToSGPR(unsigned SrcReg, MachineInstr &UseMI, 3596 MachineRegisterInfo &MRI) const { 3597 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 3598 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 3599 unsigned DstReg = MRI.createVirtualRegister(SRC); 3600 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 3601 3602 if (SubRegs == 1) { 3603 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3604 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 3605 .addReg(SrcReg); 3606 return DstReg; 3607 } 3608 3609 SmallVector<unsigned, 8> SRegs; 3610 for (unsigned i = 0; i < SubRegs; ++i) { 3611 unsigned SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3612 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3613 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 3614 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 3615 SRegs.push_back(SGPR); 3616 } 3617 3618 MachineInstrBuilder MIB = 3619 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 3620 get(AMDGPU::REG_SEQUENCE), DstReg); 3621 for (unsigned i = 0; i < SubRegs; ++i) { 3622 MIB.addReg(SRegs[i]); 3623 MIB.addImm(RI.getSubRegFromChannel(i)); 3624 } 3625 return DstReg; 3626 } 3627 3628 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 3629 MachineInstr &MI) const { 3630 3631 // If the pointer is stored in VGPRs, then we need to move it to 3632 // SGPRs using v_readfirstlane. This is safe because we only select 3633 // loads with uniform pointers to SMRD instructions, so we know the 3634 // pointer value is uniform.
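  // Illustrative example of the rewrite (register numbers are hypothetical):
  // for a 64-bit base pointer held in a VGPR pair, readlaneVGPRToSGPR emits
  // one v_readfirstlane_b32 per 32-bit piece and reassembles the SGPR pair, so
  // the SMRD load can use it, roughly:
  //   v_readfirstlane_b32 s4, v2
  //   v_readfirstlane_b32 s5, v3
  //   s_load_dwordx2      s[0:1], s[4:5], 0x0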
3635 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 3636 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 3637 unsigned SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 3638 SBase->setReg(SGPR); 3639 } 3640 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff); 3641 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { 3642 unsigned SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); 3643 SOff->setReg(SGPR); 3644 } 3645 } 3646 3647 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 3648 MachineBasicBlock::iterator I, 3649 const TargetRegisterClass *DstRC, 3650 MachineOperand &Op, 3651 MachineRegisterInfo &MRI, 3652 const DebugLoc &DL) const { 3653 unsigned OpReg = Op.getReg(); 3654 unsigned OpSubReg = Op.getSubReg(); 3655 3656 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 3657 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 3658 3659 // Check if operand is already the correct register class. 3660 if (DstRC == OpRC) 3661 return; 3662 3663 unsigned DstReg = MRI.createVirtualRegister(DstRC); 3664 MachineInstr *Copy = 3665 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 3666 3667 Op.setReg(DstReg); 3668 Op.setSubReg(0); 3669 3670 MachineInstr *Def = MRI.getVRegDef(OpReg); 3671 if (!Def) 3672 return; 3673 3674 // Try to eliminate the copy if it is copying an immediate value. 3675 if (Def->isMoveImmediate()) 3676 FoldImmediate(*Copy, *Def, OpReg, &MRI); 3677 } 3678 3679 // Emit the actual waterfall loop, executing the wrapped instruction for each 3680 // unique value of \p Rsrc across all lanes. In the best case we execute 1 3681 // iteration, in the worst case we execute 64 (once per lane). 3682 static void 3683 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 3684 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 3685 const DebugLoc &DL, MachineOperand &Rsrc) { 3686 MachineBasicBlock::iterator I = LoopBB.begin(); 3687 3688 unsigned VRsrc = Rsrc.getReg(); 3689 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 3690 3691 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3692 unsigned CondReg0 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3693 unsigned CondReg1 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3694 unsigned AndCond = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3695 unsigned SRsrcSub0 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3696 unsigned SRsrcSub1 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3697 unsigned SRsrcSub2 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3698 unsigned SRsrcSub3 = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3699 unsigned SRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 3700 3701 // Beginning of the loop, read the next Rsrc variant. 
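  // A rough sketch of the loop body emitted below (SGPR/VGPR numbers are
  // illustrative only):
  //   loop:
  //     v_readfirstlane_b32 s4, v0 (x4)     ; pick one lane's 128-bit descriptor
  //     v_cmp_eq_u64 (x2) + s_and_b64       ; find every lane holding that value
  //     s_and_saveexec_b64 s[10:11], <cond> ; limit EXEC to those lanes
  //     <the MUBUF/MTBUF instruction being waterfalled>
  //     s_xor_b64 exec, exec, s[10:11]      ; retire the lanes just handled
  //     s_cbranch_execnz loop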
3702 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub0) 3703 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub0); 3704 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub1) 3705 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub1); 3706 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub2) 3707 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub2); 3708 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), SRsrcSub3) 3709 .addReg(VRsrc, VRsrcUndef, AMDGPU::sub3); 3710 3711 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc) 3712 .addReg(SRsrcSub0) 3713 .addImm(AMDGPU::sub0) 3714 .addReg(SRsrcSub1) 3715 .addImm(AMDGPU::sub1) 3716 .addReg(SRsrcSub2) 3717 .addImm(AMDGPU::sub2) 3718 .addReg(SRsrcSub3) 3719 .addImm(AMDGPU::sub3); 3720 3721 // Update Rsrc operand to use the SGPR Rsrc. 3722 Rsrc.setReg(SRsrc); 3723 Rsrc.setIsKill(true); 3724 3725 // Identify all lanes with identical Rsrc operands in their VGPRs. 3726 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg0) 3727 .addReg(SRsrc, 0, AMDGPU::sub0_sub1) 3728 .addReg(VRsrc, 0, AMDGPU::sub0_sub1); 3729 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), CondReg1) 3730 .addReg(SRsrc, 0, AMDGPU::sub2_sub3) 3731 .addReg(VRsrc, 0, AMDGPU::sub2_sub3); 3732 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_B64), AndCond) 3733 .addReg(CondReg0) 3734 .addReg(CondReg1); 3735 3736 MRI.setSimpleHint(SaveExec, AndCond); 3737 3738 // Update EXEC to matching lanes, saving original to SaveExec. 3739 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_AND_SAVEEXEC_B64), SaveExec) 3740 .addReg(AndCond, RegState::Kill); 3741 3742 // The original instruction is here; we insert the terminators after it. 3743 I = LoopBB.end(); 3744 3745 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 3746 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_XOR_B64_term), AMDGPU::EXEC) 3747 .addReg(AMDGPU::EXEC) 3748 .addReg(SaveExec); 3749 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::S_CBRANCH_EXECNZ)).addMBB(&LoopBB); 3750 } 3751 3752 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 3753 // with SGPRs by iterating over all unique values across all lanes. 3754 static void loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 3755 MachineOperand &Rsrc, MachineDominatorTree *MDT) { 3756 MachineBasicBlock &MBB = *MI.getParent(); 3757 MachineFunction &MF = *MBB.getParent(); 3758 MachineRegisterInfo &MRI = MF.getRegInfo(); 3759 MachineBasicBlock::iterator I(&MI); 3760 const DebugLoc &DL = MI.getDebugLoc(); 3761 3762 unsigned SaveExec = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 3763 3764 // Save the EXEC mask 3765 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_MOV_B64), SaveExec) 3766 .addReg(AMDGPU::EXEC); 3767 3768 // Killed uses in the instruction we are waterfalling around will be 3769 // incorrect due to the added control-flow. 3770 for (auto &MO : MI.uses()) { 3771 if (MO.isReg() && MO.isUse()) { 3772 MRI.clearKillFlags(MO.getReg()); 3773 } 3774 } 3775 3776 // To insert the loop we need to split the block. Move everything after this 3777 // point to a new block, and insert a new empty block between the two. 
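  // The resulting control flow is, roughly:
  //
  //   MBB -> LoopBB -> RemainderBB -> (original successors of MBB)
  //            ^  |
  //            +--+   (LoopBB branches back to itself while lanes remain)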
3778 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 3779 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 3780 MachineFunction::iterator MBBI(MBB); 3781 ++MBBI; 3782 3783 MF.insert(MBBI, LoopBB); 3784 MF.insert(MBBI, RemainderBB); 3785 3786 LoopBB->addSuccessor(LoopBB); 3787 LoopBB->addSuccessor(RemainderBB); 3788 3789 // Move MI to the LoopBB, and the remainder of the block to RemainderBB. 3790 MachineBasicBlock::iterator J = I++; 3791 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 3792 RemainderBB->splice(RemainderBB->begin(), &MBB, I, MBB.end()); 3793 LoopBB->splice(LoopBB->begin(), &MBB, J); 3794 3795 MBB.addSuccessor(LoopBB); 3796 3797 // Update dominators. We know that MBB immediately dominates LoopBB, that 3798 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 3799 // dominates all of the successors transferred to it from MBB that MBB used 3800 // to dominate. 3801 if (MDT) { 3802 MDT->addNewBlock(LoopBB, &MBB); 3803 MDT->addNewBlock(RemainderBB, LoopBB); 3804 for (auto &Succ : RemainderBB->successors()) { 3805 if (MDT->dominates(&MBB, Succ)) { 3806 MDT->changeImmediateDominator(Succ, RemainderBB); 3807 } 3808 } 3809 } 3810 3811 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 3812 3813 // Restore the EXEC mask 3814 MachineBasicBlock::iterator First = RemainderBB->begin(); 3815 BuildMI(*RemainderBB, First, DL, TII.get(AMDGPU::S_MOV_B64), AMDGPU::EXEC) 3816 .addReg(SaveExec); 3817 } 3818 3819 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 3820 static std::tuple<unsigned, unsigned> 3821 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 3822 MachineBasicBlock &MBB = *MI.getParent(); 3823 MachineFunction &MF = *MBB.getParent(); 3824 MachineRegisterInfo &MRI = MF.getRegInfo(); 3825 3826 // Extract the ptr from the resource descriptor. 
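  // For reference (roughly): a buffer resource descriptor (V#) is 128 bits;
  // the low two dwords hold the 48-bit base address plus stride/swizzle
  // fields, while dwords 2 and 3 hold num_records and the data format / other
  // flags. That is why sub0_sub1 below yields a usable 64-bit pointer and
  // RSRC_DATA_FORMAT is used to rebuild the upper half.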
3827 unsigned RsrcPtr = 3828 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, 3829 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 3830 3831 // Create an empty resource descriptor 3832 unsigned Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 3833 unsigned SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3834 unsigned SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 3835 unsigned NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SReg_128RegClass); 3836 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); 3837 3838 // Zero64 = 0 3839 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) 3840 .addImm(0); 3841 3842 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 3843 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 3844 .addImm(RsrcDataFormat & 0xFFFFFFFF); 3845 3846 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 3847 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 3848 .addImm(RsrcDataFormat >> 32); 3849 3850 // NewSRsrc = {Zero64, SRsrcFormat} 3851 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) 3852 .addReg(Zero64) 3853 .addImm(AMDGPU::sub0_sub1) 3854 .addReg(SRsrcFormatLo) 3855 .addImm(AMDGPU::sub2) 3856 .addReg(SRsrcFormatHi) 3857 .addImm(AMDGPU::sub3); 3858 3859 return std::make_tuple(RsrcPtr, NewSRsrc); 3860 } 3861 3862 void SIInstrInfo::legalizeOperands(MachineInstr &MI, 3863 MachineDominatorTree *MDT) const { 3864 MachineFunction &MF = *MI.getParent()->getParent(); 3865 MachineRegisterInfo &MRI = MF.getRegInfo(); 3866 3867 // Legalize VOP2 3868 if (isVOP2(MI) || isVOPC(MI)) { 3869 legalizeOperandsVOP2(MRI, MI); 3870 return; 3871 } 3872 3873 // Legalize VOP3 3874 if (isVOP3(MI)) { 3875 legalizeOperandsVOP3(MRI, MI); 3876 return; 3877 } 3878 3879 // Legalize SMRD 3880 if (isSMRD(MI)) { 3881 legalizeOperandsSMRD(MRI, MI); 3882 return; 3883 } 3884 3885 // Legalize REG_SEQUENCE and PHI 3886 // The register class of the operands much be the same type as the register 3887 // class of the output. 3888 if (MI.getOpcode() == AMDGPU::PHI) { 3889 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr; 3890 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) { 3891 if (!MI.getOperand(i).isReg() || 3892 !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg())) 3893 continue; 3894 const TargetRegisterClass *OpRC = 3895 MRI.getRegClass(MI.getOperand(i).getReg()); 3896 if (RI.hasVGPRs(OpRC)) { 3897 VRC = OpRC; 3898 } else { 3899 SRC = OpRC; 3900 } 3901 } 3902 3903 // If any of the operands are VGPR registers, then they all most be 3904 // otherwise we will create illegal VGPR->SGPR copies when legalizing 3905 // them. 3906 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) { 3907 if (!VRC) { 3908 assert(SRC); 3909 VRC = RI.getEquivalentVGPRClass(SRC); 3910 } 3911 RC = VRC; 3912 } else { 3913 RC = SRC; 3914 } 3915 3916 // Update all the operands so they have the same type. 3917 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3918 MachineOperand &Op = MI.getOperand(I); 3919 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3920 continue; 3921 3922 // MI is a PHI instruction. 3923 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB(); 3924 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator(); 3925 3926 // Avoid creating no-op copies with the same src and dst reg class. These 3927 // confuse some of the machine passes. 
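      // Illustrative example (hypothetical vregs) of what this loop fixes up:
      //   %r:sreg_32 = PHI %a:vgpr_32, %bb.0, %b:sreg_32, %bb.1
      // is rewritten so that %r and both incoming values are VGPRs, since
      // materializing %a in an SGPR would require an illegal (potentially
      // divergent) VGPR->SGPR copy. The copies are inserted at the end of the
      // predecessor blocks, just before their terminators.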
3928 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc()); 3929 } 3930 } 3931 3932 // REG_SEQUENCE doesn't really require operand legalization, but if one has a 3933 // VGPR dest type and SGPR sources, insert copies so all operands are 3934 // VGPRs. This seems to help operand folding / the register coalescer. 3935 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) { 3936 MachineBasicBlock *MBB = MI.getParent(); 3937 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0); 3938 if (RI.hasVGPRs(DstRC)) { 3939 // Update all the operands so they are VGPR register classes. These may 3940 // not be the same register class because REG_SEQUENCE supports mixing 3941 // subregister index types e.g. sub0_sub1 + sub2 + sub3 3942 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 3943 MachineOperand &Op = MI.getOperand(I); 3944 if (!Op.isReg() || !TargetRegisterInfo::isVirtualRegister(Op.getReg())) 3945 continue; 3946 3947 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 3948 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 3949 if (VRC == OpRC) 3950 continue; 3951 3952 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 3953 Op.setIsKill(); 3954 } 3955 } 3956 3957 return; 3958 } 3959 3960 // Legalize INSERT_SUBREG 3961 // src0 must have the same register class as dst 3962 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 3963 unsigned Dst = MI.getOperand(0).getReg(); 3964 unsigned Src0 = MI.getOperand(1).getReg(); 3965 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 3966 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 3967 if (DstRC != Src0RC) { 3968 MachineBasicBlock *MBB = MI.getParent(); 3969 MachineOperand &Op = MI.getOperand(1); 3970 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 3971 } 3972 return; 3973 } 3974 3975 // Legalize SI_INIT_M0 3976 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 3977 MachineOperand &Src = MI.getOperand(0); 3978 if (Src.isReg() && RI.hasVGPRs(MRI.getRegClass(Src.getReg()))) 3979 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 3980 return; 3981 } 3982 3983 // Legalize MIMG and MUBUF/MTBUF for shaders. 3984 // 3985 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 3986 // scratch memory access. In both cases, the legalization never involves 3987 // conversion to the addr64 form. 3988 if (isMIMG(MI) || 3989 (AMDGPU::isShader(MF.getFunction().getCallingConv()) && 3990 (isMUBUF(MI) || isMTBUF(MI)))) { 3991 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 3992 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) { 3993 unsigned SGPR = readlaneVGPRToSGPR(SRsrc->getReg(), MI, MRI); 3994 SRsrc->setReg(SGPR); 3995 } 3996 3997 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 3998 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) { 3999 unsigned SGPR = readlaneVGPRToSGPR(SSamp->getReg(), MI, MRI); 4000 SSamp->setReg(SGPR); 4001 } 4002 return; 4003 } 4004 4005 // Legalize MUBUF* instructions. 4006 int RsrcIdx = 4007 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc); 4008 if (RsrcIdx != -1) { 4009 // We have an MUBUF instruction 4010 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx); 4011 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass; 4012 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()), 4013 RI.getRegClass(RsrcRC))) { 4014 // The operands are legal. 4015 // FIXME: We may need to legalize operands besided srsrc. 
4016 return; 4017 } 4018 4019 // Legalize a VGPR Rsrc. 4020 // 4021 // If the instruction is _ADDR64, we can avoid a waterfall by extracting 4022 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using 4023 // a zero-value SRsrc. 4024 // 4025 // If the instruction is _OFFSET (both idxen and offen disabled), and we 4026 // support ADDR64 instructions, we can convert to ADDR64 and do the same as 4027 // above. 4028 // 4029 // Otherwise we are on non-ADDR64 hardware, and/or we have 4030 // idxen/offen/bothen and we fall back to a waterfall loop. 4031 4032 MachineBasicBlock &MBB = *MI.getParent(); 4033 4034 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 4035 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) { 4036 // This is already an ADDR64 instruction so we need to add the pointer 4037 // extracted from the resource descriptor to the current value of VAddr. 4038 unsigned NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4039 unsigned NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4040 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4041 4042 unsigned RsrcPtr, NewSRsrc; 4043 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 4044 4045 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0 4046 DebugLoc DL = MI.getDebugLoc(); 4047 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_I32_e32), NewVAddrLo) 4048 .addReg(RsrcPtr, 0, AMDGPU::sub0) 4049 .addReg(VAddr->getReg(), 0, AMDGPU::sub0); 4050 4051 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1 4052 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e32), NewVAddrHi) 4053 .addReg(RsrcPtr, 0, AMDGPU::sub1) 4054 .addReg(VAddr->getReg(), 0, AMDGPU::sub1); 4055 4056 // NewVaddr = {NewVaddrHi, NewVaddrLo} 4057 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr) 4058 .addReg(NewVAddrLo) 4059 .addImm(AMDGPU::sub0) 4060 .addReg(NewVAddrHi) 4061 .addImm(AMDGPU::sub1); 4062 4063 VAddr->setReg(NewVAddr); 4064 Rsrc->setReg(NewSRsrc); 4065 } else if (!VAddr && ST.hasAddr64()) { 4066 // This instruction is the _OFFSET variant, so we need to convert it to 4067 // ADDR64. 4068 assert(MBB.getParent()->getSubtarget<GCNSubtarget>().getGeneration() 4069 < AMDGPUSubtarget::VOLCANIC_ISLANDS && 4070 "FIXME: Need to emit flat atomics here"); 4071 4072 unsigned RsrcPtr, NewSRsrc; 4073 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 4074 4075 unsigned NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4076 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 4077 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4078 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 4079 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 4080 4081 // Atomics with return have an additional tied operand and are 4082 // missing some of the special bits. 4083 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 4084 MachineInstr *Addr64; 4085 4086 if (!VDataIn) { 4087 // Regular buffer load / store. 4088 MachineInstrBuilder MIB = 4089 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 4090 .add(*VData) 4091 .addReg(NewVAddr) 4092 .addReg(NewSRsrc) 4093 .add(*SOffset) 4094 .add(*Offset); 4095 4096 // Atomics do not have this operand.
4097 if (const MachineOperand *GLC = 4098 getNamedOperand(MI, AMDGPU::OpName::glc)) { 4099 MIB.addImm(GLC->getImm()); 4100 } 4101 4102 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)); 4103 4104 if (const MachineOperand *TFE = 4105 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 4106 MIB.addImm(TFE->getImm()); 4107 } 4108 4109 MIB.cloneMemRefs(MI); 4110 Addr64 = MIB; 4111 } else { 4112 // Atomics with return. 4113 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 4114 .add(*VData) 4115 .add(*VDataIn) 4116 .addReg(NewVAddr) 4117 .addReg(NewSRsrc) 4118 .add(*SOffset) 4119 .add(*Offset) 4120 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::slc)) 4121 .cloneMemRefs(MI); 4122 } 4123 4124 MI.removeFromParent(); 4125 4126 // NewVaddr = {NewVaddrHi, NewVaddrLo} 4127 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 4128 NewVAddr) 4129 .addReg(RsrcPtr, 0, AMDGPU::sub0) 4130 .addImm(AMDGPU::sub0) 4131 .addReg(RsrcPtr, 0, AMDGPU::sub1) 4132 .addImm(AMDGPU::sub1); 4133 } else { 4134 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 4135 // to SGPRs. 4136 loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 4137 } 4138 } 4139 } 4140 4141 void SIInstrInfo::moveToVALU(MachineInstr &TopInst, 4142 MachineDominatorTree *MDT) const { 4143 SetVectorType Worklist; 4144 Worklist.insert(&TopInst); 4145 4146 while (!Worklist.empty()) { 4147 MachineInstr &Inst = *Worklist.pop_back_val(); 4148 MachineBasicBlock *MBB = Inst.getParent(); 4149 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4150 4151 unsigned Opcode = Inst.getOpcode(); 4152 unsigned NewOpcode = getVALUOp(Inst); 4153 4154 // Handle some special cases 4155 switch (Opcode) { 4156 default: 4157 break; 4158 case AMDGPU::S_ADD_U64_PSEUDO: 4159 case AMDGPU::S_SUB_U64_PSEUDO: 4160 splitScalar64BitAddSub(Worklist, Inst, MDT); 4161 Inst.eraseFromParent(); 4162 continue; 4163 case AMDGPU::S_ADD_I32: 4164 case AMDGPU::S_SUB_I32: 4165 // FIXME: The u32 versions currently selected use the carry. 
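    // S_ADD_I32/S_SUB_I32 implicitly define SCC, while their VALU
    // counterparts define VCC instead; moveScalarAddSub() below only performs
    // the conversion when the target has the carry-less V_ADD_U32/V_SUB_U32
    // forms (see hasAddNoCarry()), otherwise we fall through to the default
    // handling.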
4166 if (moveScalarAddSub(Worklist, Inst, MDT)) 4167 continue; 4168 4169 // Default handling 4170 break; 4171 case AMDGPU::S_AND_B64: 4172 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 4173 Inst.eraseFromParent(); 4174 continue; 4175 4176 case AMDGPU::S_OR_B64: 4177 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 4178 Inst.eraseFromParent(); 4179 continue; 4180 4181 case AMDGPU::S_XOR_B64: 4182 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 4183 Inst.eraseFromParent(); 4184 continue; 4185 4186 case AMDGPU::S_NAND_B64: 4187 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 4188 Inst.eraseFromParent(); 4189 continue; 4190 4191 case AMDGPU::S_NOR_B64: 4192 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 4193 Inst.eraseFromParent(); 4194 continue; 4195 4196 case AMDGPU::S_XNOR_B64: 4197 if (ST.hasDLInsts()) 4198 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 4199 else 4200 splitScalar64BitXnor(Worklist, Inst, MDT); 4201 Inst.eraseFromParent(); 4202 continue; 4203 4204 case AMDGPU::S_ANDN2_B64: 4205 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 4206 Inst.eraseFromParent(); 4207 continue; 4208 4209 case AMDGPU::S_ORN2_B64: 4210 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 4211 Inst.eraseFromParent(); 4212 continue; 4213 4214 case AMDGPU::S_NOT_B64: 4215 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 4216 Inst.eraseFromParent(); 4217 continue; 4218 4219 case AMDGPU::S_BCNT1_I32_B64: 4220 splitScalar64BitBCNT(Worklist, Inst); 4221 Inst.eraseFromParent(); 4222 continue; 4223 4224 case AMDGPU::S_BFE_I64: 4225 splitScalar64BitBFE(Worklist, Inst); 4226 Inst.eraseFromParent(); 4227 continue; 4228 4229 case AMDGPU::S_LSHL_B32: 4230 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4231 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 4232 swapOperands(Inst); 4233 } 4234 break; 4235 case AMDGPU::S_ASHR_I32: 4236 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4237 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 4238 swapOperands(Inst); 4239 } 4240 break; 4241 case AMDGPU::S_LSHR_B32: 4242 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4243 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 4244 swapOperands(Inst); 4245 } 4246 break; 4247 case AMDGPU::S_LSHL_B64: 4248 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4249 NewOpcode = AMDGPU::V_LSHLREV_B64; 4250 swapOperands(Inst); 4251 } 4252 break; 4253 case AMDGPU::S_ASHR_I64: 4254 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4255 NewOpcode = AMDGPU::V_ASHRREV_I64; 4256 swapOperands(Inst); 4257 } 4258 break; 4259 case AMDGPU::S_LSHR_B64: 4260 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 4261 NewOpcode = AMDGPU::V_LSHRREV_B64; 4262 swapOperands(Inst); 4263 } 4264 break; 4265 4266 case AMDGPU::S_ABS_I32: 4267 lowerScalarAbs(Worklist, Inst); 4268 Inst.eraseFromParent(); 4269 continue; 4270 4271 case AMDGPU::S_CBRANCH_SCC0: 4272 case AMDGPU::S_CBRANCH_SCC1: 4273 // Clear unused bits of vcc 4274 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(AMDGPU::S_AND_B64), 4275 AMDGPU::VCC) 4276 .addReg(AMDGPU::EXEC) 4277 .addReg(AMDGPU::VCC); 4278 break; 4279 4280 case AMDGPU::S_BFE_U64: 4281 case AMDGPU::S_BFM_B64: 4282 llvm_unreachable("Moving this op to VALU not implemented"); 4283 4284 case AMDGPU::S_PACK_LL_B32_B16: 4285 case AMDGPU::S_PACK_LH_B32_B16: 4286 case AMDGPU::S_PACK_HH_B32_B16: 4287 movePackToVALU(Worklist, MRI, Inst); 4288 
Inst.eraseFromParent(); 4289 continue; 4290 4291 case AMDGPU::S_XNOR_B32: 4292 lowerScalarXnor(Worklist, Inst); 4293 Inst.eraseFromParent(); 4294 continue; 4295 4296 case AMDGPU::S_NAND_B32: 4297 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 4298 Inst.eraseFromParent(); 4299 continue; 4300 4301 case AMDGPU::S_NOR_B32: 4302 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 4303 Inst.eraseFromParent(); 4304 continue; 4305 4306 case AMDGPU::S_ANDN2_B32: 4307 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 4308 Inst.eraseFromParent(); 4309 continue; 4310 4311 case AMDGPU::S_ORN2_B32: 4312 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 4313 Inst.eraseFromParent(); 4314 continue; 4315 } 4316 4317 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 4318 // We cannot move this instruction to the VALU, so we should try to 4319 // legalize its operands instead. 4320 legalizeOperands(Inst, MDT); 4321 continue; 4322 } 4323 4324 // Use the new VALU Opcode. 4325 const MCInstrDesc &NewDesc = get(NewOpcode); 4326 Inst.setDesc(NewDesc); 4327 4328 // Remove any references to SCC. Vector instructions can't read from it, and 4329 // We're just about to add the implicit use / defs of VCC, and we don't want 4330 // both. 4331 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 4332 MachineOperand &Op = Inst.getOperand(i); 4333 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 4334 Inst.RemoveOperand(i); 4335 addSCCDefUsersToVALUWorklist(Inst, Worklist); 4336 } 4337 } 4338 4339 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 4340 // We are converting these to a BFE, so we need to add the missing 4341 // operands for the size and offset. 4342 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; 4343 Inst.addOperand(MachineOperand::CreateImm(0)); 4344 Inst.addOperand(MachineOperand::CreateImm(Size)); 4345 4346 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 4347 // The VALU version adds the second operand to the result, so insert an 4348 // extra 0 operand. 4349 Inst.addOperand(MachineOperand::CreateImm(0)); 4350 } 4351 4352 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 4353 4354 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 4355 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 4356 // If we need to move this to VGPRs, we need to unpack the second operand 4357 // back into the 2 separate ones for bit offset and width. 4358 assert(OffsetWidthOp.isImm() && 4359 "Scalar BFE is only implemented for constant width and offset"); 4360 uint32_t Imm = OffsetWidthOp.getImm(); 4361 4362 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 4363 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 4364 Inst.RemoveOperand(2); // Remove old immediate. 4365 Inst.addOperand(MachineOperand::CreateImm(Offset)); 4366 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 4367 } 4368 4369 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 4370 unsigned NewDstReg = AMDGPU::NoRegister; 4371 if (HasDst) { 4372 unsigned DstReg = Inst.getOperand(0).getReg(); 4373 if (TargetRegisterInfo::isPhysicalRegister(DstReg)) 4374 continue; 4375 4376 // Update the destination register class. 
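      // If the rewritten instruction now defines a VGPR value, any remaining
      // SGPR-based users of the old result become illegal as well; they are
      // queued below via addUsersToMoveToVALUWorklist so they too get moved to
      // the VALU.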
4377 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 4378 if (!NewDstRC) 4379 continue; 4380 4381 if (Inst.isCopy() && 4382 TargetRegisterInfo::isVirtualRegister(Inst.getOperand(1).getReg()) && 4383 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 4384 // Instead of creating a copy where src and dst are the same register 4385 // class, we just replace all uses of dst with src. These kinds of 4386 // copies interfere with the heuristics MachineSink uses to decide 4387 // whether or not to split a critical edge. Since the pass assumes 4388 // that copies will end up as machine instructions and not be 4389 // eliminated. 4390 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 4391 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 4392 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 4393 Inst.getOperand(0).setReg(DstReg); 4394 4395 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 4396 // these are deleted later, but at -O0 it would leave a suspicious 4397 // looking illegal copy of an undef register. 4398 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 4399 Inst.RemoveOperand(I); 4400 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 4401 continue; 4402 } 4403 4404 NewDstReg = MRI.createVirtualRegister(NewDstRC); 4405 MRI.replaceRegWith(DstReg, NewDstReg); 4406 } 4407 4408 // Legalize the operands 4409 legalizeOperands(Inst, MDT); 4410 4411 if (HasDst) 4412 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 4413 } 4414 } 4415 4416 // Add/sub require special handling to deal with carry outs. 4417 bool SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 4418 MachineDominatorTree *MDT) const { 4419 if (ST.hasAddNoCarry()) { 4420 // Assume there is no user of scc since we don't select this in that case. 4421 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 4422 // is used. 4423 4424 MachineBasicBlock &MBB = *Inst.getParent(); 4425 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4426 4427 unsigned OldDstReg = Inst.getOperand(0).getReg(); 4428 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4429 4430 unsigned Opc = Inst.getOpcode(); 4431 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 4432 4433 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 4434 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 4435 4436 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 4437 Inst.RemoveOperand(3); 4438 4439 Inst.setDesc(get(NewOpc)); 4440 Inst.addImplicitDefUseOperands(*MBB.getParent()); 4441 MRI.replaceRegWith(OldDstReg, ResultReg); 4442 legalizeOperands(Inst, MDT); 4443 4444 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4445 return true; 4446 } 4447 4448 return false; 4449 } 4450 4451 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 4452 MachineInstr &Inst) const { 4453 MachineBasicBlock &MBB = *Inst.getParent(); 4454 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4455 MachineBasicBlock::iterator MII = Inst; 4456 DebugLoc DL = Inst.getDebugLoc(); 4457 4458 MachineOperand &Dest = Inst.getOperand(0); 4459 MachineOperand &Src = Inst.getOperand(1); 4460 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4461 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4462 4463 unsigned SubOp = ST.hasAddNoCarry() ? 
4464 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_I32_e32; 4465 4466 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 4467 .addImm(0) 4468 .addReg(Src.getReg()); 4469 4470 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 4471 .addReg(Src.getReg()) 4472 .addReg(TmpReg); 4473 4474 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4475 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4476 } 4477 4478 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 4479 MachineInstr &Inst) const { 4480 MachineBasicBlock &MBB = *Inst.getParent(); 4481 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4482 MachineBasicBlock::iterator MII = Inst; 4483 const DebugLoc &DL = Inst.getDebugLoc(); 4484 4485 MachineOperand &Dest = Inst.getOperand(0); 4486 MachineOperand &Src0 = Inst.getOperand(1); 4487 MachineOperand &Src1 = Inst.getOperand(2); 4488 4489 if (ST.hasDLInsts()) { 4490 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4491 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 4492 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 4493 4494 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 4495 .add(Src0) 4496 .add(Src1); 4497 4498 MRI.replaceRegWith(Dest.getReg(), NewDest); 4499 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 4500 } else { 4501 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 4502 // invert either source and then perform the XOR. If either source is a 4503 // scalar register, then we can leave the inversion on the scalar unit to 4504 // acheive a better distrubution of scalar and vector instructions. 4505 bool Src0IsSGPR = Src0.isReg() && 4506 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 4507 bool Src1IsSGPR = Src1.isReg() && 4508 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 4509 MachineInstr *Not = nullptr; 4510 MachineInstr *Xor = nullptr; 4511 unsigned Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4512 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4513 4514 // Build a pair of scalar instructions and add them to the work list. 4515 // The next iteration over the work list will lower these to the vector 4516 // unit as necessary. 
4517 if (Src0IsSGPR) { 4518 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp) 4519 .add(Src0); 4520 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 4521 .addReg(Temp) 4522 .add(Src1); 4523 } else if (Src1IsSGPR) { 4524 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp) 4525 .add(Src1); 4526 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 4527 .add(Src0) 4528 .addReg(Temp); 4529 } else { 4530 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 4531 .add(Src0) 4532 .add(Src1); 4533 Not = BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 4534 .addReg(Temp); 4535 Worklist.insert(Not); 4536 } 4537 4538 MRI.replaceRegWith(Dest.getReg(), NewDest); 4539 4540 Worklist.insert(Xor); 4541 4542 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 4543 } 4544 } 4545 4546 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 4547 MachineInstr &Inst, 4548 unsigned Opcode) const { 4549 MachineBasicBlock &MBB = *Inst.getParent(); 4550 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4551 MachineBasicBlock::iterator MII = Inst; 4552 const DebugLoc &DL = Inst.getDebugLoc(); 4553 4554 MachineOperand &Dest = Inst.getOperand(0); 4555 MachineOperand &Src0 = Inst.getOperand(1); 4556 MachineOperand &Src1 = Inst.getOperand(2); 4557 4558 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4559 unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4560 4561 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 4562 .add(Src0) 4563 .add(Src1); 4564 4565 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 4566 .addReg(Interm); 4567 4568 Worklist.insert(&Op); 4569 Worklist.insert(&Not); 4570 4571 MRI.replaceRegWith(Dest.getReg(), NewDest); 4572 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 4573 } 4574 4575 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 4576 MachineInstr &Inst, 4577 unsigned Opcode) const { 4578 MachineBasicBlock &MBB = *Inst.getParent(); 4579 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4580 MachineBasicBlock::iterator MII = Inst; 4581 const DebugLoc &DL = Inst.getDebugLoc(); 4582 4583 MachineOperand &Dest = Inst.getOperand(0); 4584 MachineOperand &Src0 = Inst.getOperand(1); 4585 MachineOperand &Src1 = Inst.getOperand(2); 4586 4587 unsigned NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4588 unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4589 4590 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 4591 .add(Src1); 4592 4593 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 4594 .add(Src0) 4595 .addReg(Interm); 4596 4597 Worklist.insert(&Not); 4598 Worklist.insert(&Op); 4599 4600 MRI.replaceRegWith(Dest.getReg(), NewDest); 4601 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 4602 } 4603 4604 void SIInstrInfo::splitScalar64BitUnaryOp( 4605 SetVectorType &Worklist, MachineInstr &Inst, 4606 unsigned Opcode) const { 4607 MachineBasicBlock &MBB = *Inst.getParent(); 4608 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4609 4610 MachineOperand &Dest = Inst.getOperand(0); 4611 MachineOperand &Src0 = Inst.getOperand(1); 4612 DebugLoc DL = Inst.getDebugLoc(); 4613 4614 MachineBasicBlock::iterator MII = Inst; 4615 4616 const MCInstrDesc &InstDesc = get(Opcode); 4617 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
4618 MRI.getRegClass(Src0.getReg()) : 4619 &AMDGPU::SGPR_32RegClass; 4620 4621 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4622 4623 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4624 AMDGPU::sub0, Src0SubRC); 4625 4626 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4627 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 4628 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 4629 4630 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 4631 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 4632 4633 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4634 AMDGPU::sub1, Src0SubRC); 4635 4636 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 4637 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 4638 4639 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 4640 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4641 .addReg(DestSub0) 4642 .addImm(AMDGPU::sub0) 4643 .addReg(DestSub1) 4644 .addImm(AMDGPU::sub1); 4645 4646 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4647 4648 Worklist.insert(&LoHalf); 4649 Worklist.insert(&HiHalf); 4650 4651 // We don't need to legalizeOperands here because for a single operand, src0 4652 // will support any kind of input. 4653 4654 // Move all users of this moved value. 4655 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4656 } 4657 4658 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 4659 MachineInstr &Inst, 4660 MachineDominatorTree *MDT) const { 4661 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 4662 4663 MachineBasicBlock &MBB = *Inst.getParent(); 4664 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4665 4666 unsigned FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4667 unsigned DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4668 unsigned DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4669 4670 unsigned CarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 4671 unsigned DeadCarryReg = MRI.createVirtualRegister(&AMDGPU::SReg_64_XEXECRegClass); 4672 4673 MachineOperand &Dest = Inst.getOperand(0); 4674 MachineOperand &Src0 = Inst.getOperand(1); 4675 MachineOperand &Src1 = Inst.getOperand(2); 4676 const DebugLoc &DL = Inst.getDebugLoc(); 4677 MachineBasicBlock::iterator MII = Inst; 4678 4679 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 4680 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 4681 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4682 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 4683 4684 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4685 AMDGPU::sub0, Src0SubRC); 4686 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4687 AMDGPU::sub0, Src1SubRC); 4688 4689 4690 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4691 AMDGPU::sub1, Src0SubRC); 4692 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4693 AMDGPU::sub1, Src1SubRC); 4694 4695 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64; 4696 MachineInstr *LoHalf = 4697 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) 4698 .addReg(CarryReg, RegState::Define) 4699 .add(SrcReg0Sub0) 4700 .add(SrcReg1Sub0); 4701 4702 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 4703 MachineInstr *HiHalf = 4704 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) 4705 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 4706 .add(SrcReg0Sub1) 4707 .add(SrcReg1Sub1) 4708 .addReg(CarryReg, RegState::Kill); 4709 4710 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4711 .addReg(DestSub0) 4712 .addImm(AMDGPU::sub0) 4713 .addReg(DestSub1) 4714 .addImm(AMDGPU::sub1); 4715 4716 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4717 4718 // Try to legalize the operands in case we need to swap the order to keep it 4719 // valid. 4720 legalizeOperands(*LoHalf, MDT); 4721 legalizeOperands(*HiHalf, MDT); 4722 4723 // Move all users of this moved vlaue. 4724 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4725 } 4726 4727 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, 4728 MachineInstr &Inst, unsigned Opcode, 4729 MachineDominatorTree *MDT) const { 4730 MachineBasicBlock &MBB = *Inst.getParent(); 4731 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4732 4733 MachineOperand &Dest = Inst.getOperand(0); 4734 MachineOperand &Src0 = Inst.getOperand(1); 4735 MachineOperand &Src1 = Inst.getOperand(2); 4736 DebugLoc DL = Inst.getDebugLoc(); 4737 4738 MachineBasicBlock::iterator MII = Inst; 4739 4740 const MCInstrDesc &InstDesc = get(Opcode); 4741 const TargetRegisterClass *Src0RC = Src0.isReg() ? 4742 MRI.getRegClass(Src0.getReg()) : 4743 &AMDGPU::SGPR_32RegClass; 4744 4745 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 4746 const TargetRegisterClass *Src1RC = Src1.isReg() ? 4747 MRI.getRegClass(Src1.getReg()) : 4748 &AMDGPU::SGPR_32RegClass; 4749 4750 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 4751 4752 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4753 AMDGPU::sub0, Src0SubRC); 4754 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4755 AMDGPU::sub0, Src1SubRC); 4756 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 4757 AMDGPU::sub1, Src0SubRC); 4758 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 4759 AMDGPU::sub1, Src1SubRC); 4760 4761 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4762 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 4763 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 4764 4765 unsigned DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 4766 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 4767 .add(SrcReg0Sub0) 4768 .add(SrcReg1Sub0); 4769 4770 unsigned DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 4771 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 4772 .add(SrcReg0Sub1) 4773 .add(SrcReg1Sub1); 4774 4775 unsigned FullDestReg = MRI.createVirtualRegister(NewDestRC); 4776 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 4777 .addReg(DestSub0) 4778 .addImm(AMDGPU::sub0) 4779 .addReg(DestSub1) 4780 .addImm(AMDGPU::sub1); 4781 4782 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 4783 4784 Worklist.insert(&LoHalf); 4785 Worklist.insert(&HiHalf); 4786 4787 // Move all users of this moved vlaue. 
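  // At this point the original 64-bit scalar op has been rewritten roughly as
  //   <op>_b32 dst.sub0, src0.sub0, src1.sub0
  //   <op>_b32 dst.sub1, src0.sub1, src1.sub1
  //   dst = REG_SEQUENCE dst.sub0, sub0, dst.sub1, sub1
  // with the two halves queued on the worklist so they are moved to the VALU
  // on a later iteration.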
4788 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 4789 } 4790 4791 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 4792 MachineInstr &Inst, 4793 MachineDominatorTree *MDT) const { 4794 MachineBasicBlock &MBB = *Inst.getParent(); 4795 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4796 4797 MachineOperand &Dest = Inst.getOperand(0); 4798 MachineOperand &Src0 = Inst.getOperand(1); 4799 MachineOperand &Src1 = Inst.getOperand(2); 4800 const DebugLoc &DL = Inst.getDebugLoc(); 4801 4802 MachineBasicBlock::iterator MII = Inst; 4803 4804 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 4805 4806 unsigned Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4807 4808 MachineOperand* Op0; 4809 MachineOperand* Op1; 4810 4811 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 4812 Op0 = &Src0; 4813 Op1 = &Src1; 4814 } else { 4815 Op0 = &Src1; 4816 Op1 = &Src0; 4817 } 4818 4819 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 4820 .add(*Op0); 4821 4822 unsigned NewDest = MRI.createVirtualRegister(DestRC); 4823 4824 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 4825 .addReg(Interm) 4826 .add(*Op1); 4827 4828 MRI.replaceRegWith(Dest.getReg(), NewDest); 4829 4830 Worklist.insert(&Xor); 4831 } 4832 4833 void SIInstrInfo::splitScalar64BitBCNT( 4834 SetVectorType &Worklist, MachineInstr &Inst) const { 4835 MachineBasicBlock &MBB = *Inst.getParent(); 4836 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4837 4838 MachineBasicBlock::iterator MII = Inst; 4839 const DebugLoc &DL = Inst.getDebugLoc(); 4840 4841 MachineOperand &Dest = Inst.getOperand(0); 4842 MachineOperand &Src = Inst.getOperand(1); 4843 4844 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 4845 const TargetRegisterClass *SrcRC = Src.isReg() ? 4846 MRI.getRegClass(Src.getReg()) : 4847 &AMDGPU::SGPR_32RegClass; 4848 4849 unsigned MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4850 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4851 4852 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 4853 4854 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 4855 AMDGPU::sub0, SrcSubRC); 4856 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 4857 AMDGPU::sub1, SrcSubRC); 4858 4859 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 4860 4861 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 4862 4863 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4864 4865 // We don't need to legalize operands here. src0 for etiher instruction can be 4866 // an SGPR, and the second input is unused or determined here. 4867 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4868 } 4869 4870 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 4871 MachineInstr &Inst) const { 4872 MachineBasicBlock &MBB = *Inst.getParent(); 4873 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 4874 MachineBasicBlock::iterator MII = Inst; 4875 const DebugLoc &DL = Inst.getDebugLoc(); 4876 4877 MachineOperand &Dest = Inst.getOperand(0); 4878 uint32_t Imm = Inst.getOperand(2).getImm(); 4879 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 4880 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 4881 4882 (void) Offset; 4883 4884 // Only sext_inreg cases handled. 
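  // S_BFE_I64 with offset 0 behaves like sign_extend_inreg. For widths below
  // 32 the low half is produced with V_BFE_I32 and the high half is filled
  // with copies of the sign bit (arithmetic shift right by 31); for a width of
  // exactly 32 the low dword is passed through unchanged and only the sign
  // extension into the high dword is materialized.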
4885 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 4886 Offset == 0 && "Not implemented"); 4887 4888 if (BitWidth < 32) { 4889 unsigned MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4890 unsigned MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4891 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4892 4893 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32), MidRegLo) 4894 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 4895 .addImm(0) 4896 .addImm(BitWidth); 4897 4898 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 4899 .addImm(31) 4900 .addReg(MidRegLo); 4901 4902 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 4903 .addReg(MidRegLo) 4904 .addImm(AMDGPU::sub0) 4905 .addReg(MidRegHi) 4906 .addImm(AMDGPU::sub1); 4907 4908 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4909 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4910 return; 4911 } 4912 4913 MachineOperand &Src = Inst.getOperand(1); 4914 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4915 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 4916 4917 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 4918 .addImm(31) 4919 .addReg(Src.getReg(), 0, AMDGPU::sub0); 4920 4921 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 4922 .addReg(Src.getReg(), 0, AMDGPU::sub0) 4923 .addImm(AMDGPU::sub0) 4924 .addReg(TmpReg) 4925 .addImm(AMDGPU::sub1); 4926 4927 MRI.replaceRegWith(Dest.getReg(), ResultReg); 4928 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 4929 } 4930 4931 void SIInstrInfo::addUsersToMoveToVALUWorklist( 4932 unsigned DstReg, 4933 MachineRegisterInfo &MRI, 4934 SetVectorType &Worklist) const { 4935 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 4936 E = MRI.use_end(); I != E;) { 4937 MachineInstr &UseMI = *I->getParent(); 4938 4939 unsigned OpNo = 0; 4940 4941 switch (UseMI.getOpcode()) { 4942 case AMDGPU::COPY: 4943 case AMDGPU::WQM: 4944 case AMDGPU::WWM: 4945 case AMDGPU::REG_SEQUENCE: 4946 case AMDGPU::PHI: 4947 case AMDGPU::INSERT_SUBREG: 4948 break; 4949 default: 4950 OpNo = I.getOperandNo(); 4951 break; 4952 } 4953 4954 if (!RI.hasVGPRs(getOpRegClass(UseMI, OpNo))) { 4955 Worklist.insert(&UseMI); 4956 4957 do { 4958 ++I; 4959 } while (I != E && I->getParent() == &UseMI); 4960 } else { 4961 ++I; 4962 } 4963 } 4964 } 4965 4966 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 4967 MachineRegisterInfo &MRI, 4968 MachineInstr &Inst) const { 4969 unsigned ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4970 MachineBasicBlock *MBB = Inst.getParent(); 4971 MachineOperand &Src0 = Inst.getOperand(1); 4972 MachineOperand &Src1 = Inst.getOperand(2); 4973 const DebugLoc &DL = Inst.getDebugLoc(); 4974 4975 switch (Inst.getOpcode()) { 4976 case AMDGPU::S_PACK_LL_B32_B16: { 4977 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4978 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4979 4980 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 4981 // 0. 
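      // s_pack_ll_b32_b16 d, a, b packs d = (b[15:0] << 16) | a[15:0]; the
      // VALU expansion below is roughly
      //   v_mov_b32     t0, 0xffff
      //   v_and_b32     t1, t0, a
      //   v_lshl_or_b32 d, b, 16, t1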
4982 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 4983 .addImm(0xffff); 4984 4985 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 4986 .addReg(ImmReg, RegState::Kill) 4987 .add(Src0); 4988 4989 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32), ResultReg) 4990 .add(Src1) 4991 .addImm(16) 4992 .addReg(TmpReg, RegState::Kill); 4993 break; 4994 } 4995 case AMDGPU::S_PACK_LH_B32_B16: { 4996 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4997 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 4998 .addImm(0xffff); 4999 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32), ResultReg) 5000 .addReg(ImmReg, RegState::Kill) 5001 .add(Src0) 5002 .add(Src1); 5003 break; 5004 } 5005 case AMDGPU::S_PACK_HH_B32_B16: { 5006 unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5007 unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 5008 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 5009 .addImm(16) 5010 .add(Src0); 5011 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 5012 .addImm(0xffff0000); 5013 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32), ResultReg) 5014 .add(Src1) 5015 .addReg(ImmReg, RegState::Kill) 5016 .addReg(TmpReg, RegState::Kill); 5017 break; 5018 } 5019 default: 5020 llvm_unreachable("unhandled s_pack_* instruction"); 5021 } 5022 5023 MachineOperand &Dest = Inst.getOperand(0); 5024 MRI.replaceRegWith(Dest.getReg(), ResultReg); 5025 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 5026 } 5027 5028 void SIInstrInfo::addSCCDefUsersToVALUWorklist( 5029 MachineInstr &SCCDefInst, SetVectorType &Worklist) const { 5030 // This assumes that all the users of SCC are in the same block 5031 // as the SCC def. 5032 for (MachineInstr &MI : 5033 make_range(MachineBasicBlock::iterator(SCCDefInst), 5034 SCCDefInst.getParent()->end())) { 5035 // Exit if we find another SCC def. 5036 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 5037 return; 5038 5039 if (MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI) != -1) 5040 Worklist.insert(&MI); 5041 } 5042 } 5043 5044 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 5045 const MachineInstr &Inst) const { 5046 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 5047 5048 switch (Inst.getOpcode()) { 5049 // For target instructions, getOpRegClass just returns the virtual register 5050 // class associated with the operand, so we need to find an equivalent VGPR 5051 // register class in order to move the instruction to the VALU. 5052 case AMDGPU::COPY: 5053 case AMDGPU::PHI: 5054 case AMDGPU::REG_SEQUENCE: 5055 case AMDGPU::INSERT_SUBREG: 5056 case AMDGPU::WQM: 5057 case AMDGPU::WWM: 5058 if (RI.hasVGPRs(NewDstRC)) 5059 return nullptr; 5060 5061 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 5062 if (!NewDstRC) 5063 return nullptr; 5064 return NewDstRC; 5065 default: 5066 return NewDstRC; 5067 } 5068 } 5069 5070 // Find the one SGPR operand we are allowed to use. 5071 unsigned SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 5072 int OpIndices[3]) const { 5073 const MCInstrDesc &Desc = MI.getDesc(); 5074 5075 // Find the one SGPR operand we are allowed to use. 5076 // 5077 // First we need to consider the instruction's operand requirements before 5078 // legalizing. Some operands are required to be SGPRs, such as implicit uses 5079 // of VCC, but we are still bound by the constant bus requirement to only use 5080 // one. 
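// (Illustrative) on these subtargets a VALU instruction may read at most one
// unique SGPR (or one literal) through the constant bus, so for example:
//   V_FMA_F32 v0, s0, s0, v1   ; OK, a single unique SGPR
//   V_FMA_F32 v0, s0, s1, v1   ; not OK, one of s0/s1 must move to a VGPR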
  //
  // If the operand's class is an SGPR, we can never move it.

  unsigned SGPRReg = findImplicitSGPRRead(MI);
  if (SGPRReg != AMDGPU::NoRegister)
    return SGPRReg;

  unsigned UsedSGPRs[3] = { AMDGPU::NoRegister };
  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();

  for (unsigned i = 0; i < 3; ++i) {
    int Idx = OpIndices[i];
    if (Idx == -1)
      break;

    const MachineOperand &MO = MI.getOperand(Idx);
    if (!MO.isReg())
      continue;

    // Is this operand statically required to be an SGPR based on the operand
    // constraints?
    const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
    bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
    if (IsRequiredSGPR)
      return MO.getReg();

    // If this could be a VGPR or an SGPR, check the dynamic register class.
    unsigned Reg = MO.getReg();
    const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
    if (RI.isSGPRClass(RegRC))
      UsedSGPRs[i] = Reg;
  }

  // We don't have a required SGPR operand, so we have a bit more freedom in
  // selecting operands to move.

  // Try to select the most used SGPR. If an SGPR is equal to one of the
  // others, we choose that.
  //
  // e.g.
  // V_FMA_F32 v0, s0, s0, s0 -> No moves
  // V_FMA_F32 v0, s0, s1, s0 -> Move s1

  // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
  // prefer those.

  if (UsedSGPRs[0] != AMDGPU::NoRegister) {
    if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[0];
  }

  if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
    if (UsedSGPRs[1] == UsedSGPRs[2])
      SGPRReg = UsedSGPRs[1];
  }

  return SGPRReg;
}

MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
                                             unsigned OperandName) const {
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
  if (Idx == -1)
    return nullptr;

  return &MI.getOperand(Idx);
}

uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
  uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
  if (ST.isAmdHsaOS()) {
    // Set ATC = 1. GFX9 doesn't have this bit.
    if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (1ULL << 56);

    // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
    // BTW, it disables TC L2 and therefore decreases performance.
    if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
      RsrcDataFormat |= (2ULL << 59);
  }

  return RsrcDataFormat;
}

uint64_t SIInstrInfo::getScratchRsrcWords23() const {
  uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
                    AMDGPU::RSRC_TID_ENABLE |
                    0xffffffff; // Size;

  // GFX9 doesn't have ELEMENT_SIZE.
  if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize()) - 1;
    Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT;
  }

  // IndexStride = 64.
  Rsrc23 |= UINT64_C(3) << AMDGPU::RSRC_INDEX_STRIDE_SHIFT;

  // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17].
  // Clear them unless we want a huge stride.
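  // For instance, on a VI target with a 4-byte max private element size, the
  // words built so far hold size = 0xffffffff, TID_ENABLE, ELEMENT_SIZE = 1
  // (Log2_32(4) - 1) and INDEX_STRIDE = 3 (a stride of 64); the DATA_FORMAT
  // bits are then cleared just below.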
  if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT;

  return Rsrc23;
}

bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isSMRD(Opc);
}

bool SIInstrInfo::isHighLatencyInstruction(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();

  return isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc);
}

unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI,
                                    int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
  if (!Addr || !Addr->isFI())
    return AMDGPU::NoRegister;

  assert(!MI.memoperands_empty() &&
         (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS);

  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg();
}

unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI,
                                        int &FrameIndex) const {
  const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr);
  assert(Addr && Addr->isFI());
  FrameIndex = Addr->getIndex();
  return getNamedOperand(MI, AMDGPU::OpName::data)->getReg();
}

unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                          int &FrameIndex) const {
  if (!MI.mayLoad())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                         int &FrameIndex) const {
  if (!MI.mayStore())
    return AMDGPU::NoRegister;

  if (isMUBUF(MI) || isVGPRSpill(MI))
    return isStackAccess(MI, FrameIndex);

  if (isSGPRSpill(MI))
    return isSGPRStackAccess(MI, FrameIndex);

  return AMDGPU::NoRegister;
}

unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }

  return Size;
}

unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  unsigned Opc = MI.getOpcode();
  const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc);
  unsigned DescSize = Desc.getSize();

  // If we have a definitive size, we can use it. Otherwise we need to inspect
  // the operands to know the size.
  if (isFixedSize(MI))
    return DescSize;

  // 4-byte instructions may have a 32-bit literal encoded after them. Check
  // operands that could ever be literals.
  if (isVALU(MI) || isSALU(MI)) {
    int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
    if (Src0Idx == -1)
      return DescSize; // No operands.
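    // For instance, 's_mov_b32 s0, 0x12345678' is a 4-byte encoding followed
    // by a 4-byte literal, so its reported size is 8 bytes.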
    if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
      return DescSize + 4;

    int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
    if (Src1Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
      return DescSize + 4;

    int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
    if (Src2Idx == -1)
      return DescSize;

    if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
      return DescSize + 4;

    return DescSize;
  }

  switch (Opc) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::DBG_VALUE:
  case TargetOpcode::EH_LABEL:
    return 0;
  case TargetOpcode::BUNDLE:
    return getInstBundleSize(MI);
  case TargetOpcode::INLINEASM:
  case TargetOpcode::INLINEASM_BR: {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  default:
    return DescSize;
  }
}

bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
  if (!isFLAT(MI))
    return false;

  if (MI.memoperands_empty())
    return true;

  for (const MachineMemOperand *MMO : MI.memoperands()) {
    if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
      return true;
  }
  return false;
}

bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
  return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
}

void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
                                            MachineBasicBlock *IfEnd) const {
  MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
  assert(TI != IfEntry->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = IfEntry->getParent();
  MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstr *SIIF =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
            .add(Branch->getOperand(0))
            .add(Branch->getOperand(1));
    MachineInstr *SIEND =
        BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
            .addReg(DstReg);

    IfEntry->erase(TI);
    IfEntry->insert(IfEntry->end(), SIIF);
    IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
  }
}

void SIInstrInfo::convertNonUniformLoopRegion(
    MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
  MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
  // We expect 2 terminators, one conditional and one unconditional.
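  //
  // Roughly, the rewrite below produces (names refer to the virtual registers
  // created here, with the zero materialized in each non-backedge
  // predecessor):
  //   LoopEntry:
  //     %DstReg = PHI [0, <other preds>], [%BackEdgeReg, LoopEnd]
  //     ...
  //   LoopEnd:
  //     %BackEdgeReg = SI_IF_BREAK %DstReg, <branch condition>
  //     SI_LOOP %BackEdgeReg, LoopEntry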
  assert(TI != LoopEnd->end());

  MachineInstr *Branch = &(*TI);
  MachineFunction *MF = LoopEnd->getParent();
  MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo();

  if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {

    unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned BackEdgeReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    MachineInstrBuilder HeaderPHIBuilder =
        BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg);
    for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(),
                                          E = LoopEntry->pred_end();
         PI != E; ++PI) {
      if (*PI == LoopEnd) {
        HeaderPHIBuilder.addReg(BackEdgeReg);
      } else {
        MachineBasicBlock *PMBB = *PI;
        unsigned ZeroReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
        materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(),
                             ZeroReg, 0);
        HeaderPHIBuilder.addReg(ZeroReg);
      }
      HeaderPHIBuilder.addMBB(*PI);
    }
    MachineInstr *HeaderPhi = HeaderPHIBuilder;
    MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(),
                                      get(AMDGPU::SI_IF_BREAK), BackEdgeReg)
                                  .addReg(DstReg)
                                  .add(Branch->getOperand(0));
    MachineInstr *SILOOP =
        BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP))
            .addReg(BackEdgeReg)
            .addMBB(LoopEntry);

    LoopEntry->insert(LoopEntry->begin(), HeaderPhi);
    LoopEnd->erase(TI);
    LoopEnd->insert(LoopEnd->end(), SIIFBREAK);
    LoopEnd->insert(LoopEnd->end(), SILOOP);
  }
}

ArrayRef<std::pair<int, const char *>>
SIInstrInfo::getSerializableTargetIndices() const {
  static const std::pair<int, const char *> TargetIndices[] = {
      {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"},
      {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}};
  return makeArrayRef(TargetIndices);
}

/// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The
/// post-RA version of misched uses CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                                const ScheduleDAG *DAG) const {
  return new GCNHazardRecognizer(DAG->MF);
}

/// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer
/// pass.
ScheduleHazardRecognizer *
SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const {
  return new GCNHazardRecognizer(MF);
}

std::pair<unsigned, unsigned>
SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  return std::make_pair(TF & MO_MASK, TF & ~MO_MASK);
}

ArrayRef<std::pair<unsigned, const char *>>
SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  static const std::pair<unsigned, const char *> TargetFlags[] = {
    { MO_GOTPCREL, "amdgpu-gotprel" },
    { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" },
    { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" },
    { MO_REL32_LO, "amdgpu-rel32-lo" },
    { MO_REL32_HI, "amdgpu-rel32-hi" }
  };

  return makeArrayRef(TargetFlags);
}

bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const {
  return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY &&
         MI.modifiesRegister(AMDGPU::EXEC, &RI);
}

MachineInstrBuilder
SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I,
                           const DebugLoc &DL,
                           unsigned DestReg) const {
  if (ST.hasAddNoCarry())
    return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg);

  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  unsigned UnusedCarry = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  MRI.setRegAllocationHint(UnusedCarry, 0, AMDGPU::VCC);

  return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_I32_e64), DestReg)
           .addReg(UnusedCarry, RegState::Define | RegState::Dead);
}

bool SIInstrInfo::isKillTerminator(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
  case AMDGPU::SI_KILL_I1_TERMINATOR:
    return true;
  default:
    return false;
  }
}

const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const {
  switch (Opcode) {
  case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO:
    return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR);
  case AMDGPU::SI_KILL_I1_PSEUDO:
    return get(AMDGPU::SI_KILL_I1_TERMINATOR);
  default:
    llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO");
  }
}

bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const {
  if (!isSMRD(MI))
    return false;

  // Check that it is using a buffer resource.
  int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase);
  if (Idx == -1) // e.g. s_memtime
    return false;

  const auto RCID = MI.getDesc().OpInfo[Idx].RegClass;
  return RCID == AMDGPU::SReg_128RegClassID;
}

// This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td
enum SIEncodingFamily {
  SI = 0,
  VI = 1,
  SDWA = 2,
  SDWA9 = 3,
  GFX80 = 4,
  GFX9 = 5
};

static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) {
  switch (ST.getGeneration()) {
  default:
    break;
  case AMDGPUSubtarget::SOUTHERN_ISLANDS:
  case AMDGPUSubtarget::SEA_ISLANDS:
    return SIEncodingFamily::SI;
  case AMDGPUSubtarget::VOLCANIC_ISLANDS:
  case AMDGPUSubtarget::GFX9:
    return SIEncodingFamily::VI;
  }
  llvm_unreachable("Unknown subtarget generation!");
}

int SIInstrInfo::pseudoToMCOpcode(int Opcode) const {
  SIEncodingFamily Gen = subtargetEncodingFamily(ST);

  if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 &&
      ST.getGeneration() >= AMDGPUSubtarget::GFX9)
    Gen = SIEncodingFamily::GFX9;

  if (get(Opcode).TSFlags & SIInstrFlags::SDWA)
    Gen = ST.getGeneration() == AMDGPUSubtarget::GFX9 ? SIEncodingFamily::SDWA9
                                                      : SIEncodingFamily::SDWA;
  // Adjust the encoding family to GFX80 for D16 buffer instructions when the
  // subtarget has UnpackedD16VMem feature.
  // TODO: remove this when we discard GFX80 encoding.
  if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf))
    Gen = SIEncodingFamily::GFX80;

  int MCOp = AMDGPU::getMCOpcode(Opcode, Gen);

  // -1 means that Opcode is already a native instruction.
  if (MCOp == -1)
    return Opcode;

  // (uint16_t)-1 means that Opcode is a pseudo instruction that has
  // no encoding in the given subtarget generation.
  if (MCOp == (uint16_t)-1)
    return -1;

  return MCOp;
}

static
TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
  assert(RegOpnd.isReg());
  return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
                             getRegSubRegPair(RegOpnd);
}

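// A REG_SEQUENCE lists (register, subregister index) operand pairs after its
// def, e.g. (illustrative MIR):
//   %dst:vreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1
// so querying sub1 below yields the pair for %hi.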
TargetInstrInfo::RegSubRegPair
llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
  assert(MI.isRegSequence());
  for (unsigned I = 0, E = (MI.getNumOperands() - 1) / 2; I < E; ++I)
    if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
      auto &RegOp = MI.getOperand(1 + 2 * I);
      return getRegOrUndef(RegOp);
    }
  return TargetInstrInfo::RegSubRegPair();
}

// Try to find the definition of reg:subreg in subreg-manipulation pseudos
// Following a subreg of reg:subreg isn't supported
static bool followSubRegDef(MachineInstr &MI,
                            TargetInstrInfo::RegSubRegPair &RSR) {
  if (!RSR.SubReg)
    return false;
  switch (MI.getOpcode()) {
  default: break;
  case AMDGPU::REG_SEQUENCE:
    RSR = getRegSequenceSubReg(MI, RSR.SubReg);
    return true;
  // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
  case AMDGPU::INSERT_SUBREG:
    if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
      // inserted the subreg we're looking for
      RSR = getRegOrUndef(MI.getOperand(2));
    else { // the subreg in the rest of the reg
      auto R1 = getRegOrUndef(MI.getOperand(1));
      if (R1.SubReg) // subreg of subreg isn't supported
        return false;
      RSR.Reg = R1.Reg;
    }
    return true;
  }
  return false;
}

MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
                                     MachineRegisterInfo &MRI) {
  assert(MRI.isSSA());
  if (!TargetRegisterInfo::isVirtualRegister(P.Reg))
    return nullptr;

  auto RSR = P;
  auto *DefInst = MRI.getVRegDef(RSR.Reg);
  while (auto *MI = DefInst) {
    DefInst = nullptr;
    switch (MI->getOpcode()) {
    case AMDGPU::COPY:
    case AMDGPU::V_MOV_B32_e32: {
      auto &Op1 = MI->getOperand(1);
      if (Op1.isReg() &&
          TargetRegisterInfo::isVirtualRegister(Op1.getReg())) {
        if (Op1.isUndef())
          return nullptr;
        RSR = getRegSubRegPair(Op1);
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
      break;
    }
    default:
      if (followSubRegDef(*MI, RSR)) {
        if (!RSR.Reg)
          return nullptr;
        DefInst = MRI.getVRegDef(RSR.Reg);
      }
    }
    if (!DefInst)
      return MI;
  }
  return nullptr;
}

bool llvm::isEXECMaskConstantBetweenDefAndUses(unsigned VReg,
                                               MachineRegisterInfo &MRI) {
  assert(MRI.isSSA() && "Must be run on SSA");
  auto *TRI = MRI.getTargetRegisterInfo();

  auto *DefI = MRI.getVRegDef(VReg);
  auto *BB = DefI->getParent();

  DenseSet<MachineInstr*> Uses;
  for (auto &Use : MRI.use_nodbg_operands(VReg)) {
    auto *I = Use.getParent();
    if (I->getParent() != BB)
      return false;
    Uses.insert(I);
  }

  auto E = BB->end();
  for (auto I = std::next(DefI->getIterator()); I != E; ++I) {
    Uses.erase(&*I);
    // don't check the last use
    if (Uses.empty() || I->modifiesRegister(AMDGPU::EXEC, TRI))
      break;
  }
  return Uses.empty();
}