1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// SI Implementation of TargetInstrInfo. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIInstrInfo.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUInstrInfo.h" 17 #include "GCNHazardRecognizer.h" 18 #include "GCNSubtarget.h" 19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/LiveIntervals.h" 23 #include "llvm/CodeGen/LiveVariables.h" 24 #include "llvm/CodeGen/MachineDominators.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineScheduler.h" 27 #include "llvm/CodeGen/RegisterScavenging.h" 28 #include "llvm/CodeGen/ScheduleDAG.h" 29 #include "llvm/IR/DiagnosticInfo.h" 30 #include "llvm/IR/IntrinsicsAMDGPU.h" 31 #include "llvm/MC/MCContext.h" 32 #include "llvm/Support/CommandLine.h" 33 #include "llvm/Target/TargetMachine.h" 34 35 using namespace llvm; 36 37 #define DEBUG_TYPE "si-instr-info" 38 39 #define GET_INSTRINFO_CTOR_DTOR 40 #include "AMDGPUGenInstrInfo.inc" 41 42 namespace llvm { 43 44 class AAResults; 45 46 namespace AMDGPU { 47 #define GET_D16ImageDimIntrinsics_IMPL 48 #define GET_ImageDimIntrinsicTable_IMPL 49 #define GET_RsrcIntrinsics_IMPL 50 #include "AMDGPUGenSearchableTables.inc" 51 } 52 } 53 54 55 // Must be at least 4 to be able to branch over minimum unconditional branch 56 // code. This is only for making it possible to write reasonably small tests for 57 // long branches. 58 static cl::opt<unsigned> 59 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), 60 cl::desc("Restrict range of branch instructions (DEBUG)")); 61 62 static cl::opt<bool> Fix16BitCopies( 63 "amdgpu-fix-16-bit-physreg-copies", 64 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), 65 cl::init(true), 66 cl::ReallyHidden); 67 68 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST) 69 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), 70 RI(ST), ST(ST) { 71 SchedModel.init(&ST); 72 } 73 74 //===----------------------------------------------------------------------===// 75 // TargetInstrInfo callbacks 76 //===----------------------------------------------------------------------===// 77 78 static unsigned getNumOperandsNoGlue(SDNode *Node) { 79 unsigned N = Node->getNumOperands(); 80 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 81 --N; 82 return N; 83 } 84 85 /// Returns true if both nodes have the same value for the given 86 /// operand \p Op, or if both nodes do not have this operand. 
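/// Here \p OpName is an AMDGPU::OpName operand identifier; the two operands
/// are compared as SDValues after adjusting for the MachineSDNode result
/// operand (see below).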
87 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { 88 unsigned Opc0 = N0->getMachineOpcode(); 89 unsigned Opc1 = N1->getMachineOpcode(); 90 91 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); 92 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); 93 94 if (Op0Idx == -1 && Op1Idx == -1) 95 return true; 96 97 98 if ((Op0Idx == -1 && Op1Idx != -1) || 99 (Op1Idx == -1 && Op0Idx != -1)) 100 return false; 101 102 // getNamedOperandIdx returns the index for the MachineInstr's operands, 103 // which includes the result as the first operand. We are indexing into the 104 // MachineSDNode's operands, so we need to skip the result operand to get 105 // the real index. 106 --Op0Idx; 107 --Op1Idx; 108 109 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx); 110 } 111 112 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, 113 AAResults *AA) const { 114 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI) || isSALU(MI)) { 115 // Normally VALU use of exec would block the rematerialization, but that 116 // is OK in this case to have an implicit exec read as all VALU do. 117 // We really want all of the generic logic for this except for this. 118 119 // Another potential implicit use is mode register. The core logic of 120 // the RA will not attempt rematerialization if mode is set anywhere 121 // in the function, otherwise it is safe since mode is not changed. 122 123 // There is difference to generic method which does not allow 124 // rematerialization if there are virtual register uses. We allow this, 125 // therefore this method includes SOP instructions as well. 126 return !MI.hasImplicitDef() && 127 MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() && 128 !MI.mayRaiseFPException(); 129 } 130 131 return false; 132 } 133 134 // Returns true if the scalar result of a VALU instruction depends on exec. 135 static bool resultDependsOnExec(const MachineInstr &MI) { 136 // Ignore comparisons which are only used masked with exec. 137 // This allows some hoisting/sinking of VALU comparisons. 138 if (MI.isCompare()) { 139 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 140 Register DstReg = MI.getOperand(0).getReg(); 141 if (!DstReg.isVirtual()) 142 return true; 143 for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) { 144 switch (Use.getOpcode()) { 145 case AMDGPU::S_AND_SAVEEXEC_B32: 146 case AMDGPU::S_AND_SAVEEXEC_B64: 147 break; 148 case AMDGPU::S_AND_B32: 149 case AMDGPU::S_AND_B64: 150 if (!Use.readsRegister(AMDGPU::EXEC)) 151 return true; 152 break; 153 default: 154 return true; 155 } 156 } 157 return false; 158 } 159 160 switch (MI.getOpcode()) { 161 default: 162 break; 163 case AMDGPU::V_READFIRSTLANE_B32: 164 return true; 165 } 166 167 return false; 168 } 169 170 bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const { 171 // Any implicit use of exec by VALU is not a real register read. 172 return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() && 173 isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent()); 174 } 175 176 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, 177 int64_t &Offset0, 178 int64_t &Offset1) const { 179 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode()) 180 return false; 181 182 unsigned Opc0 = Load0->getMachineOpcode(); 183 unsigned Opc1 = Load1->getMachineOpcode(); 184 185 // Make sure both are actually loads. 
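  // mayLoad() on the MCInstrDesc filters out stores and other non-load
  // opcodes before any operands are inspected.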
186 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad()) 187 return false; 188 189 if (isDS(Opc0) && isDS(Opc1)) { 190 191 // FIXME: Handle this case: 192 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1)) 193 return false; 194 195 // Check base reg. 196 if (Load0->getOperand(0) != Load1->getOperand(0)) 197 return false; 198 199 // Skip read2 / write2 variants for simplicity. 200 // TODO: We should report true if the used offsets are adjacent (excluded 201 // st64 versions). 202 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 203 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 204 if (Offset0Idx == -1 || Offset1Idx == -1) 205 return false; 206 207 // XXX - be careful of dataless loads 208 // getNamedOperandIdx returns the index for MachineInstrs. Since they 209 // include the output in the operand list, but SDNodes don't, we need to 210 // subtract the index by one. 211 Offset0Idx -= get(Opc0).NumDefs; 212 Offset1Idx -= get(Opc1).NumDefs; 213 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue(); 214 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue(); 215 return true; 216 } 217 218 if (isSMRD(Opc0) && isSMRD(Opc1)) { 219 // Skip time and cache invalidation instructions. 220 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 || 221 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1) 222 return false; 223 224 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1)); 225 226 // Check base reg. 227 if (Load0->getOperand(0) != Load1->getOperand(0)) 228 return false; 229 230 const ConstantSDNode *Load0Offset = 231 dyn_cast<ConstantSDNode>(Load0->getOperand(1)); 232 const ConstantSDNode *Load1Offset = 233 dyn_cast<ConstantSDNode>(Load1->getOperand(1)); 234 235 if (!Load0Offset || !Load1Offset) 236 return false; 237 238 Offset0 = Load0Offset->getZExtValue(); 239 Offset1 = Load1Offset->getZExtValue(); 240 return true; 241 } 242 243 // MUBUF and MTBUF can access the same addresses. 244 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) { 245 246 // MUBUF and MTBUF have vaddr at different indices. 247 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || 248 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || 249 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) 250 return false; 251 252 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 253 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 254 255 if (OffIdx0 == -1 || OffIdx1 == -1) 256 return false; 257 258 // getNamedOperandIdx returns the index for MachineInstrs. Since they 259 // include the output in the operand list, but SDNodes don't, we need to 260 // subtract the index by one. 261 OffIdx0 -= get(Opc0).NumDefs; 262 OffIdx1 -= get(Opc1).NumDefs; 263 264 SDValue Off0 = Load0->getOperand(OffIdx0); 265 SDValue Off1 = Load1->getOperand(OffIdx1); 266 267 // The offset might be a FrameIndexSDNode. 
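  // Only constant offsets can be compared here, so bail out otherwise.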
268 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 269 return false; 270 271 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 272 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 273 return true; 274 } 275 276 return false; 277 } 278 279 static bool isStride64(unsigned Opc) { 280 switch (Opc) { 281 case AMDGPU::DS_READ2ST64_B32: 282 case AMDGPU::DS_READ2ST64_B64: 283 case AMDGPU::DS_WRITE2ST64_B32: 284 case AMDGPU::DS_WRITE2ST64_B64: 285 return true; 286 default: 287 return false; 288 } 289 } 290 291 bool SIInstrInfo::getMemOperandsWithOffsetWidth( 292 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 293 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, 294 const TargetRegisterInfo *TRI) const { 295 if (!LdSt.mayLoadOrStore()) 296 return false; 297 298 unsigned Opc = LdSt.getOpcode(); 299 OffsetIsScalable = false; 300 const MachineOperand *BaseOp, *OffsetOp; 301 int DataOpIdx; 302 303 if (isDS(LdSt)) { 304 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 305 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 306 if (OffsetOp) { 307 // Normal, single offset LDS instruction. 308 if (!BaseOp) { 309 // DS_CONSUME/DS_APPEND use M0 for the base address. 310 // TODO: find the implicit use operand for M0 and use that as BaseOp? 311 return false; 312 } 313 BaseOps.push_back(BaseOp); 314 Offset = OffsetOp->getImm(); 315 // Get appropriate operand, and compute width accordingly. 316 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 317 if (DataOpIdx == -1) 318 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 319 Width = getOpSize(LdSt, DataOpIdx); 320 } else { 321 // The 2 offset instructions use offset0 and offset1 instead. We can treat 322 // these as a load with a single offset if the 2 offsets are consecutive. 323 // We will use this for some partially aligned loads. 324 const MachineOperand *Offset0Op = 325 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 326 const MachineOperand *Offset1Op = 327 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 328 329 unsigned Offset0 = Offset0Op->getImm(); 330 unsigned Offset1 = Offset1Op->getImm(); 331 if (Offset0 + 1 != Offset1) 332 return false; 333 334 // Each of these offsets is in element sized units, so we need to convert 335 // to bytes of the individual reads. 336 337 unsigned EltSize; 338 if (LdSt.mayLoad()) 339 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 340 else { 341 assert(LdSt.mayStore()); 342 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 343 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 344 } 345 346 if (isStride64(Opc)) 347 EltSize *= 64; 348 349 BaseOps.push_back(BaseOp); 350 Offset = EltSize * Offset0; 351 // Get appropriate operand(s), and compute width accordingly. 352 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 353 if (DataOpIdx == -1) { 354 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 355 Width = getOpSize(LdSt, DataOpIdx); 356 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1); 357 Width += getOpSize(LdSt, DataOpIdx); 358 } else { 359 Width = getOpSize(LdSt, DataOpIdx); 360 } 361 } 362 return true; 363 } 364 365 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 366 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 367 if (!RSrc) // e.g. 
BUFFER_WBINVL1_VOL 368 return false; 369 BaseOps.push_back(RSrc); 370 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 371 if (BaseOp && !BaseOp->isFI()) 372 BaseOps.push_back(BaseOp); 373 const MachineOperand *OffsetImm = 374 getNamedOperand(LdSt, AMDGPU::OpName::offset); 375 Offset = OffsetImm->getImm(); 376 const MachineOperand *SOffset = 377 getNamedOperand(LdSt, AMDGPU::OpName::soffset); 378 if (SOffset) { 379 if (SOffset->isReg()) 380 BaseOps.push_back(SOffset); 381 else 382 Offset += SOffset->getImm(); 383 } 384 // Get appropriate operand, and compute width accordingly. 385 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 386 if (DataOpIdx == -1) 387 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 388 Width = getOpSize(LdSt, DataOpIdx); 389 return true; 390 } 391 392 if (isMIMG(LdSt)) { 393 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 394 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx)); 395 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 396 if (VAddr0Idx >= 0) { 397 // GFX10 possible NSA encoding. 398 for (int I = VAddr0Idx; I < SRsrcIdx; ++I) 399 BaseOps.push_back(&LdSt.getOperand(I)); 400 } else { 401 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr)); 402 } 403 Offset = 0; 404 // Get appropriate operand, and compute width accordingly. 405 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 406 Width = getOpSize(LdSt, DataOpIdx); 407 return true; 408 } 409 410 if (isSMRD(LdSt)) { 411 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 412 if (!BaseOp) // e.g. S_MEMTIME 413 return false; 414 BaseOps.push_back(BaseOp); 415 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 416 Offset = OffsetOp ? OffsetOp->getImm() : 0; 417 // Get appropriate operand, and compute width accordingly. 418 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst); 419 Width = getOpSize(LdSt, DataOpIdx); 420 return true; 421 } 422 423 if (isFLAT(LdSt)) { 424 // Instructions have either vaddr or saddr or both or none. 425 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 426 if (BaseOp) 427 BaseOps.push_back(BaseOp); 428 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr); 429 if (BaseOp) 430 BaseOps.push_back(BaseOp); 431 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm(); 432 // Get appropriate operand, and compute width accordingly. 433 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 434 if (DataOpIdx == -1) 435 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 436 Width = getOpSize(LdSt, DataOpIdx); 437 return true; 438 } 439 440 return false; 441 } 442 443 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, 444 ArrayRef<const MachineOperand *> BaseOps1, 445 const MachineInstr &MI2, 446 ArrayRef<const MachineOperand *> BaseOps2) { 447 // Only examine the first "base" operand of each instruction, on the 448 // assumption that it represents the real base address of the memory access. 449 // Other operands are typically offsets or indices from this base address. 
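  // If the base operands are not trivially identical, fall back to comparing
  // the IR-level underlying objects of the two memory operands below.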
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // In order to avoid register pressure, on average the number of DWORDs
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance-related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is a
  // brief summary of how the heuristic behaves for various `LoadSize`:
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 loads + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into two batches of 16.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have at most 16 loads in a row and the offsets are within 64 bytes,
  // then schedule together.

  // A cacheline is 64 bytes (for global memory).
526 return (NumLoads <= 16 && (Offset1 - Offset0) < 64); 527 } 528 529 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, 530 MachineBasicBlock::iterator MI, 531 const DebugLoc &DL, MCRegister DestReg, 532 MCRegister SrcReg, bool KillSrc, 533 const char *Msg = "illegal SGPR to VGPR copy") { 534 MachineFunction *MF = MBB.getParent(); 535 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error); 536 LLVMContext &C = MF->getFunction().getContext(); 537 C.diagnose(IllegalCopy); 538 539 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg) 540 .addReg(SrcReg, getKillRegState(KillSrc)); 541 } 542 543 /// Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908. It is not 544 /// possible to have a direct copy in these cases on GFX908, so an intermediate 545 /// VGPR copy is required. 546 static void indirectCopyToAGPR(const SIInstrInfo &TII, 547 MachineBasicBlock &MBB, 548 MachineBasicBlock::iterator MI, 549 const DebugLoc &DL, MCRegister DestReg, 550 MCRegister SrcReg, bool KillSrc, 551 RegScavenger &RS, 552 Register ImpDefSuperReg = Register(), 553 Register ImpUseSuperReg = Register()) { 554 assert((TII.getSubtarget().hasMAIInsts() && 555 !TII.getSubtarget().hasGFX90AInsts()) && 556 "Expected GFX908 subtarget."); 557 558 assert((AMDGPU::SReg_32RegClass.contains(SrcReg) || 559 AMDGPU::AGPR_32RegClass.contains(SrcReg)) && 560 "Source register of the copy should be either an SGPR or an AGPR."); 561 562 assert(AMDGPU::AGPR_32RegClass.contains(DestReg) && 563 "Destination register of the copy should be an AGPR."); 564 565 const SIRegisterInfo &RI = TII.getRegisterInfo(); 566 567 // First try to find defining accvgpr_write to avoid temporary registers. 568 for (auto Def = MI, E = MBB.begin(); Def != E; ) { 569 --Def; 570 if (!Def->definesRegister(SrcReg, &RI)) 571 continue; 572 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64) 573 break; 574 575 MachineOperand &DefOp = Def->getOperand(1); 576 assert(DefOp.isReg() || DefOp.isImm()); 577 578 if (DefOp.isReg()) { 579 // Check that register source operand if not clobbered before MI. 580 // Immediate operands are always safe to propagate. 581 bool SafeToPropagate = true; 582 for (auto I = Def; I != MI && SafeToPropagate; ++I) 583 if (I->modifiesRegister(DefOp.getReg(), &RI)) 584 SafeToPropagate = false; 585 586 if (!SafeToPropagate) 587 break; 588 589 DefOp.setIsKill(false); 590 } 591 592 MachineInstrBuilder Builder = 593 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 594 .add(DefOp); 595 if (ImpDefSuperReg) 596 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 597 598 if (ImpUseSuperReg) { 599 Builder.addReg(ImpUseSuperReg, 600 getKillRegState(KillSrc) | RegState::Implicit); 601 } 602 603 return; 604 } 605 606 RS.enterBasicBlock(MBB); 607 RS.forward(MI); 608 609 // Ideally we want to have three registers for a long reg_sequence copy 610 // to hide 2 waitstates between v_mov_b32 and accvgpr_write. 611 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, 612 *MBB.getParent()); 613 614 // Registers in the sequence are allocated contiguously so we can just 615 // use register number to pick one of three round-robin temps. 
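  // Note the scavenging loop below must still cope with fewer than three free
  // VGPRs; in the worst case every copy falls back to the single reserved Tmp.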
616 unsigned RegNo = DestReg % 3; 617 Register Tmp = AMDGPU::VGPR32; 618 assert(MBB.getParent()->getRegInfo().isReserved(Tmp) && 619 "VGPR used for an intermediate copy should have been reserved."); 620 621 // Only loop through if there are any free registers left, otherwise 622 // scavenger may report a fatal error without emergency spill slot 623 // or spill with the slot. 624 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) { 625 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 626 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) 627 break; 628 Tmp = Tmp2; 629 RS.setRegUsed(Tmp); 630 } 631 632 // Insert copy to temporary VGPR. 633 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32; 634 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) { 635 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64; 636 } else { 637 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 638 } 639 640 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp) 641 .addReg(SrcReg, getKillRegState(KillSrc)); 642 if (ImpUseSuperReg) { 643 UseBuilder.addReg(ImpUseSuperReg, 644 getKillRegState(KillSrc) | RegState::Implicit); 645 } 646 647 MachineInstrBuilder DefBuilder 648 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 649 .addReg(Tmp, RegState::Kill); 650 651 if (ImpDefSuperReg) 652 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 653 } 654 655 static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, 656 MachineBasicBlock::iterator MI, const DebugLoc &DL, 657 MCRegister DestReg, MCRegister SrcReg, bool KillSrc, 658 const TargetRegisterClass *RC, bool Forward) { 659 const SIRegisterInfo &RI = TII.getRegisterInfo(); 660 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4); 661 MachineBasicBlock::iterator I = MI; 662 MachineInstr *FirstMI = nullptr, *LastMI = nullptr; 663 664 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) { 665 int16_t SubIdx = BaseIndices[Idx]; 666 Register Reg = RI.getSubReg(DestReg, SubIdx); 667 unsigned Opcode = AMDGPU::S_MOV_B32; 668 669 // Is SGPR aligned? If so try to combine with next. 670 Register Src = RI.getSubReg(SrcReg, SubIdx); 671 bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0; 672 bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0; 673 if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) { 674 // Can use SGPR64 copy 675 unsigned Channel = RI.getChannelFromSubReg(SubIdx); 676 SubIdx = RI.getSubRegFromChannel(Channel, 2); 677 Opcode = AMDGPU::S_MOV_B64; 678 Idx++; 679 } 680 681 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx)) 682 .addReg(RI.getSubReg(SrcReg, SubIdx)) 683 .addReg(SrcReg, RegState::Implicit); 684 685 if (!FirstMI) 686 FirstMI = LastMI; 687 688 if (!Forward) 689 I--; 690 } 691 692 assert(FirstMI && LastMI); 693 if (!Forward) 694 std::swap(FirstMI, LastMI); 695 696 FirstMI->addOperand( 697 MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/)); 698 699 if (KillSrc) 700 LastMI->addRegisterKilled(SrcReg, &RI); 701 } 702 703 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 704 MachineBasicBlock::iterator MI, 705 const DebugLoc &DL, MCRegister DestReg, 706 MCRegister SrcReg, bool KillSrc) const { 707 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg); 708 709 // FIXME: This is hack to resolve copies between 16 bit and 32 bit 710 // registers until all patterns are fixed. 
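  // The XOR below fires only when exactly one side of the copy is 16 bit;
  // that side is then widened to its containing 32-bit register.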
711 if (Fix16BitCopies && 712 ((RI.getRegSizeInBits(*RC) == 16) ^ 713 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) { 714 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg; 715 MCRegister Super = RI.get32BitRegister(RegToFix); 716 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix); 717 RegToFix = Super; 718 719 if (DestReg == SrcReg) { 720 // Insert empty bundle since ExpandPostRA expects an instruction here. 721 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE)); 722 return; 723 } 724 725 RC = RI.getPhysRegClass(DestReg); 726 } 727 728 if (RC == &AMDGPU::VGPR_32RegClass) { 729 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 730 AMDGPU::SReg_32RegClass.contains(SrcReg) || 731 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 732 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ? 733 AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32; 734 BuildMI(MBB, MI, DL, get(Opc), DestReg) 735 .addReg(SrcReg, getKillRegState(KillSrc)); 736 return; 737 } 738 739 if (RC == &AMDGPU::SReg_32_XM0RegClass || 740 RC == &AMDGPU::SReg_32RegClass) { 741 if (SrcReg == AMDGPU::SCC) { 742 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg) 743 .addImm(1) 744 .addImm(0); 745 return; 746 } 747 748 if (DestReg == AMDGPU::VCC_LO) { 749 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { 750 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) 751 .addReg(SrcReg, getKillRegState(KillSrc)); 752 } else { 753 // FIXME: Hack until VReg_1 removed. 754 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 755 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 756 .addImm(0) 757 .addReg(SrcReg, getKillRegState(KillSrc)); 758 } 759 760 return; 761 } 762 763 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 764 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 765 return; 766 } 767 768 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 769 .addReg(SrcReg, getKillRegState(KillSrc)); 770 return; 771 } 772 773 if (RC == &AMDGPU::SReg_64RegClass) { 774 if (SrcReg == AMDGPU::SCC) { 775 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg) 776 .addImm(1) 777 .addImm(0); 778 return; 779 } 780 781 if (DestReg == AMDGPU::VCC) { 782 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 783 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 784 .addReg(SrcReg, getKillRegState(KillSrc)); 785 } else { 786 // FIXME: Hack until VReg_1 removed. 787 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 788 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 789 .addImm(0) 790 .addReg(SrcReg, getKillRegState(KillSrc)); 791 } 792 793 return; 794 } 795 796 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 797 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 798 return; 799 } 800 801 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 802 .addReg(SrcReg, getKillRegState(KillSrc)); 803 return; 804 } 805 806 if (DestReg == AMDGPU::SCC) { 807 // Copying 64-bit or 32-bit sources to SCC barely makes sense, 808 // but SelectionDAG emits such copies for i1 sources. 809 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 810 // This copy can only be produced by patterns 811 // with explicit SCC, which are known to be enabled 812 // only for subtargets with S_CMP_LG_U64 present. 
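      // S_CMP_LG_U64 compares the 64-bit source against zero and sets SCC
      // directly, so no intermediate register is needed.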
813 assert(ST.hasScalarCompareEq64()); 814 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64)) 815 .addReg(SrcReg, getKillRegState(KillSrc)) 816 .addImm(0); 817 } else { 818 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 819 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 820 .addReg(SrcReg, getKillRegState(KillSrc)) 821 .addImm(0); 822 } 823 824 return; 825 } 826 827 if (RC == &AMDGPU::AGPR_32RegClass) { 828 if (AMDGPU::VGPR_32RegClass.contains(SrcReg) || 829 (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) { 830 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 831 .addReg(SrcReg, getKillRegState(KillSrc)); 832 return; 833 } 834 835 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) { 836 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg) 837 .addReg(SrcReg, getKillRegState(KillSrc)); 838 return; 839 } 840 841 // FIXME: Pass should maintain scavenger to avoid scan through the block on 842 // every AGPR spill. 843 RegScavenger RS; 844 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS); 845 return; 846 } 847 848 const unsigned Size = RI.getRegSizeInBits(*RC); 849 if (Size == 16) { 850 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 851 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) || 852 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 853 AMDGPU::AGPR_LO16RegClass.contains(SrcReg)); 854 855 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg); 856 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg); 857 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg); 858 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 859 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) || 860 AMDGPU::SReg_LO16RegClass.contains(DestReg) || 861 AMDGPU::AGPR_LO16RegClass.contains(DestReg); 862 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 863 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 864 AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 865 MCRegister NewDestReg = RI.get32BitRegister(DestReg); 866 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg); 867 868 if (IsSGPRDst) { 869 if (!IsSGPRSrc) { 870 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 871 return; 872 } 873 874 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg) 875 .addReg(NewSrcReg, getKillRegState(KillSrc)); 876 return; 877 } 878 879 if (IsAGPRDst || IsAGPRSrc) { 880 if (!DstLow || !SrcLow) { 881 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 882 "Cannot use hi16 subreg with an AGPR!"); 883 } 884 885 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc); 886 return; 887 } 888 889 if (IsSGPRSrc && !ST.hasSDWAScalar()) { 890 if (!DstLow || !SrcLow) { 891 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 892 "Cannot use hi16 subreg on VI!"); 893 } 894 895 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg) 896 .addReg(NewSrcReg, getKillRegState(KillSrc)); 897 return; 898 } 899 900 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg) 901 .addImm(0) // src0_modifiers 902 .addReg(NewSrcReg) 903 .addImm(0) // clamp 904 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0 905 : AMDGPU::SDWA::SdwaSel::WORD_1) 906 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) 907 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0 908 : AMDGPU::SDWA::SdwaSel::WORD_1) 909 .addReg(NewDestReg, RegState::Implicit | RegState::Undef); 910 // First implicit operand is $exec. 
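  // Tie the def to the trailing undef use of NewDestReg added above; with
  // UNUSED_PRESERVE this models the untouched 16-bit half staying live.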
911 MIB->tieOperands(0, MIB->getNumOperands() - 1); 912 return; 913 } 914 915 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg); 916 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) { 917 if (ST.hasMovB64()) { 918 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_e32), DestReg) 919 .addReg(SrcReg, getKillRegState(KillSrc)); 920 return; 921 } 922 if (ST.hasPackedFP32Ops()) { 923 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg) 924 .addImm(SISrcMods::OP_SEL_1) 925 .addReg(SrcReg) 926 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 927 .addReg(SrcReg) 928 .addImm(0) // op_sel_lo 929 .addImm(0) // op_sel_hi 930 .addImm(0) // neg_lo 931 .addImm(0) // neg_hi 932 .addImm(0) // clamp 933 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit); 934 return; 935 } 936 } 937 938 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 939 if (RI.isSGPRClass(RC)) { 940 if (!RI.isSGPRClass(SrcRC)) { 941 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 942 return; 943 } 944 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); 945 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, CanKillSuperReg, RC, 946 Forward); 947 return; 948 } 949 950 unsigned EltSize = 4; 951 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 952 if (RI.isAGPRClass(RC)) { 953 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC)) 954 Opcode = AMDGPU::V_ACCVGPR_MOV_B32; 955 else if (RI.hasVGPRs(SrcRC) || 956 (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC))) 957 Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 958 else 959 Opcode = AMDGPU::INSTRUCTION_LIST_END; 960 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) { 961 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64; 962 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) && 963 (RI.isProperlyAlignedRC(*RC) && 964 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) { 965 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov. 966 if (ST.hasMovB64()) { 967 Opcode = AMDGPU::V_MOV_B64_e32; 968 EltSize = 8; 969 } else if (ST.hasPackedFP32Ops()) { 970 Opcode = AMDGPU::V_PK_MOV_B32; 971 EltSize = 8; 972 } 973 } 974 975 // For the cases where we need an intermediate instruction/temporary register 976 // (destination is an AGPR), we need a scavenger. 977 // 978 // FIXME: The pass should maintain this for us so we don't have to re-scan the 979 // whole block for every handled copy. 980 std::unique_ptr<RegScavenger> RS; 981 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) 982 RS.reset(new RegScavenger()); 983 984 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 985 986 // If there is an overlap, we can't kill the super-register on the last 987 // instruction, since it will also kill the components made live by this def. 988 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); 989 990 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 991 unsigned SubIdx; 992 if (Forward) 993 SubIdx = SubIndices[Idx]; 994 else 995 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 996 997 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1; 998 999 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) { 1000 Register ImpDefSuper = Idx == 0 ? 
Register(DestReg) : Register(); 1001 Register ImpUseSuper = SrcReg; 1002 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 1003 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS, 1004 ImpDefSuper, ImpUseSuper); 1005 } else if (Opcode == AMDGPU::V_PK_MOV_B32) { 1006 Register DstSubReg = RI.getSubReg(DestReg, SubIdx); 1007 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); 1008 MachineInstrBuilder MIB = 1009 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg) 1010 .addImm(SISrcMods::OP_SEL_1) 1011 .addReg(SrcSubReg) 1012 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 1013 .addReg(SrcSubReg) 1014 .addImm(0) // op_sel_lo 1015 .addImm(0) // op_sel_hi 1016 .addImm(0) // neg_lo 1017 .addImm(0) // neg_hi 1018 .addImm(0) // clamp 1019 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 1020 if (Idx == 0) 1021 MIB.addReg(DestReg, RegState::Define | RegState::Implicit); 1022 } else { 1023 MachineInstrBuilder Builder = 1024 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx)) 1025 .addReg(RI.getSubReg(SrcReg, SubIdx)); 1026 if (Idx == 0) 1027 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 1028 1029 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 1030 } 1031 } 1032 } 1033 1034 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 1035 int NewOpc; 1036 1037 // Try to map original to commuted opcode 1038 NewOpc = AMDGPU::getCommuteRev(Opcode); 1039 if (NewOpc != -1) 1040 // Check if the commuted (REV) opcode exists on the target. 1041 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 1042 1043 // Try to map commuted to original opcode 1044 NewOpc = AMDGPU::getCommuteOrig(Opcode); 1045 if (NewOpc != -1) 1046 // Check if the original (non-REV) opcode exists on the target. 1047 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 1048 1049 return Opcode; 1050 } 1051 1052 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 1053 MachineBasicBlock::iterator MI, 1054 const DebugLoc &DL, unsigned DestReg, 1055 int64_t Value) const { 1056 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1057 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 1058 if (RegClass == &AMDGPU::SReg_32RegClass || 1059 RegClass == &AMDGPU::SGPR_32RegClass || 1060 RegClass == &AMDGPU::SReg_32_XM0RegClass || 1061 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 1062 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 1063 .addImm(Value); 1064 return; 1065 } 1066 1067 if (RegClass == &AMDGPU::SReg_64RegClass || 1068 RegClass == &AMDGPU::SGPR_64RegClass || 1069 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 1070 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 1071 .addImm(Value); 1072 return; 1073 } 1074 1075 if (RegClass == &AMDGPU::VGPR_32RegClass) { 1076 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 1077 .addImm(Value); 1078 return; 1079 } 1080 if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) { 1081 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 1082 .addImm(Value); 1083 return; 1084 } 1085 1086 unsigned EltSize = 4; 1087 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 1088 if (RI.isSGPRClass(RegClass)) { 1089 if (RI.getRegSizeInBits(*RegClass) > 32) { 1090 Opcode = AMDGPU::S_MOV_B64; 1091 EltSize = 8; 1092 } else { 1093 Opcode = AMDGPU::S_MOV_B32; 1094 EltSize = 4; 1095 } 1096 } 1097 1098 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 1099 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 1100 int64_t IdxValue = Idx == 0 ? 
Value : 0; 1101 1102 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 1103 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 1104 Builder.addImm(IdxValue); 1105 } 1106 } 1107 1108 const TargetRegisterClass * 1109 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 1110 return &AMDGPU::VGPR_32RegClass; 1111 } 1112 1113 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 1114 MachineBasicBlock::iterator I, 1115 const DebugLoc &DL, Register DstReg, 1116 ArrayRef<MachineOperand> Cond, 1117 Register TrueReg, 1118 Register FalseReg) const { 1119 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1120 const TargetRegisterClass *BoolXExecRC = 1121 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 1122 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 1123 "Not a VGPR32 reg"); 1124 1125 if (Cond.size() == 1) { 1126 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1127 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1128 .add(Cond[0]); 1129 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1130 .addImm(0) 1131 .addReg(FalseReg) 1132 .addImm(0) 1133 .addReg(TrueReg) 1134 .addReg(SReg); 1135 } else if (Cond.size() == 2) { 1136 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 1137 switch (Cond[0].getImm()) { 1138 case SIInstrInfo::SCC_TRUE: { 1139 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1140 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1141 : AMDGPU::S_CSELECT_B64), SReg) 1142 .addImm(1) 1143 .addImm(0); 1144 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1145 .addImm(0) 1146 .addReg(FalseReg) 1147 .addImm(0) 1148 .addReg(TrueReg) 1149 .addReg(SReg); 1150 break; 1151 } 1152 case SIInstrInfo::SCC_FALSE: { 1153 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1154 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1155 : AMDGPU::S_CSELECT_B64), SReg) 1156 .addImm(0) 1157 .addImm(1); 1158 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1159 .addImm(0) 1160 .addReg(FalseReg) 1161 .addImm(0) 1162 .addReg(TrueReg) 1163 .addReg(SReg); 1164 break; 1165 } 1166 case SIInstrInfo::VCCNZ: { 1167 MachineOperand RegOp = Cond[1]; 1168 RegOp.setImplicit(false); 1169 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1170 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1171 .add(RegOp); 1172 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1173 .addImm(0) 1174 .addReg(FalseReg) 1175 .addImm(0) 1176 .addReg(TrueReg) 1177 .addReg(SReg); 1178 break; 1179 } 1180 case SIInstrInfo::VCCZ: { 1181 MachineOperand RegOp = Cond[1]; 1182 RegOp.setImplicit(false); 1183 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1184 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1185 .add(RegOp); 1186 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1187 .addImm(0) 1188 .addReg(TrueReg) 1189 .addImm(0) 1190 .addReg(FalseReg) 1191 .addReg(SReg); 1192 break; 1193 } 1194 case SIInstrInfo::EXECNZ: { 1195 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1196 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1197 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1198 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1199 .addImm(0); 1200 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 1201 : AMDGPU::S_CSELECT_B64), SReg) 1202 .addImm(1) 1203 .addImm(0); 1204 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1205 .addImm(0) 1206 .addReg(FalseReg) 1207 .addImm(0) 1208 .addReg(TrueReg) 1209 .addReg(SReg); 1210 break; 1211 } 1212 case SIInstrInfo::EXECZ: { 1213 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1214 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1215 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1216 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1217 .addImm(0); 1218 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1219 : AMDGPU::S_CSELECT_B64), SReg) 1220 .addImm(0) 1221 .addImm(1); 1222 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1223 .addImm(0) 1224 .addReg(FalseReg) 1225 .addImm(0) 1226 .addReg(TrueReg) 1227 .addReg(SReg); 1228 llvm_unreachable("Unhandled branch predicate EXECZ"); 1229 break; 1230 } 1231 default: 1232 llvm_unreachable("invalid branch predicate"); 1233 } 1234 } else { 1235 llvm_unreachable("Can only handle Cond size 1 or 2"); 1236 } 1237 } 1238 1239 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 1240 MachineBasicBlock::iterator I, 1241 const DebugLoc &DL, 1242 Register SrcReg, int Value) const { 1243 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1244 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1245 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 1246 .addImm(Value) 1247 .addReg(SrcReg); 1248 1249 return Reg; 1250 } 1251 1252 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB, 1253 MachineBasicBlock::iterator I, 1254 const DebugLoc &DL, 1255 Register SrcReg, int Value) const { 1256 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1257 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1258 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 1259 .addImm(Value) 1260 .addReg(SrcReg); 1261 1262 return Reg; 1263 } 1264 1265 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 1266 1267 if (RI.isAGPRClass(DstRC)) 1268 return AMDGPU::COPY; 1269 if (RI.getRegSizeInBits(*DstRC) == 32) { 1270 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1271 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 1272 return AMDGPU::S_MOV_B64; 1273 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 1274 return AMDGPU::V_MOV_B64_PSEUDO; 1275 } 1276 return AMDGPU::COPY; 1277 } 1278 1279 const MCInstrDesc & 1280 SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize, 1281 bool IsIndirectSrc) const { 1282 if (IsIndirectSrc) { 1283 if (VecSize <= 32) // 4 bytes 1284 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1); 1285 if (VecSize <= 64) // 8 bytes 1286 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2); 1287 if (VecSize <= 96) // 12 bytes 1288 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3); 1289 if (VecSize <= 128) // 16 bytes 1290 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4); 1291 if (VecSize <= 160) // 20 bytes 1292 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5); 1293 if (VecSize <= 256) // 32 bytes 1294 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8); 1295 if (VecSize <= 512) // 64 bytes 1296 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16); 1297 if (VecSize <= 1024) // 128 bytes 1298 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32); 1299 1300 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos"); 1301 } 1302 1303 if (VecSize <= 32) // 4 bytes 1304 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1); 1305 if (VecSize <= 64) // 8 bytes 1306 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2); 1307 if (VecSize <= 96) // 12 bytes 1308 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3); 1309 if (VecSize <= 128) // 16 bytes 1310 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4); 1311 if (VecSize <= 160) // 20 bytes 1312 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5); 1313 if (VecSize <= 256) // 32 bytes 1314 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8); 1315 if (VecSize <= 512) // 64 bytes 1316 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16); 1317 if (VecSize <= 1024) // 128 bytes 1318 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32); 1319 1320 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos"); 1321 } 1322 1323 static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) { 1324 if (VecSize <= 32) // 4 bytes 1325 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1326 if (VecSize <= 64) // 8 bytes 1327 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1328 if (VecSize <= 96) // 12 bytes 1329 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1330 if (VecSize <= 128) // 16 bytes 1331 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1332 if (VecSize <= 160) // 20 bytes 1333 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1334 if (VecSize <= 256) // 32 bytes 1335 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1336 if (VecSize <= 512) // 64 bytes 1337 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1338 if (VecSize <= 1024) // 128 bytes 1339 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1340 1341 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1342 } 1343 1344 static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) { 1345 if (VecSize <= 32) // 4 bytes 1346 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1347 if (VecSize <= 64) // 8 bytes 1348 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1349 if (VecSize <= 96) // 12 bytes 1350 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1351 if (VecSize <= 128) // 16 bytes 1352 return 
AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1353 if (VecSize <= 160) // 20 bytes 1354 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1355 if (VecSize <= 256) // 32 bytes 1356 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1357 if (VecSize <= 512) // 64 bytes 1358 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1359 if (VecSize <= 1024) // 128 bytes 1360 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1361 1362 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1363 } 1364 1365 static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) { 1366 if (VecSize <= 64) // 8 bytes 1367 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1; 1368 if (VecSize <= 128) // 16 bytes 1369 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2; 1370 if (VecSize <= 256) // 32 bytes 1371 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4; 1372 if (VecSize <= 512) // 64 bytes 1373 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8; 1374 if (VecSize <= 1024) // 128 bytes 1375 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16; 1376 1377 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1378 } 1379 1380 const MCInstrDesc & 1381 SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, 1382 bool IsSGPR) const { 1383 if (IsSGPR) { 1384 switch (EltSize) { 1385 case 32: 1386 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize)); 1387 case 64: 1388 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize)); 1389 default: 1390 llvm_unreachable("invalid reg indexing elt size"); 1391 } 1392 } 1393 1394 assert(EltSize == 32 && "invalid reg indexing elt size"); 1395 return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize)); 1396 } 1397 1398 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1399 switch (Size) { 1400 case 4: 1401 return AMDGPU::SI_SPILL_S32_SAVE; 1402 case 8: 1403 return AMDGPU::SI_SPILL_S64_SAVE; 1404 case 12: 1405 return AMDGPU::SI_SPILL_S96_SAVE; 1406 case 16: 1407 return AMDGPU::SI_SPILL_S128_SAVE; 1408 case 20: 1409 return AMDGPU::SI_SPILL_S160_SAVE; 1410 case 24: 1411 return AMDGPU::SI_SPILL_S192_SAVE; 1412 case 28: 1413 return AMDGPU::SI_SPILL_S224_SAVE; 1414 case 32: 1415 return AMDGPU::SI_SPILL_S256_SAVE; 1416 case 64: 1417 return AMDGPU::SI_SPILL_S512_SAVE; 1418 case 128: 1419 return AMDGPU::SI_SPILL_S1024_SAVE; 1420 default: 1421 llvm_unreachable("unknown register size"); 1422 } 1423 } 1424 1425 static unsigned getVGPRSpillSaveOpcode(unsigned Size) { 1426 switch (Size) { 1427 case 4: 1428 return AMDGPU::SI_SPILL_V32_SAVE; 1429 case 8: 1430 return AMDGPU::SI_SPILL_V64_SAVE; 1431 case 12: 1432 return AMDGPU::SI_SPILL_V96_SAVE; 1433 case 16: 1434 return AMDGPU::SI_SPILL_V128_SAVE; 1435 case 20: 1436 return AMDGPU::SI_SPILL_V160_SAVE; 1437 case 24: 1438 return AMDGPU::SI_SPILL_V192_SAVE; 1439 case 28: 1440 return AMDGPU::SI_SPILL_V224_SAVE; 1441 case 32: 1442 return AMDGPU::SI_SPILL_V256_SAVE; 1443 case 64: 1444 return AMDGPU::SI_SPILL_V512_SAVE; 1445 case 128: 1446 return AMDGPU::SI_SPILL_V1024_SAVE; 1447 default: 1448 llvm_unreachable("unknown register size"); 1449 } 1450 } 1451 1452 static unsigned getAGPRSpillSaveOpcode(unsigned Size) { 1453 switch (Size) { 1454 case 4: 1455 return AMDGPU::SI_SPILL_A32_SAVE; 1456 case 8: 1457 return AMDGPU::SI_SPILL_A64_SAVE; 1458 case 12: 1459 return AMDGPU::SI_SPILL_A96_SAVE; 1460 case 16: 1461 return AMDGPU::SI_SPILL_A128_SAVE; 1462 case 20: 1463 return AMDGPU::SI_SPILL_A160_SAVE; 1464 case 24: 1465 return AMDGPU::SI_SPILL_A192_SAVE; 1466 case 28: 1467 return AMDGPU::SI_SPILL_A224_SAVE; 
1468 case 32: 1469 return AMDGPU::SI_SPILL_A256_SAVE; 1470 case 64: 1471 return AMDGPU::SI_SPILL_A512_SAVE; 1472 case 128: 1473 return AMDGPU::SI_SPILL_A1024_SAVE; 1474 default: 1475 llvm_unreachable("unknown register size"); 1476 } 1477 } 1478 1479 static unsigned getAVSpillSaveOpcode(unsigned Size) { 1480 switch (Size) { 1481 case 4: 1482 return AMDGPU::SI_SPILL_AV32_SAVE; 1483 case 8: 1484 return AMDGPU::SI_SPILL_AV64_SAVE; 1485 case 12: 1486 return AMDGPU::SI_SPILL_AV96_SAVE; 1487 case 16: 1488 return AMDGPU::SI_SPILL_AV128_SAVE; 1489 case 20: 1490 return AMDGPU::SI_SPILL_AV160_SAVE; 1491 case 24: 1492 return AMDGPU::SI_SPILL_AV192_SAVE; 1493 case 28: 1494 return AMDGPU::SI_SPILL_AV224_SAVE; 1495 case 32: 1496 return AMDGPU::SI_SPILL_AV256_SAVE; 1497 case 64: 1498 return AMDGPU::SI_SPILL_AV512_SAVE; 1499 case 128: 1500 return AMDGPU::SI_SPILL_AV1024_SAVE; 1501 default: 1502 llvm_unreachable("unknown register size"); 1503 } 1504 } 1505 1506 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 1507 MachineBasicBlock::iterator MI, 1508 Register SrcReg, bool isKill, 1509 int FrameIndex, 1510 const TargetRegisterClass *RC, 1511 const TargetRegisterInfo *TRI) const { 1512 MachineFunction *MF = MBB.getParent(); 1513 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1514 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1515 const DebugLoc &DL = MBB.findDebugLoc(MI); 1516 1517 MachinePointerInfo PtrInfo 1518 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1519 MachineMemOperand *MMO = MF->getMachineMemOperand( 1520 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex), 1521 FrameInfo.getObjectAlign(FrameIndex)); 1522 unsigned SpillSize = TRI->getSpillSize(*RC); 1523 1524 MachineRegisterInfo &MRI = MF->getRegInfo(); 1525 if (RI.isSGPRClass(RC)) { 1526 MFI->setHasSpilledSGPRs(); 1527 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled"); 1528 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI && 1529 SrcReg != AMDGPU::EXEC && "exec should not be spilled"); 1530 1531 // We are only allowed to create one new instruction when spilling 1532 // registers, so we need to use pseudo instruction for spilling SGPRs. 1533 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize)); 1534 1535 // The SGPR spill/restore instructions only work on number sgprs, so we need 1536 // to make sure we are using the correct register class. 1537 if (SrcReg.isVirtual() && SpillSize == 4) { 1538 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1539 } 1540 1541 BuildMI(MBB, MI, DL, OpDesc) 1542 .addReg(SrcReg, getKillRegState(isKill)) // data 1543 .addFrameIndex(FrameIndex) // addr 1544 .addMemOperand(MMO) 1545 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1546 1547 if (RI.spillSGPRToVGPR()) 1548 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1549 return; 1550 } 1551 1552 unsigned Opcode = RI.isVectorSuperClass(RC) ? getAVSpillSaveOpcode(SpillSize) 1553 : RI.isAGPRClass(RC) ? 
getAGPRSpillSaveOpcode(SpillSize) 1554 : getVGPRSpillSaveOpcode(SpillSize); 1555 MFI->setHasSpilledVGPRs(); 1556 1557 BuildMI(MBB, MI, DL, get(Opcode)) 1558 .addReg(SrcReg, getKillRegState(isKill)) // data 1559 .addFrameIndex(FrameIndex) // addr 1560 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1561 .addImm(0) // offset 1562 .addMemOperand(MMO); 1563 } 1564 1565 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1566 switch (Size) { 1567 case 4: 1568 return AMDGPU::SI_SPILL_S32_RESTORE; 1569 case 8: 1570 return AMDGPU::SI_SPILL_S64_RESTORE; 1571 case 12: 1572 return AMDGPU::SI_SPILL_S96_RESTORE; 1573 case 16: 1574 return AMDGPU::SI_SPILL_S128_RESTORE; 1575 case 20: 1576 return AMDGPU::SI_SPILL_S160_RESTORE; 1577 case 24: 1578 return AMDGPU::SI_SPILL_S192_RESTORE; 1579 case 28: 1580 return AMDGPU::SI_SPILL_S224_RESTORE; 1581 case 32: 1582 return AMDGPU::SI_SPILL_S256_RESTORE; 1583 case 64: 1584 return AMDGPU::SI_SPILL_S512_RESTORE; 1585 case 128: 1586 return AMDGPU::SI_SPILL_S1024_RESTORE; 1587 default: 1588 llvm_unreachable("unknown register size"); 1589 } 1590 } 1591 1592 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1593 switch (Size) { 1594 case 4: 1595 return AMDGPU::SI_SPILL_V32_RESTORE; 1596 case 8: 1597 return AMDGPU::SI_SPILL_V64_RESTORE; 1598 case 12: 1599 return AMDGPU::SI_SPILL_V96_RESTORE; 1600 case 16: 1601 return AMDGPU::SI_SPILL_V128_RESTORE; 1602 case 20: 1603 return AMDGPU::SI_SPILL_V160_RESTORE; 1604 case 24: 1605 return AMDGPU::SI_SPILL_V192_RESTORE; 1606 case 28: 1607 return AMDGPU::SI_SPILL_V224_RESTORE; 1608 case 32: 1609 return AMDGPU::SI_SPILL_V256_RESTORE; 1610 case 64: 1611 return AMDGPU::SI_SPILL_V512_RESTORE; 1612 case 128: 1613 return AMDGPU::SI_SPILL_V1024_RESTORE; 1614 default: 1615 llvm_unreachable("unknown register size"); 1616 } 1617 } 1618 1619 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1620 switch (Size) { 1621 case 4: 1622 return AMDGPU::SI_SPILL_A32_RESTORE; 1623 case 8: 1624 return AMDGPU::SI_SPILL_A64_RESTORE; 1625 case 12: 1626 return AMDGPU::SI_SPILL_A96_RESTORE; 1627 case 16: 1628 return AMDGPU::SI_SPILL_A128_RESTORE; 1629 case 20: 1630 return AMDGPU::SI_SPILL_A160_RESTORE; 1631 case 24: 1632 return AMDGPU::SI_SPILL_A192_RESTORE; 1633 case 28: 1634 return AMDGPU::SI_SPILL_A224_RESTORE; 1635 case 32: 1636 return AMDGPU::SI_SPILL_A256_RESTORE; 1637 case 64: 1638 return AMDGPU::SI_SPILL_A512_RESTORE; 1639 case 128: 1640 return AMDGPU::SI_SPILL_A1024_RESTORE; 1641 default: 1642 llvm_unreachable("unknown register size"); 1643 } 1644 } 1645 1646 static unsigned getAVSpillRestoreOpcode(unsigned Size) { 1647 switch (Size) { 1648 case 4: 1649 return AMDGPU::SI_SPILL_AV32_RESTORE; 1650 case 8: 1651 return AMDGPU::SI_SPILL_AV64_RESTORE; 1652 case 12: 1653 return AMDGPU::SI_SPILL_AV96_RESTORE; 1654 case 16: 1655 return AMDGPU::SI_SPILL_AV128_RESTORE; 1656 case 20: 1657 return AMDGPU::SI_SPILL_AV160_RESTORE; 1658 case 24: 1659 return AMDGPU::SI_SPILL_AV192_RESTORE; 1660 case 28: 1661 return AMDGPU::SI_SPILL_AV224_RESTORE; 1662 case 32: 1663 return AMDGPU::SI_SPILL_AV256_RESTORE; 1664 case 64: 1665 return AMDGPU::SI_SPILL_AV512_RESTORE; 1666 case 128: 1667 return AMDGPU::SI_SPILL_AV1024_RESTORE; 1668 default: 1669 llvm_unreachable("unknown register size"); 1670 } 1671 } 1672 1673 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1674 MachineBasicBlock::iterator MI, 1675 Register DestReg, int FrameIndex, 1676 const TargetRegisterClass *RC, 1677 const TargetRegisterInfo *TRI) const { 1678 MachineFunction 
*MF = MBB.getParent(); 1679 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1680 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1681 const DebugLoc &DL = MBB.findDebugLoc(MI); 1682 unsigned SpillSize = TRI->getSpillSize(*RC); 1683 1684 MachinePointerInfo PtrInfo 1685 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1686 1687 MachineMemOperand *MMO = MF->getMachineMemOperand( 1688 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1689 FrameInfo.getObjectAlign(FrameIndex)); 1690 1691 if (RI.isSGPRClass(RC)) { 1692 MFI->setHasSpilledSGPRs(); 1693 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1694 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI && 1695 DestReg != AMDGPU::EXEC && "exec should not be spilled"); 1696 1697 // FIXME: Maybe this should not include a memoperand because it will be 1698 // lowered to non-memory instructions. 1699 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); 1700 if (DestReg.isVirtual() && SpillSize == 4) { 1701 MachineRegisterInfo &MRI = MF->getRegInfo(); 1702 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1703 } 1704 1705 if (RI.spillSGPRToVGPR()) 1706 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1707 BuildMI(MBB, MI, DL, OpDesc, DestReg) 1708 .addFrameIndex(FrameIndex) // addr 1709 .addMemOperand(MMO) 1710 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1711 1712 return; 1713 } 1714 1715 unsigned Opcode = RI.isVectorSuperClass(RC) 1716 ? getAVSpillRestoreOpcode(SpillSize) 1717 : RI.isAGPRClass(RC) ? getAGPRSpillRestoreOpcode(SpillSize) 1718 : getVGPRSpillRestoreOpcode(SpillSize); 1719 BuildMI(MBB, MI, DL, get(Opcode), DestReg) 1720 .addFrameIndex(FrameIndex) // vaddr 1721 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1722 .addImm(0) // offset 1723 .addMemOperand(MMO); 1724 } 1725 1726 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB, 1727 MachineBasicBlock::iterator MI) const { 1728 insertNoops(MBB, MI, 1); 1729 } 1730 1731 void SIInstrInfo::insertNoops(MachineBasicBlock &MBB, 1732 MachineBasicBlock::iterator MI, 1733 unsigned Quantity) const { 1734 DebugLoc DL = MBB.findDebugLoc(MI); 1735 while (Quantity > 0) { 1736 unsigned Arg = std::min(Quantity, 8u); 1737 Quantity -= Arg; 1738 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1); 1739 } 1740 } 1741 1742 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const { 1743 auto MF = MBB.getParent(); 1744 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1745 1746 assert(Info->isEntryFunction()); 1747 1748 if (MBB.succ_empty()) { 1749 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end(); 1750 if (HasNoTerminator) { 1751 if (Info->returnsVoid()) { 1752 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0); 1753 } else { 1754 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG)); 1755 } 1756 } 1757 } 1758 } 1759 1760 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) { 1761 switch (MI.getOpcode()) { 1762 default: 1763 if (MI.isMetaInstruction()) 1764 return 0; 1765 return 1; // FIXME: Do wait states equal cycles? 1766 1767 case AMDGPU::S_NOP: 1768 return MI.getOperand(0).getImm() + 1; 1769 1770 // FIXME: Any other pseudo instruction? 1771 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The 1772 // hazard, even if one exist, won't really be visible. Should we handle it? 
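  // SI_MASKED_UNREACHABLE and WAVE_BARRIER expand to no real instructions, so
  // they take no wait states.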
1773 case AMDGPU::SI_MASKED_UNREACHABLE: 1774 case AMDGPU::WAVE_BARRIER: 1775 return 0; 1776 } 1777 } 1778 1779 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1780 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1781 MachineBasicBlock &MBB = *MI.getParent(); 1782 DebugLoc DL = MBB.findDebugLoc(MI); 1783 switch (MI.getOpcode()) { 1784 default: return TargetInstrInfo::expandPostRAPseudo(MI); 1785 case AMDGPU::S_MOV_B64_term: 1786 // This is only a terminator to get the correct spill code placement during 1787 // register allocation. 1788 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1789 break; 1790 1791 case AMDGPU::S_MOV_B32_term: 1792 // This is only a terminator to get the correct spill code placement during 1793 // register allocation. 1794 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1795 break; 1796 1797 case AMDGPU::S_XOR_B64_term: 1798 // This is only a terminator to get the correct spill code placement during 1799 // register allocation. 1800 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1801 break; 1802 1803 case AMDGPU::S_XOR_B32_term: 1804 // This is only a terminator to get the correct spill code placement during 1805 // register allocation. 1806 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1807 break; 1808 case AMDGPU::S_OR_B64_term: 1809 // This is only a terminator to get the correct spill code placement during 1810 // register allocation. 1811 MI.setDesc(get(AMDGPU::S_OR_B64)); 1812 break; 1813 case AMDGPU::S_OR_B32_term: 1814 // This is only a terminator to get the correct spill code placement during 1815 // register allocation. 1816 MI.setDesc(get(AMDGPU::S_OR_B32)); 1817 break; 1818 1819 case AMDGPU::S_ANDN2_B64_term: 1820 // This is only a terminator to get the correct spill code placement during 1821 // register allocation. 1822 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1823 break; 1824 1825 case AMDGPU::S_ANDN2_B32_term: 1826 // This is only a terminator to get the correct spill code placement during 1827 // register allocation. 1828 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1829 break; 1830 1831 case AMDGPU::S_AND_B64_term: 1832 // This is only a terminator to get the correct spill code placement during 1833 // register allocation. 1834 MI.setDesc(get(AMDGPU::S_AND_B64)); 1835 break; 1836 1837 case AMDGPU::S_AND_B32_term: 1838 // This is only a terminator to get the correct spill code placement during 1839 // register allocation. 1840 MI.setDesc(get(AMDGPU::S_AND_B32)); 1841 break; 1842 1843 case AMDGPU::V_MOV_B64_PSEUDO: { 1844 Register Dst = MI.getOperand(0).getReg(); 1845 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1846 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1847 1848 const MachineOperand &SrcOp = MI.getOperand(1); 1849 // FIXME: Will this work for 64-bit floating point immediates? 
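    // Expansion strategy: prefer a single v_mov_b64 when the subtarget has it
    // (unless the source is a literal that does not fit in 32 bits), then try
    // a lone v_pk_mov_b32 where it is legal, and otherwise fall back to two
    // v_mov_b32 writes of the sub0/sub1 halves.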
1850 assert(!SrcOp.isFPImm()); 1851 if (ST.hasMovB64()) { 1852 MI.setDesc(get(AMDGPU::V_MOV_B64_e32)); 1853 if (!isLiteralConstant(MI, 1) || isUInt<32>(SrcOp.getImm())) 1854 break; 1855 } 1856 if (SrcOp.isImm()) { 1857 APInt Imm(64, SrcOp.getImm()); 1858 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1859 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1860 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) { 1861 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1862 .addImm(SISrcMods::OP_SEL_1) 1863 .addImm(Lo.getSExtValue()) 1864 .addImm(SISrcMods::OP_SEL_1) 1865 .addImm(Lo.getSExtValue()) 1866 .addImm(0) // op_sel_lo 1867 .addImm(0) // op_sel_hi 1868 .addImm(0) // neg_lo 1869 .addImm(0) // neg_hi 1870 .addImm(0); // clamp 1871 } else { 1872 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1873 .addImm(Lo.getSExtValue()) 1874 .addReg(Dst, RegState::Implicit | RegState::Define); 1875 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1876 .addImm(Hi.getSExtValue()) 1877 .addReg(Dst, RegState::Implicit | RegState::Define); 1878 } 1879 } else { 1880 assert(SrcOp.isReg()); 1881 if (ST.hasPackedFP32Ops() && 1882 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { 1883 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1884 .addImm(SISrcMods::OP_SEL_1) // src0_mod 1885 .addReg(SrcOp.getReg()) 1886 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod 1887 .addReg(SrcOp.getReg()) 1888 .addImm(0) // op_sel_lo 1889 .addImm(0) // op_sel_hi 1890 .addImm(0) // neg_lo 1891 .addImm(0) // neg_hi 1892 .addImm(0); // clamp 1893 } else { 1894 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1895 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1896 .addReg(Dst, RegState::Implicit | RegState::Define); 1897 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1898 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1899 .addReg(Dst, RegState::Implicit | RegState::Define); 1900 } 1901 } 1902 MI.eraseFromParent(); 1903 break; 1904 } 1905 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1906 expandMovDPP64(MI); 1907 break; 1908 } 1909 case AMDGPU::S_MOV_B64_IMM_PSEUDO: { 1910 const MachineOperand &SrcOp = MI.getOperand(1); 1911 assert(!SrcOp.isFPImm()); 1912 APInt Imm(64, SrcOp.getImm()); 1913 if (Imm.isIntN(32) || isInlineConstant(Imm)) { 1914 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1915 break; 1916 } 1917 1918 Register Dst = MI.getOperand(0).getReg(); 1919 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1920 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1921 1922 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1923 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1924 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo) 1925 .addImm(Lo.getSExtValue()) 1926 .addReg(Dst, RegState::Implicit | RegState::Define); 1927 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi) 1928 .addImm(Hi.getSExtValue()) 1929 .addReg(Dst, RegState::Implicit | RegState::Define); 1930 MI.eraseFromParent(); 1931 break; 1932 } 1933 case AMDGPU::V_SET_INACTIVE_B32: { 1934 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1935 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1936 // FIXME: We may possibly optimize the COPY once we find ways to make LLVM 1937 // optimizations (mainly Register Coalescer) aware of WWM register liveness. 
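    // The expansion writes the "active" value first, flips EXEC, writes the
    // "inactive" value into the remaining lanes, and flips EXEC back. SCC is
    // clobbered by the s_not of EXEC.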
1938 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1939 .add(MI.getOperand(1)); 1940 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1941 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1942 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1943 .add(MI.getOperand(2)); 1944 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1945 .addReg(Exec); 1946 MI.eraseFromParent(); 1947 break; 1948 } 1949 case AMDGPU::V_SET_INACTIVE_B64: { 1950 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1951 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1952 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1953 MI.getOperand(0).getReg()) 1954 .add(MI.getOperand(1)); 1955 expandPostRAPseudo(*Copy); 1956 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1957 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1958 Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1959 MI.getOperand(0).getReg()) 1960 .add(MI.getOperand(2)); 1961 expandPostRAPseudo(*Copy); 1962 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1963 .addReg(Exec); 1964 MI.eraseFromParent(); 1965 break; 1966 } 1967 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1968 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1969 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1970 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1971 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1972 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1973 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1974 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1975 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1976 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1977 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1978 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1979 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1980 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1981 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1982 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1983 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1: 1984 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2: 1985 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4: 1986 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8: 1987 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: { 1988 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1989 1990 unsigned Opc; 1991 if (RI.hasVGPRs(EltRC)) { 1992 Opc = AMDGPU::V_MOVRELD_B32_e32; 1993 } else { 1994 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 1995 : AMDGPU::S_MOVRELD_B32; 1996 } 1997 1998 const MCInstrDesc &OpDesc = get(Opc); 1999 Register VecReg = MI.getOperand(0).getReg(); 2000 bool IsUndef = MI.getOperand(1).isUndef(); 2001 unsigned SubReg = MI.getOperand(3).getImm(); 2002 assert(VecReg == MI.getOperand(1).getReg()); 2003 2004 MachineInstrBuilder MIB = 2005 BuildMI(MBB, MI, DL, OpDesc) 2006 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2007 .add(MI.getOperand(2)) 2008 .addReg(VecReg, RegState::ImplicitDefine) 2009 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)); 2010 2011 const int ImpDefIdx = 2012 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 2013 const int ImpUseIdx = ImpDefIdx + 1; 2014 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 2015 MI.eraseFromParent(); 2016 break; 2017 } 2018 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1: 2019 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2: 2020 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3: 2021 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4: 2022 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5: 2023 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8: 2024 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16: 2025 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: { 2026 assert(ST.useVGPRIndexMode()); 2027 Register VecReg = MI.getOperand(0).getReg(); 2028 bool IsUndef = MI.getOperand(1).isUndef(); 2029 Register Idx = MI.getOperand(3).getReg(); 2030 Register SubReg = MI.getOperand(4).getImm(); 2031 2032 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 2033 .addReg(Idx) 2034 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE); 2035 SetOn->getOperand(3).setIsUndef(); 2036 2037 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write); 2038 MachineInstrBuilder MIB = 2039 BuildMI(MBB, MI, DL, OpDesc) 2040 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2041 .add(MI.getOperand(2)) 2042 .addReg(VecReg, RegState::ImplicitDefine) 2043 .addReg(VecReg, 2044 RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 2045 2046 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 2047 const int ImpUseIdx = ImpDefIdx + 1; 2048 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 2049 2050 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 2051 2052 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 2053 2054 MI.eraseFromParent(); 2055 break; 2056 } 2057 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1: 2058 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2: 2059 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3: 2060 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4: 2061 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5: 2062 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8: 2063 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16: 2064 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: { 2065 assert(ST.useVGPRIndexMode()); 2066 Register Dst = MI.getOperand(0).getReg(); 2067 Register VecReg = MI.getOperand(1).getReg(); 2068 bool IsUndef = MI.getOperand(1).isUndef(); 2069 Register Idx = MI.getOperand(2).getReg(); 2070 Register SubReg = MI.getOperand(3).getImm(); 2071 2072 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 2073 .addReg(Idx) 2074 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE); 2075 SetOn->getOperand(3).setIsUndef(); 2076 2077 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read)) 2078 .addDef(Dst) 2079 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2080 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
                                                  RegState::Undef : 0));

    MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));

    finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::SI_PC_ADD_REL_OFFSET: {
    MachineFunction &MF = *MBB.getParent();
    Register Reg = MI.getOperand(0).getReg();
    Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
    Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);

    // Create a bundle so these instructions won't be re-ordered by the
    // post-RA scheduler.
    MIBundleBuilder Bundler(MBB, MI);
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));

    // Add 32-bit offset from this instruction to the start of the
    // constant data.
    Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
                       .addReg(RegLo)
                       .add(MI.getOperand(1)));

    MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
                                  .addReg(RegHi);
    MIB.add(MI.getOperand(2));

    Bundler.append(MIB);
    finalizeBundle(MBB, Bundler.begin());

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::ENTER_STRICT_WWM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // Whole Wave Mode is entered.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                 : AMDGPU::S_OR_SAVEEXEC_B64));
    break;
  }
  case AMDGPU::ENTER_STRICT_WQM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // STRICT_WQM is entered.
    const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
    const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64;
    const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
    BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec);
    BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec);

    MI.eraseFromParent();
    break;
  }
  case AMDGPU::EXIT_STRICT_WWM:
  case AMDGPU::EXIT_STRICT_WQM: {
    // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
    // WWM/STRICT_WQM is exited.
    MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
    break;
  }
  case AMDGPU::SI_RETURN: {
    const MachineFunction *MF = MBB.getParent();
    const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>();
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    // Hiding the return address use with SI_RETURN may lead to extra kills in
    // the function and missing live-ins. We are fine in practice because callee
    // saved register handling ensures the register value is restored before
    // RET, but we need the undef flag here to appease the MachineVerifier
    // liveness checks.
2151 MachineInstrBuilder MIB = 2152 BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return)) 2153 .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef); 2154 2155 MIB.copyImplicitOps(MI); 2156 MI.eraseFromParent(); 2157 break; 2158 } 2159 } 2160 return true; 2161 } 2162 2163 std::pair<MachineInstr*, MachineInstr*> 2164 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 2165 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 2166 2167 if (ST.hasMovB64() && 2168 AMDGPU::isLegal64BitDPPControl( 2169 getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) { 2170 MI.setDesc(get(AMDGPU::V_MOV_B64_dpp)); 2171 return std::make_pair(&MI, nullptr); 2172 } 2173 2174 MachineBasicBlock &MBB = *MI.getParent(); 2175 DebugLoc DL = MBB.findDebugLoc(MI); 2176 MachineFunction *MF = MBB.getParent(); 2177 MachineRegisterInfo &MRI = MF->getRegInfo(); 2178 Register Dst = MI.getOperand(0).getReg(); 2179 unsigned Part = 0; 2180 MachineInstr *Split[2]; 2181 2182 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 2183 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 2184 if (Dst.isPhysical()) { 2185 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 2186 } else { 2187 assert(MRI.isSSA()); 2188 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2189 MovDPP.addDef(Tmp); 2190 } 2191 2192 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 2193 const MachineOperand &SrcOp = MI.getOperand(I); 2194 assert(!SrcOp.isFPImm()); 2195 if (SrcOp.isImm()) { 2196 APInt Imm(64, SrcOp.getImm()); 2197 Imm.ashrInPlace(Part * 32); 2198 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 2199 } else { 2200 assert(SrcOp.isReg()); 2201 Register Src = SrcOp.getReg(); 2202 if (Src.isPhysical()) 2203 MovDPP.addReg(RI.getSubReg(Src, Sub)); 2204 else 2205 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 2206 } 2207 } 2208 2209 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 2210 MovDPP.addImm(MI.getOperand(I).getImm()); 2211 2212 Split[Part] = MovDPP; 2213 ++Part; 2214 } 2215 2216 if (Dst.isVirtual()) 2217 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 2218 .addReg(Split[0]->getOperand(0).getReg()) 2219 .addImm(AMDGPU::sub0) 2220 .addReg(Split[1]->getOperand(0).getReg()) 2221 .addImm(AMDGPU::sub1); 2222 2223 MI.eraseFromParent(); 2224 return std::make_pair(Split[0], Split[1]); 2225 } 2226 2227 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 2228 MachineOperand &Src0, 2229 unsigned Src0OpName, 2230 MachineOperand &Src1, 2231 unsigned Src1OpName) const { 2232 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 2233 if (!Src0Mods) 2234 return false; 2235 2236 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 2237 assert(Src1Mods && 2238 "All commutable instructions have both src0 and src1 modifiers"); 2239 2240 int Src0ModsVal = Src0Mods->getImm(); 2241 int Src1ModsVal = Src1Mods->getImm(); 2242 2243 Src1Mods->setImm(Src0ModsVal); 2244 Src0Mods->setImm(Src1ModsVal); 2245 return true; 2246 } 2247 2248 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 2249 MachineOperand &RegOp, 2250 MachineOperand &NonRegOp) { 2251 Register Reg = RegOp.getReg(); 2252 unsigned SubReg = RegOp.getSubReg(); 2253 bool IsKill = RegOp.isKill(); 2254 bool IsDead = RegOp.isDead(); 2255 bool IsUndef = RegOp.isUndef(); 2256 bool IsDebug = RegOp.isDebug(); 2257 2258 if (NonRegOp.isImm()) 2259 RegOp.ChangeToImmediate(NonRegOp.getImm()); 2260 else if (NonRegOp.isFI()) 2261 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 2262 else if (NonRegOp.isGlobal()) { 2263 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 2264 NonRegOp.getTargetFlags()); 2265 } else 2266 return nullptr; 2267 2268 // Make sure we don't reinterpret a subreg index in the target flags. 2269 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 2270 2271 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 2272 NonRegOp.setSubReg(SubReg); 2273 2274 return &MI; 2275 } 2276 2277 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 2278 unsigned Src0Idx, 2279 unsigned Src1Idx) const { 2280 assert(!NewMI && "this should never be used"); 2281 2282 unsigned Opc = MI.getOpcode(); 2283 int CommutedOpcode = commuteOpcode(Opc); 2284 if (CommutedOpcode == -1) 2285 return nullptr; 2286 2287 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 2288 static_cast<int>(Src0Idx) && 2289 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 2290 static_cast<int>(Src1Idx) && 2291 "inconsistency with findCommutedOpIndices"); 2292 2293 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2294 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2295 2296 MachineInstr *CommutedMI = nullptr; 2297 if (Src0.isReg() && Src1.isReg()) { 2298 if (isOperandLegal(MI, Src1Idx, &Src0)) { 2299 // Be sure to copy the source modifiers to the right place. 2300 CommutedMI 2301 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 2302 } 2303 2304 } else if (Src0.isReg() && !Src1.isReg()) { 2305 // src0 should always be able to support any operand type, so no need to 2306 // check operand legality. 
2307 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 2308 } else if (!Src0.isReg() && Src1.isReg()) { 2309 if (isOperandLegal(MI, Src1Idx, &Src0)) 2310 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 2311 } else { 2312 // FIXME: Found two non registers to commute. This does happen. 2313 return nullptr; 2314 } 2315 2316 if (CommutedMI) { 2317 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 2318 Src1, AMDGPU::OpName::src1_modifiers); 2319 2320 CommutedMI->setDesc(get(CommutedOpcode)); 2321 } 2322 2323 return CommutedMI; 2324 } 2325 2326 // This needs to be implemented because the source modifiers may be inserted 2327 // between the true commutable operands, and the base 2328 // TargetInstrInfo::commuteInstruction uses it. 2329 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 2330 unsigned &SrcOpIdx0, 2331 unsigned &SrcOpIdx1) const { 2332 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 2333 } 2334 2335 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 2336 unsigned &SrcOpIdx1) const { 2337 if (!Desc.isCommutable()) 2338 return false; 2339 2340 unsigned Opc = Desc.getOpcode(); 2341 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2342 if (Src0Idx == -1) 2343 return false; 2344 2345 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2346 if (Src1Idx == -1) 2347 return false; 2348 2349 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 2350 } 2351 2352 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 2353 int64_t BrOffset) const { 2354 // BranchRelaxation should never have to check s_setpc_b64 because its dest 2355 // block is unanalyzable. 2356 assert(BranchOp != AMDGPU::S_SETPC_B64); 2357 2358 // Convert to dwords. 2359 BrOffset /= 4; 2360 2361 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 2362 // from the next instruction. 2363 BrOffset -= 1; 2364 2365 return isIntN(BranchOffsetBits, BrOffset); 2366 } 2367 2368 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 2369 const MachineInstr &MI) const { 2370 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 2371 // This would be a difficult analysis to perform, but can always be legal so 2372 // there's no need to analyze it. 2373 return nullptr; 2374 } 2375 2376 return MI.getOperand(0).getMBB(); 2377 } 2378 2379 void SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 2380 MachineBasicBlock &DestBB, 2381 MachineBasicBlock &RestoreBB, 2382 const DebugLoc &DL, int64_t BrOffset, 2383 RegScavenger *RS) const { 2384 assert(RS && "RegScavenger required for long branching"); 2385 assert(MBB.empty() && 2386 "new block should be inserted for expanding unconditional branch"); 2387 assert(MBB.pred_size() == 1); 2388 assert(RestoreBB.empty() && 2389 "restore block should be inserted for restoring clobbered registers"); 2390 2391 MachineFunction *MF = MBB.getParent(); 2392 MachineRegisterInfo &MRI = MF->getRegInfo(); 2393 2394 // FIXME: Virtual register workaround for RegScavenger not working with empty 2395 // blocks. 2396 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2397 2398 auto I = MBB.end(); 2399 2400 // We need to compute the offset relative to the instruction immediately after 2401 // s_getpc_b64. Insert pc arithmetic code before last terminator. 
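  // The emitted sequence is, schematically:
  //   s_getpc_b64 PCReg              ; PostGetPCLabel attached after this
  //   s_add_u32   PCReg.sub0, PCReg.sub0, offset_lo
  //   s_addc_u32  PCReg.sub1, PCReg.sub1, offset_hi
  //   s_setpc_b64 PCReg
  // where offset_lo/offset_hi are resolved below as the low and high halves of
  // (DestLabel - PostGetPCLabel).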
2402 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2403 2404 auto &MCCtx = MF->getContext(); 2405 MCSymbol *PostGetPCLabel = 2406 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true); 2407 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel); 2408 2409 MCSymbol *OffsetLo = 2410 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true); 2411 MCSymbol *OffsetHi = 2412 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true); 2413 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2414 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2415 .addReg(PCReg, 0, AMDGPU::sub0) 2416 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET); 2417 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2418 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2419 .addReg(PCReg, 0, AMDGPU::sub1) 2420 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET); 2421 2422 // Insert the indirect branch after the other terminator. 2423 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2424 .addReg(PCReg); 2425 2426 // FIXME: If spilling is necessary, this will fail because this scavenger has 2427 // no emergency stack slots. It is non-trivial to spill in this situation, 2428 // because the restore code needs to be specially placed after the 2429 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2430 // block. 2431 // 2432 // If a spill is needed for the pc register pair, we need to insert a spill 2433 // restore block right before the destination block, and insert a short branch 2434 // into the old destination block's fallthrough predecessor. 2435 // e.g.: 2436 // 2437 // s_cbranch_scc0 skip_long_branch: 2438 // 2439 // long_branch_bb: 2440 // spill s[8:9] 2441 // s_getpc_b64 s[8:9] 2442 // s_add_u32 s8, s8, restore_bb 2443 // s_addc_u32 s9, s9, 0 2444 // s_setpc_b64 s[8:9] 2445 // 2446 // skip_long_branch: 2447 // foo; 2448 // 2449 // ..... 2450 // 2451 // dest_bb_fallthrough_predecessor: 2452 // bar; 2453 // s_branch dest_bb 2454 // 2455 // restore_bb: 2456 // restore s[8:9] 2457 // fallthrough dest_bb 2458 /// 2459 // dest_bb: 2460 // buzz; 2461 2462 RS->enterBasicBlockEnd(MBB); 2463 Register Scav = RS->scavengeRegisterBackwards( 2464 AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC), 2465 /* RestoreAfter */ false, 0, /* AllowSpill */ false); 2466 if (Scav) { 2467 RS->setRegUsed(Scav); 2468 MRI.replaceRegWith(PCReg, Scav); 2469 MRI.clearVirtRegs(); 2470 } else { 2471 // As SGPR needs VGPR to be spilled, we reuse the slot of temporary VGPR for 2472 // SGPR spill. 2473 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 2474 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 2475 TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS); 2476 MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1); 2477 MRI.clearVirtRegs(); 2478 } 2479 2480 MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol(); 2481 // Now, the distance could be defined. 2482 auto *Offset = MCBinaryExpr::createSub( 2483 MCSymbolRefExpr::create(DestLabel, MCCtx), 2484 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx); 2485 // Add offset assignments. 
2486 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx); 2487 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx)); 2488 auto *ShAmt = MCConstantExpr::create(32, MCCtx); 2489 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx)); 2490 } 2491 2492 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2493 switch (Cond) { 2494 case SIInstrInfo::SCC_TRUE: 2495 return AMDGPU::S_CBRANCH_SCC1; 2496 case SIInstrInfo::SCC_FALSE: 2497 return AMDGPU::S_CBRANCH_SCC0; 2498 case SIInstrInfo::VCCNZ: 2499 return AMDGPU::S_CBRANCH_VCCNZ; 2500 case SIInstrInfo::VCCZ: 2501 return AMDGPU::S_CBRANCH_VCCZ; 2502 case SIInstrInfo::EXECNZ: 2503 return AMDGPU::S_CBRANCH_EXECNZ; 2504 case SIInstrInfo::EXECZ: 2505 return AMDGPU::S_CBRANCH_EXECZ; 2506 default: 2507 llvm_unreachable("invalid branch predicate"); 2508 } 2509 } 2510 2511 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2512 switch (Opcode) { 2513 case AMDGPU::S_CBRANCH_SCC0: 2514 return SCC_FALSE; 2515 case AMDGPU::S_CBRANCH_SCC1: 2516 return SCC_TRUE; 2517 case AMDGPU::S_CBRANCH_VCCNZ: 2518 return VCCNZ; 2519 case AMDGPU::S_CBRANCH_VCCZ: 2520 return VCCZ; 2521 case AMDGPU::S_CBRANCH_EXECNZ: 2522 return EXECNZ; 2523 case AMDGPU::S_CBRANCH_EXECZ: 2524 return EXECZ; 2525 default: 2526 return INVALID_BR; 2527 } 2528 } 2529 2530 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2531 MachineBasicBlock::iterator I, 2532 MachineBasicBlock *&TBB, 2533 MachineBasicBlock *&FBB, 2534 SmallVectorImpl<MachineOperand> &Cond, 2535 bool AllowModify) const { 2536 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2537 // Unconditional Branch 2538 TBB = I->getOperand(0).getMBB(); 2539 return false; 2540 } 2541 2542 MachineBasicBlock *CondBB = nullptr; 2543 2544 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2545 CondBB = I->getOperand(1).getMBB(); 2546 Cond.push_back(I->getOperand(0)); 2547 } else { 2548 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2549 if (Pred == INVALID_BR) 2550 return true; 2551 2552 CondBB = I->getOperand(0).getMBB(); 2553 Cond.push_back(MachineOperand::CreateImm(Pred)); 2554 Cond.push_back(I->getOperand(1)); // Save the branch register. 2555 } 2556 ++I; 2557 2558 if (I == MBB.end()) { 2559 // Conditional branch followed by fall-through. 2560 TBB = CondBB; 2561 return false; 2562 } 2563 2564 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2565 TBB = CondBB; 2566 FBB = I->getOperand(0).getMBB(); 2567 return false; 2568 } 2569 2570 return true; 2571 } 2572 2573 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2574 MachineBasicBlock *&FBB, 2575 SmallVectorImpl<MachineOperand> &Cond, 2576 bool AllowModify) const { 2577 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2578 auto E = MBB.end(); 2579 if (I == E) 2580 return false; 2581 2582 // Skip over the instructions that are artificially terminators for special 2583 // exec management. 
  while (I != E && !I->isBranch() && !I->isReturn()) {
    switch (I->getOpcode()) {
    case AMDGPU::S_MOV_B64_term:
    case AMDGPU::S_XOR_B64_term:
    case AMDGPU::S_OR_B64_term:
    case AMDGPU::S_ANDN2_B64_term:
    case AMDGPU::S_AND_B64_term:
    case AMDGPU::S_MOV_B32_term:
    case AMDGPU::S_XOR_B32_term:
    case AMDGPU::S_OR_B32_term:
    case AMDGPU::S_ANDN2_B32_term:
    case AMDGPU::S_AND_B32_term:
      break;
    case AMDGPU::SI_IF:
    case AMDGPU::SI_ELSE:
    case AMDGPU::SI_KILL_I1_TERMINATOR:
    case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR:
      // FIXME: It's messy that these need to be considered here at all.
      return true;
    default:
      llvm_unreachable("unexpected non-branch terminator inst");
    }

    ++I;
  }

  if (I == E)
    return false;

  return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify);
}

unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                   int *BytesRemoved) const {
  unsigned Count = 0;
  unsigned RemovedSize = 0;
  for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) {
    // Skip over artificial terminators when removing instructions.
    if (MI.isBranch() || MI.isReturn()) {
      RemovedSize += getInstSizeInBytes(MI);
      MI.eraseFromParent();
      ++Count;
    }
  }

  if (BytesRemoved)
    *BytesRemoved = RemovedSize;

  return Count;
}

// Copy the flags onto the implicit condition register operand.
static void preserveCondRegFlags(MachineOperand &CondReg,
                                 const MachineOperand &OrigCond) {
  CondReg.setIsUndef(OrigCond.isUndef());
  CondReg.setIsKill(OrigCond.isKill());
}

unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *TBB,
                                   MachineBasicBlock *FBB,
                                   ArrayRef<MachineOperand> Cond,
                                   const DebugLoc &DL,
                                   int *BytesAdded) const {
  if (!FBB && Cond.empty()) {
    BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
      .addMBB(TBB);
    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  if (Cond.size() == 1 && Cond[0].isReg()) {
    BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO))
      .add(Cond[0])
      .addMBB(TBB);
    return 1;
  }

  assert(TBB && Cond[0].isImm());

  unsigned Opcode
    = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm()));

  if (!FBB) {
    MachineInstr *CondBr =
      BuildMI(&MBB, DL, get(Opcode))
        .addMBB(TBB);

    // Copy the flags onto the implicit condition register operand.
    preserveCondRegFlags(CondBr->getOperand(1), Cond[1]);
    fixImplicitOperands(*CondBr);

    if (BytesAdded)
      *BytesAdded = ST.hasOffset3fBug() ? 8 : 4;
    return 1;
  }

  assert(TBB && FBB);

  MachineInstr *CondBr =
    BuildMI(&MBB, DL, get(Opcode))
      .addMBB(TBB);
  fixImplicitOperands(*CondBr);
  BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH))
    .addMBB(FBB);

  MachineOperand &CondReg = CondBr->getOperand(1);
  CondReg.setIsUndef(Cond[1].isUndef());
  CondReg.setIsKill(Cond[1].isKill());

  if (BytesAdded)
    *BytesAdded = ST.hasOffset3fBug() ?
16 : 8; 2698 2699 return 2; 2700 } 2701 2702 bool SIInstrInfo::reverseBranchCondition( 2703 SmallVectorImpl<MachineOperand> &Cond) const { 2704 if (Cond.size() != 2) { 2705 return true; 2706 } 2707 2708 if (Cond[0].isImm()) { 2709 Cond[0].setImm(-Cond[0].getImm()); 2710 return false; 2711 } 2712 2713 return true; 2714 } 2715 2716 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2717 ArrayRef<MachineOperand> Cond, 2718 Register DstReg, Register TrueReg, 2719 Register FalseReg, int &CondCycles, 2720 int &TrueCycles, int &FalseCycles) const { 2721 switch (Cond[0].getImm()) { 2722 case VCCNZ: 2723 case VCCZ: { 2724 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2725 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2726 if (MRI.getRegClass(FalseReg) != RC) 2727 return false; 2728 2729 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2730 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2731 2732 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2733 return RI.hasVGPRs(RC) && NumInsts <= 6; 2734 } 2735 case SCC_TRUE: 2736 case SCC_FALSE: { 2737 // FIXME: We could insert for VGPRs if we could replace the original compare 2738 // with a vector one. 2739 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2740 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2741 if (MRI.getRegClass(FalseReg) != RC) 2742 return false; 2743 2744 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2745 2746 // Multiples of 8 can do s_cselect_b64 2747 if (NumInsts % 2 == 0) 2748 NumInsts /= 2; 2749 2750 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2751 return RI.isSGPRClass(RC); 2752 } 2753 default: 2754 return false; 2755 } 2756 } 2757 2758 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2759 MachineBasicBlock::iterator I, const DebugLoc &DL, 2760 Register DstReg, ArrayRef<MachineOperand> Cond, 2761 Register TrueReg, Register FalseReg) const { 2762 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2763 if (Pred == VCCZ || Pred == SCC_FALSE) { 2764 Pred = static_cast<BranchPredicate>(-Pred); 2765 std::swap(TrueReg, FalseReg); 2766 } 2767 2768 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2769 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2770 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2771 2772 if (DstSize == 32) { 2773 MachineInstr *Select; 2774 if (Pred == SCC_TRUE) { 2775 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2776 .addReg(TrueReg) 2777 .addReg(FalseReg); 2778 } else { 2779 // Instruction's operands are backwards from what is expected. 
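      // v_cndmask_b32 selects src1 when the condition bit is set, so FalseReg
      // goes in src0 and TrueReg in src1.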
2780 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2781 .addReg(FalseReg) 2782 .addReg(TrueReg); 2783 } 2784 2785 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2786 return; 2787 } 2788 2789 if (DstSize == 64 && Pred == SCC_TRUE) { 2790 MachineInstr *Select = 2791 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2792 .addReg(TrueReg) 2793 .addReg(FalseReg); 2794 2795 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2796 return; 2797 } 2798 2799 static const int16_t Sub0_15[] = { 2800 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2801 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2802 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2803 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2804 }; 2805 2806 static const int16_t Sub0_15_64[] = { 2807 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2808 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2809 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2810 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2811 }; 2812 2813 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2814 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2815 const int16_t *SubIndices = Sub0_15; 2816 int NElts = DstSize / 32; 2817 2818 // 64-bit select is only available for SALU. 2819 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2820 if (Pred == SCC_TRUE) { 2821 if (NElts % 2) { 2822 SelOp = AMDGPU::S_CSELECT_B32; 2823 EltRC = &AMDGPU::SGPR_32RegClass; 2824 } else { 2825 SelOp = AMDGPU::S_CSELECT_B64; 2826 EltRC = &AMDGPU::SGPR_64RegClass; 2827 SubIndices = Sub0_15_64; 2828 NElts /= 2; 2829 } 2830 } 2831 2832 MachineInstrBuilder MIB = BuildMI( 2833 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2834 2835 I = MIB->getIterator(); 2836 2837 SmallVector<Register, 8> Regs; 2838 for (int Idx = 0; Idx != NElts; ++Idx) { 2839 Register DstElt = MRI.createVirtualRegister(EltRC); 2840 Regs.push_back(DstElt); 2841 2842 unsigned SubIdx = SubIndices[Idx]; 2843 2844 MachineInstr *Select; 2845 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2846 Select = 2847 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2848 .addReg(FalseReg, 0, SubIdx) 2849 .addReg(TrueReg, 0, SubIdx); 2850 } else { 2851 Select = 2852 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2853 .addReg(TrueReg, 0, SubIdx) 2854 .addReg(FalseReg, 0, SubIdx); 2855 } 2856 2857 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2858 fixImplicitOperands(*Select); 2859 2860 MIB.addReg(DstElt) 2861 .addImm(SubIdx); 2862 } 2863 } 2864 2865 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) { 2866 switch (MI.getOpcode()) { 2867 case AMDGPU::V_MOV_B32_e32: 2868 case AMDGPU::V_MOV_B32_e64: 2869 case AMDGPU::V_MOV_B64_PSEUDO: 2870 case AMDGPU::V_MOV_B64_e32: 2871 case AMDGPU::V_MOV_B64_e64: 2872 case AMDGPU::S_MOV_B32: 2873 case AMDGPU::S_MOV_B64: 2874 case AMDGPU::COPY: 2875 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2876 case AMDGPU::V_ACCVGPR_READ_B32_e64: 2877 case AMDGPU::V_ACCVGPR_MOV_B32: 2878 return true; 2879 default: 2880 return false; 2881 } 2882 } 2883 2884 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2885 unsigned Kind) const { 2886 switch(Kind) { 2887 case PseudoSourceValue::Stack: 2888 case PseudoSourceValue::FixedStack: 2889 return AMDGPUAS::PRIVATE_ADDRESS; 2890 case PseudoSourceValue::ConstantPool: 2891 case PseudoSourceValue::GOT: 2892 case PseudoSourceValue::JumpTable: 2893 case PseudoSourceValue::GlobalValueCallEntry: 2894 case PseudoSourceValue::ExternalSymbolCallEntry: 2895 case PseudoSourceValue::TargetCustom: 2896 return AMDGPUAS::CONSTANT_ADDRESS; 2897 
} 2898 return AMDGPUAS::FLAT_ADDRESS; 2899 } 2900 2901 static void removeModOperands(MachineInstr &MI) { 2902 unsigned Opc = MI.getOpcode(); 2903 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2904 AMDGPU::OpName::src0_modifiers); 2905 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2906 AMDGPU::OpName::src1_modifiers); 2907 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2908 AMDGPU::OpName::src2_modifiers); 2909 2910 MI.removeOperand(Src2ModIdx); 2911 MI.removeOperand(Src1ModIdx); 2912 MI.removeOperand(Src0ModIdx); 2913 } 2914 2915 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2916 Register Reg, MachineRegisterInfo *MRI) const { 2917 if (!MRI->hasOneNonDBGUse(Reg)) 2918 return false; 2919 2920 switch (DefMI.getOpcode()) { 2921 default: 2922 return false; 2923 case AMDGPU::S_MOV_B64: 2924 // TODO: We could fold 64-bit immediates, but this get complicated 2925 // when there are sub-registers. 2926 return false; 2927 2928 case AMDGPU::V_MOV_B32_e32: 2929 case AMDGPU::S_MOV_B32: 2930 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2931 break; 2932 } 2933 2934 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2935 assert(ImmOp); 2936 // FIXME: We could handle FrameIndex values here. 2937 if (!ImmOp->isImm()) 2938 return false; 2939 2940 unsigned Opc = UseMI.getOpcode(); 2941 if (Opc == AMDGPU::COPY) { 2942 Register DstReg = UseMI.getOperand(0).getReg(); 2943 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2944 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2945 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2946 APInt Imm(32, ImmOp->getImm()); 2947 2948 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2949 Imm = Imm.ashr(16); 2950 2951 if (RI.isAGPR(*MRI, DstReg)) { 2952 if (!isInlineConstant(Imm)) 2953 return false; 2954 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 2955 } 2956 2957 if (Is16Bit) { 2958 if (isVGPRCopy) 2959 return false; // Do not clobber vgpr_hi16 2960 2961 if (DstReg.isVirtual() && UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2962 return false; 2963 2964 UseMI.getOperand(0).setSubReg(0); 2965 if (DstReg.isPhysical()) { 2966 DstReg = RI.get32BitRegister(DstReg); 2967 UseMI.getOperand(0).setReg(DstReg); 2968 } 2969 assert(UseMI.getOperand(1).getReg().isVirtual()); 2970 } 2971 2972 UseMI.setDesc(get(NewOpc)); 2973 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2974 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2975 return true; 2976 } 2977 2978 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2979 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 || 2980 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2981 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) { 2982 // Don't fold if we are using source or output modifiers. The new VOP2 2983 // instructions don't have them. 2984 if (hasAnyModifiersSet(UseMI)) 2985 return false; 2986 2987 // If this is a free constant, there's no reason to do this. 2988 // TODO: We could fold this here instead of letting SIFoldOperands do it 2989 // later. 2990 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2991 2992 // Any src operand can be used for the legality check. 
2993 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2994 return false; 2995 2996 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2997 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64; 2998 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2999 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64; 3000 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 3001 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 3002 3003 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 3004 // We should only expect these to be on src0 due to canonicalization. 3005 if (Src0->isReg() && Src0->getReg() == Reg) { 3006 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 3007 return false; 3008 3009 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 3010 return false; 3011 3012 unsigned NewOpc = 3013 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 3014 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 3015 if (pseudoToMCOpcode(NewOpc) == -1) 3016 return false; 3017 3018 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 3019 3020 const int64_t Imm = ImmOp->getImm(); 3021 3022 // FIXME: This would be a lot easier if we could return a new instruction 3023 // instead of having to modify in place. 3024 3025 // Remove these first since they are at the end. 3026 UseMI.removeOperand( 3027 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 3028 UseMI.removeOperand( 3029 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 3030 3031 Register Src1Reg = Src1->getReg(); 3032 unsigned Src1SubReg = Src1->getSubReg(); 3033 Src0->setReg(Src1Reg); 3034 Src0->setSubReg(Src1SubReg); 3035 Src0->setIsKill(Src1->isKill()); 3036 3037 if (Opc == AMDGPU::V_MAC_F32_e64 || 3038 Opc == AMDGPU::V_MAC_F16_e64 || 3039 Opc == AMDGPU::V_FMAC_F32_e64 || 3040 Opc == AMDGPU::V_FMAC_F16_e64) 3041 UseMI.untieRegOperand( 3042 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 3043 3044 Src1->ChangeToImmediate(Imm); 3045 3046 removeModOperands(UseMI); 3047 UseMI.setDesc(get(NewOpc)); 3048 3049 bool DeleteDef = MRI->use_nodbg_empty(Reg); 3050 if (DeleteDef) 3051 DefMI.eraseFromParent(); 3052 3053 return true; 3054 } 3055 3056 // Added part is the constant: Use v_madak_{f16, f32}. 3057 if (Src2->isReg() && Src2->getReg() == Reg) { 3058 // Not allowed to use constant bus for another operand. 3059 // We can however allow an inline immediate as src0. 3060 bool Src0Inlined = false; 3061 if (Src0->isReg()) { 3062 // Try to inline constant if possible. 3063 // If the Def moves immediate and the use is single 3064 // We are saving VGPR here. 
3065 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 3066 if (Def && Def->isMoveImmediate() && 3067 isInlineConstant(Def->getOperand(1)) && 3068 MRI->hasOneUse(Src0->getReg())) { 3069 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 3070 Src0Inlined = true; 3071 } else if ((Src0->getReg().isPhysical() && 3072 (ST.getConstantBusLimit(Opc) <= 1 && 3073 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 3074 (Src0->getReg().isVirtual() && 3075 (ST.getConstantBusLimit(Opc) <= 1 && 3076 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 3077 return false; 3078 // VGPR is okay as Src0 - fallthrough 3079 } 3080 3081 if (Src1->isReg() && !Src0Inlined ) { 3082 // We have one slot for inlinable constant so far - try to fill it 3083 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 3084 if (Def && Def->isMoveImmediate() && 3085 isInlineConstant(Def->getOperand(1)) && 3086 MRI->hasOneUse(Src1->getReg()) && 3087 commuteInstruction(UseMI)) { 3088 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 3089 } else if ((Src1->getReg().isPhysical() && 3090 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 3091 (Src1->getReg().isVirtual() && 3092 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 3093 return false; 3094 // VGPR is okay as Src1 - fallthrough 3095 } 3096 3097 unsigned NewOpc = 3098 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 3099 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 3100 if (pseudoToMCOpcode(NewOpc) == -1) 3101 return false; 3102 3103 const int64_t Imm = ImmOp->getImm(); 3104 3105 // FIXME: This would be a lot easier if we could return a new instruction 3106 // instead of having to modify in place. 3107 3108 // Remove these first since they are at the end. 3109 UseMI.removeOperand( 3110 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 3111 UseMI.removeOperand( 3112 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 3113 3114 if (Opc == AMDGPU::V_MAC_F32_e64 || 3115 Opc == AMDGPU::V_MAC_F16_e64 || 3116 Opc == AMDGPU::V_FMAC_F32_e64 || 3117 Opc == AMDGPU::V_FMAC_F16_e64) 3118 UseMI.untieRegOperand( 3119 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 3120 3121 // ChangingToImmediate adds Src2 back to the instruction. 3122 Src2->ChangeToImmediate(Imm); 3123 3124 // These come before src2. 3125 removeModOperands(UseMI); 3126 UseMI.setDesc(get(NewOpc)); 3127 // It might happen that UseMI was commuted 3128 // and we now have SGPR as SRC1. If so 2 inlined 3129 // constant and SGPR are illegal. 3130 legalizeOperands(UseMI); 3131 3132 bool DeleteDef = MRI->use_nodbg_empty(Reg); 3133 if (DeleteDef) 3134 DefMI.eraseFromParent(); 3135 3136 return true; 3137 } 3138 } 3139 3140 return false; 3141 } 3142 3143 static bool 3144 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 3145 ArrayRef<const MachineOperand *> BaseOps2) { 3146 if (BaseOps1.size() != BaseOps2.size()) 3147 return false; 3148 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 3149 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 3150 return false; 3151 } 3152 return true; 3153 } 3154 3155 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 3156 int WidthB, int OffsetB) { 3157 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 3158 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 3159 int LowWidth = (LowOffset == OffsetA) ? 
WidthA : WidthB; 3160 return LowOffset + LowWidth <= HighOffset; 3161 } 3162 3163 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 3164 const MachineInstr &MIb) const { 3165 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 3166 int64_t Offset0, Offset1; 3167 unsigned Dummy0, Dummy1; 3168 bool Offset0IsScalable, Offset1IsScalable; 3169 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 3170 Dummy0, &RI) || 3171 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 3172 Dummy1, &RI)) 3173 return false; 3174 3175 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 3176 return false; 3177 3178 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 3179 // FIXME: Handle ds_read2 / ds_write2. 3180 return false; 3181 } 3182 unsigned Width0 = MIa.memoperands().front()->getSize(); 3183 unsigned Width1 = MIb.memoperands().front()->getSize(); 3184 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 3185 } 3186 3187 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 3188 const MachineInstr &MIb) const { 3189 assert(MIa.mayLoadOrStore() && 3190 "MIa must load from or modify a memory location"); 3191 assert(MIb.mayLoadOrStore() && 3192 "MIb must load from or modify a memory location"); 3193 3194 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 3195 return false; 3196 3197 // XXX - Can we relax this between address spaces? 3198 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 3199 return false; 3200 3201 // TODO: Should we check the address space from the MachineMemOperand? That 3202 // would allow us to distinguish objects we know don't alias based on the 3203 // underlying address space, even if it was lowered to a different one, 3204 // e.g. private accesses lowered to use MUBUF instructions on a scratch 3205 // buffer. 
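  // Accesses of the same kind are disjoint only when their offset ranges do
  // not overlap. Accesses of different kinds are treated as disjoint unless
  // one of them can reach the other's memory, e.g. FLAT (which may address
  // both LDS and global memory) or buffer vs. scalar memory.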
3206 if (isDS(MIa)) { 3207 if (isDS(MIb)) 3208 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3209 3210 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 3211 } 3212 3213 if (isMUBUF(MIa) || isMTBUF(MIa)) { 3214 if (isMUBUF(MIb) || isMTBUF(MIb)) 3215 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3216 3217 return !isFLAT(MIb) && !isSMRD(MIb); 3218 } 3219 3220 if (isSMRD(MIa)) { 3221 if (isSMRD(MIb)) 3222 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3223 3224 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 3225 } 3226 3227 if (isFLAT(MIa)) { 3228 if (isFLAT(MIb)) 3229 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3230 3231 return false; 3232 } 3233 3234 return false; 3235 } 3236 3237 static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, 3238 int64_t &Imm, MachineInstr **DefMI = nullptr) { 3239 if (Reg.isPhysical()) 3240 return false; 3241 auto *Def = MRI.getUniqueVRegDef(Reg); 3242 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) { 3243 Imm = Def->getOperand(1).getImm(); 3244 if (DefMI) 3245 *DefMI = Def; 3246 return true; 3247 } 3248 return false; 3249 } 3250 3251 static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm, 3252 MachineInstr **DefMI = nullptr) { 3253 if (!MO->isReg()) 3254 return false; 3255 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 3256 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3257 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI); 3258 } 3259 3260 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 3261 MachineInstr &NewMI) { 3262 if (LV) { 3263 unsigned NumOps = MI.getNumOperands(); 3264 for (unsigned I = 1; I < NumOps; ++I) { 3265 MachineOperand &Op = MI.getOperand(I); 3266 if (Op.isReg() && Op.isKill()) 3267 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 3268 } 3269 } 3270 } 3271 3272 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI, 3273 LiveVariables *LV, 3274 LiveIntervals *LIS) const { 3275 MachineBasicBlock &MBB = *MI.getParent(); 3276 unsigned Opc = MI.getOpcode(); 3277 3278 // Handle MFMA. 3279 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc); 3280 if (NewMFMAOpc != -1) { 3281 MachineInstrBuilder MIB = 3282 BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc)); 3283 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) 3284 MIB.add(MI.getOperand(I)); 3285 updateLiveVariables(LV, MI, *MIB); 3286 if (LIS) 3287 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3288 return MIB; 3289 } 3290 3291 // Handle MAC/FMAC. 
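  // V_MAC/V_FMAC are two-address VOP2 forms with the accumulator tied to the
  // destination. Convert them to the three-address VOP3 V_MAD/V_FMA forms, or
  // to v_madmk/v_madak (v_fmamk/v_fmaak) when one of the sources can be folded
  // as an immediate.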
3292 bool IsF16 = Opc == AMDGPU::V_MAC_F16_e32 || Opc == AMDGPU::V_MAC_F16_e64 || 3293 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 3294 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 3295 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 || 3296 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64 || 3297 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 || 3298 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3299 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3300 bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 || 3301 Opc == AMDGPU::V_MAC_LEGACY_F32_e64 || 3302 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 || 3303 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64; 3304 bool Src0Literal = false; 3305 3306 switch (Opc) { 3307 default: 3308 return nullptr; 3309 case AMDGPU::V_MAC_F16_e64: 3310 case AMDGPU::V_FMAC_F16_e64: 3311 case AMDGPU::V_MAC_F32_e64: 3312 case AMDGPU::V_MAC_LEGACY_F32_e64: 3313 case AMDGPU::V_FMAC_F32_e64: 3314 case AMDGPU::V_FMAC_LEGACY_F32_e64: 3315 case AMDGPU::V_FMAC_F64_e64: 3316 break; 3317 case AMDGPU::V_MAC_F16_e32: 3318 case AMDGPU::V_FMAC_F16_e32: 3319 case AMDGPU::V_MAC_F32_e32: 3320 case AMDGPU::V_MAC_LEGACY_F32_e32: 3321 case AMDGPU::V_FMAC_F32_e32: 3322 case AMDGPU::V_FMAC_LEGACY_F32_e32: 3323 case AMDGPU::V_FMAC_F64_e32: { 3324 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3325 AMDGPU::OpName::src0); 3326 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 3327 if (!Src0->isReg() && !Src0->isImm()) 3328 return nullptr; 3329 3330 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 3331 Src0Literal = true; 3332 3333 break; 3334 } 3335 } 3336 3337 MachineInstrBuilder MIB; 3338 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3339 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 3340 const MachineOperand *Src0Mods = 3341 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 3342 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3343 const MachineOperand *Src1Mods = 3344 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 3345 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3346 const MachineOperand *Src2Mods = 3347 getNamedOperand(MI, AMDGPU::OpName::src2_modifiers); 3348 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3349 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 3350 3351 if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsF64 && 3352 !IsLegacy && 3353 // If we have an SGPR input, we will violate the constant bus restriction. 3354 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 3355 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) { 3356 MachineInstr *DefMI; 3357 const auto killDef = [&DefMI, &MBB, this]() -> void { 3358 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3359 // The only user is the instruction which will be killed. 3360 if (!MRI.hasOneNonDBGUse(DefMI->getOperand(0).getReg())) 3361 return; 3362 // We cannot just remove the DefMI here, calling pass will crash. 3363 DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF)); 3364 for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I) 3365 DefMI->removeOperand(I); 3366 }; 3367 3368 int64_t Imm; 3369 if (!Src0Literal && getFoldableImm(Src2, Imm, &DefMI)) { 3370 unsigned NewOpc = 3371 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 3372 : (IsF16 ? 
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 3373 if (pseudoToMCOpcode(NewOpc) != -1) { 3374 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3375 .add(*Dst) 3376 .add(*Src0) 3377 .add(*Src1) 3378 .addImm(Imm); 3379 updateLiveVariables(LV, MI, *MIB); 3380 if (LIS) 3381 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3382 killDef(); 3383 return MIB; 3384 } 3385 } 3386 unsigned NewOpc = IsFMA 3387 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 3388 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 3389 if (!Src0Literal && getFoldableImm(Src1, Imm, &DefMI)) { 3390 if (pseudoToMCOpcode(NewOpc) != -1) { 3391 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3392 .add(*Dst) 3393 .add(*Src0) 3394 .addImm(Imm) 3395 .add(*Src2); 3396 updateLiveVariables(LV, MI, *MIB); 3397 if (LIS) 3398 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3399 killDef(); 3400 return MIB; 3401 } 3402 } 3403 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) { 3404 if (Src0Literal) { 3405 Imm = Src0->getImm(); 3406 DefMI = nullptr; 3407 } 3408 if (pseudoToMCOpcode(NewOpc) != -1 && 3409 isOperandLegal( 3410 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 3411 Src1)) { 3412 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3413 .add(*Dst) 3414 .add(*Src1) 3415 .addImm(Imm) 3416 .add(*Src2); 3417 updateLiveVariables(LV, MI, *MIB); 3418 if (LIS) 3419 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3420 if (DefMI) 3421 killDef(); 3422 return MIB; 3423 } 3424 } 3425 } 3426 3427 // VOP2 mac/fmac with a literal operand cannot be converted to VOP3 mad/fma 3428 // because VOP3 does not allow a literal operand. 3429 // TODO: Remove this restriction for GFX10. 3430 if (Src0Literal) 3431 return nullptr; 3432 3433 unsigned NewOpc = IsFMA ? IsF16 ? AMDGPU::V_FMA_F16_gfx9_e64 3434 : IsF64 ? AMDGPU::V_FMA_F64_e64 3435 : IsLegacy 3436 ? AMDGPU::V_FMA_LEGACY_F32_e64 3437 : AMDGPU::V_FMA_F32_e64 3438 : IsF16 ? AMDGPU::V_MAD_F16_e64 3439 : IsLegacy ? AMDGPU::V_MAD_LEGACY_F32_e64 3440 : AMDGPU::V_MAD_F32_e64; 3441 if (pseudoToMCOpcode(NewOpc) == -1) 3442 return nullptr; 3443 3444 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3445 .add(*Dst) 3446 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3447 .add(*Src0) 3448 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3449 .add(*Src1) 3450 .addImm(Src2Mods ? Src2Mods->getImm() : 0) 3451 .add(*Src2) 3452 .addImm(Clamp ? Clamp->getImm() : 0) 3453 .addImm(Omod ? Omod->getImm() : 0); 3454 updateLiveVariables(LV, MI, *MIB); 3455 if (LIS) 3456 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3457 return MIB; 3458 } 3459 3460 // It's not generally safe to move VALU instructions across these since it will 3461 // start using the register as a base index rather than directly. 3462 // XXX - Why isn't hasSideEffects sufficient for these? 3463 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3464 switch (MI.getOpcode()) { 3465 case AMDGPU::S_SET_GPR_IDX_ON: 3466 case AMDGPU::S_SET_GPR_IDX_MODE: 3467 case AMDGPU::S_SET_GPR_IDX_OFF: 3468 return true; 3469 default: 3470 return false; 3471 } 3472 } 3473 3474 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3475 const MachineBasicBlock *MBB, 3476 const MachineFunction &MF) const { 3477 // Skipping the check for SP writes in the base implementation. The reason it 3478 // was added was apparently due to compile time concerns. 3479 // 3480 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3481 // but is probably avoidable. 3482 3483 // Copied from base implementation. 
3484 // Terminators and labels can't be scheduled around. 3485 if (MI.isTerminator() || MI.isPosition()) 3486 return true; 3487 3488 // INLINEASM_BR can jump to another block 3489 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3490 return true; 3491 3492 // Target-independent instructions do not have an implicit-use of EXEC, even 3493 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3494 // boundaries prevents incorrect movements of such instructions. 3495 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3496 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3497 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3498 changesVGPRIndexingMode(MI); 3499 } 3500 3501 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3502 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3503 Opcode == AMDGPU::DS_GWS_INIT || 3504 Opcode == AMDGPU::DS_GWS_SEMA_V || 3505 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3506 Opcode == AMDGPU::DS_GWS_SEMA_P || 3507 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3508 Opcode == AMDGPU::DS_GWS_BARRIER; 3509 } 3510 3511 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3512 // Skip the full operand and register alias search modifiesRegister 3513 // does. There's only a handful of instructions that touch this, it's only an 3514 // implicit def, and doesn't alias any other registers. 3515 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3516 for (; ImpDef && *ImpDef; ++ImpDef) { 3517 if (*ImpDef == AMDGPU::MODE) 3518 return true; 3519 } 3520 } 3521 3522 return false; 3523 } 3524 3525 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3526 unsigned Opcode = MI.getOpcode(); 3527 3528 if (MI.mayStore() && isSMRD(MI)) 3529 return true; // scalar store or atomic 3530 3531 // This will terminate the function when other lanes may need to continue. 3532 if (MI.isReturn()) 3533 return true; 3534 3535 // These instructions cause shader I/O that may cause hardware lockups 3536 // when executed with an empty EXEC mask. 3537 // 3538 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3539 // EXEC = 0, but checking for that case here seems not worth it 3540 // given the typical code patterns. 3541 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3542 isEXP(Opcode) || 3543 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3544 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3545 return true; 3546 3547 if (MI.isCall() || MI.isInlineAsm()) 3548 return true; // conservative assumption 3549 3550 // A mode change is a scalar operation that influences vector instructions. 3551 if (modifiesModeRegister(MI)) 3552 return true; 3553 3554 // These are like SALU instructions in terms of effects, so it's questionable 3555 // whether we should return true for those. 3556 // 3557 // However, executing them with EXEC = 0 causes them to operate on undefined 3558 // data, which we avoid by returning true here. 3559 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || 3560 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32) 3561 return true; 3562 3563 return false; 3564 } 3565 3566 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3567 const MachineInstr &MI) const { 3568 if (MI.isMetaInstruction()) 3569 return false; 3570 3571 // This won't read exec if this is an SGPR->SGPR copy. 
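// A COPY that produces a VGPR is eventually lowered to V_MOV_B32 and so
// implicitly reads exec, whereas an SGPR-to-SGPR copy does not.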
3572 if (MI.isCopyLike()) { 3573 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3574 return true; 3575 3576 // Make sure this isn't copying exec as a normal operand 3577 return MI.readsRegister(AMDGPU::EXEC, &RI); 3578 } 3579 3580 // Make a conservative assumption about the callee. 3581 if (MI.isCall()) 3582 return true; 3583 3584 // Be conservative with any unhandled generic opcodes. 3585 if (!isTargetSpecificOpcode(MI.getOpcode())) 3586 return true; 3587 3588 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3589 } 3590 3591 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3592 switch (Imm.getBitWidth()) { 3593 case 1: // This likely will be a condition code mask. 3594 return true; 3595 3596 case 32: 3597 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3598 ST.hasInv2PiInlineImm()); 3599 case 64: 3600 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3601 ST.hasInv2PiInlineImm()); 3602 case 16: 3603 return ST.has16BitInsts() && 3604 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3605 ST.hasInv2PiInlineImm()); 3606 default: 3607 llvm_unreachable("invalid bitwidth"); 3608 } 3609 } 3610 3611 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3612 uint8_t OperandType) const { 3613 if (!MO.isImm() || 3614 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3615 OperandType > AMDGPU::OPERAND_SRC_LAST) 3616 return false; 3617 3618 // MachineOperand provides no way to tell the true operand size, since it only 3619 // records a 64-bit value. We need to know the size to determine if a 32-bit 3620 // floating point immediate bit pattern is legal for an integer immediate. It 3621 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3622 3623 int64_t Imm = MO.getImm(); 3624 switch (OperandType) { 3625 case AMDGPU::OPERAND_REG_IMM_INT32: 3626 case AMDGPU::OPERAND_REG_IMM_FP32: 3627 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 3628 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3629 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3630 case AMDGPU::OPERAND_REG_IMM_V2FP32: 3631 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 3632 case AMDGPU::OPERAND_REG_IMM_V2INT32: 3633 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: 3634 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3635 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3636 int32_t Trunc = static_cast<int32_t>(Imm); 3637 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3638 } 3639 case AMDGPU::OPERAND_REG_IMM_INT64: 3640 case AMDGPU::OPERAND_REG_IMM_FP64: 3641 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3642 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3643 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 3644 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3645 ST.hasInv2PiInlineImm()); 3646 case AMDGPU::OPERAND_REG_IMM_INT16: 3647 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3648 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3649 // We would expect inline immediates to not be concerned with an integer/fp 3650 // distinction. However, in the case of 16-bit integer operations, the 3651 // "floating point" values appear to not work. It seems read the low 16-bits 3652 // of 32-bit immediates, which happens to always work for the integer 3653 // values. 3654 // 3655 // See llvm bugzilla 46302. 3656 // 3657 // TODO: Theoretically we could use op-sel to use the high bits of the 3658 // 32-bit FP values. 
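// In practice only the integer inline range (-16..64) is accepted here.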
3659 return AMDGPU::isInlinableIntLiteral(Imm); 3660 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3661 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3662 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3663 // This suffers the same problem as the scalar 16-bit cases. 3664 return AMDGPU::isInlinableIntLiteralV216(Imm); 3665 case AMDGPU::OPERAND_REG_IMM_FP16: 3666 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: 3667 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3668 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3669 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3670 // A few special case instructions have 16-bit operands on subtargets 3671 // where 16-bit instructions are not legal. 3672 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 3673 // constants in these cases 3674 int16_t Trunc = static_cast<int16_t>(Imm); 3675 return ST.has16BitInsts() && 3676 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3677 } 3678 3679 return false; 3680 } 3681 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3682 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3683 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3684 uint32_t Trunc = static_cast<uint32_t>(Imm); 3685 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3686 } 3687 case AMDGPU::OPERAND_KIMM32: 3688 case AMDGPU::OPERAND_KIMM16: 3689 return false; 3690 default: 3691 llvm_unreachable("invalid bitwidth"); 3692 } 3693 } 3694 3695 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3696 const MCOperandInfo &OpInfo) const { 3697 switch (MO.getType()) { 3698 case MachineOperand::MO_Register: 3699 return false; 3700 case MachineOperand::MO_Immediate: 3701 return !isInlineConstant(MO, OpInfo); 3702 case MachineOperand::MO_FrameIndex: 3703 case MachineOperand::MO_MachineBasicBlock: 3704 case MachineOperand::MO_ExternalSymbol: 3705 case MachineOperand::MO_GlobalAddress: 3706 case MachineOperand::MO_MCSymbol: 3707 return true; 3708 default: 3709 llvm_unreachable("unexpected operand type"); 3710 } 3711 } 3712 3713 static bool compareMachineOp(const MachineOperand &Op0, 3714 const MachineOperand &Op1) { 3715 if (Op0.getType() != Op1.getType()) 3716 return false; 3717 3718 switch (Op0.getType()) { 3719 case MachineOperand::MO_Register: 3720 return Op0.getReg() == Op1.getReg(); 3721 case MachineOperand::MO_Immediate: 3722 return Op0.getImm() == Op1.getImm(); 3723 default: 3724 llvm_unreachable("Didn't expect to be comparing these operand types"); 3725 } 3726 } 3727 3728 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3729 const MachineOperand &MO) const { 3730 const MCInstrDesc &InstDesc = MI.getDesc(); 3731 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3732 3733 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3734 3735 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3736 return true; 3737 3738 if (OpInfo.RegClass < 0) 3739 return false; 3740 3741 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3742 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3743 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3744 AMDGPU::OpName::src2)) 3745 return false; 3746 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3747 } 3748 3749 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3750 return false; 3751 3752 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3753 return true; 3754 3755 return ST.hasVOP3Literal(); 3756 } 3757 3758 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3759 // GFX90A does not have V_MUL_LEGACY_F32_e32. 
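// (so its e64 form has no 32-bit encoding to shrink to on that subtarget).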
3760 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts()) 3761 return false; 3762 3763 int Op32 = AMDGPU::getVOPe32(Opcode); 3764 if (Op32 == -1) 3765 return false; 3766 3767 return pseudoToMCOpcode(Op32) != -1; 3768 } 3769 3770 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3771 // The src0_modifier operand is present on all instructions 3772 // that have modifiers. 3773 3774 return AMDGPU::getNamedOperandIdx(Opcode, 3775 AMDGPU::OpName::src0_modifiers) != -1; 3776 } 3777 3778 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3779 unsigned OpName) const { 3780 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3781 return Mods && Mods->getImm(); 3782 } 3783 3784 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3785 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3786 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3787 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3788 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3789 hasModifiersSet(MI, AMDGPU::OpName::omod); 3790 } 3791 3792 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3793 const MachineRegisterInfo &MRI) const { 3794 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3795 // Can't shrink instruction with three operands. 3796 if (Src2) { 3797 switch (MI.getOpcode()) { 3798 default: return false; 3799 3800 case AMDGPU::V_ADDC_U32_e64: 3801 case AMDGPU::V_SUBB_U32_e64: 3802 case AMDGPU::V_SUBBREV_U32_e64: { 3803 const MachineOperand *Src1 3804 = getNamedOperand(MI, AMDGPU::OpName::src1); 3805 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3806 return false; 3807 // Additional verification is needed for sdst/src2. 3808 return true; 3809 } 3810 case AMDGPU::V_MAC_F16_e64: 3811 case AMDGPU::V_MAC_F32_e64: 3812 case AMDGPU::V_MAC_LEGACY_F32_e64: 3813 case AMDGPU::V_FMAC_F16_e64: 3814 case AMDGPU::V_FMAC_F32_e64: 3815 case AMDGPU::V_FMAC_F64_e64: 3816 case AMDGPU::V_FMAC_LEGACY_F32_e64: 3817 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3818 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3819 return false; 3820 break; 3821 3822 case AMDGPU::V_CNDMASK_B32_e64: 3823 break; 3824 } 3825 } 3826 3827 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3828 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3829 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3830 return false; 3831 3832 // We don't need to check src0, all input types are legal, so just make sure 3833 // src0 isn't using any modifiers. 3834 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3835 return false; 3836 3837 // Can it be shrunk to a valid 32 bit opcode? 3838 if (!hasVALU32BitEncoding(MI.getOpcode())) 3839 return false; 3840 3841 // Check output modifiers 3842 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3843 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3844 } 3845 3846 // Set VCC operand with all flags from \p Orig, except for setting it as 3847 // implicit. 
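// This is used when shrinking e.g. V_CNDMASK_B32_e64, where the explicit src2
// carry-in becomes an implicit read of VCC or VCC_LO on the e32 form.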
3848 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3849 const MachineOperand &Orig) { 3850 3851 for (MachineOperand &Use : MI.implicit_operands()) { 3852 if (Use.isUse() && 3853 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3854 Use.setIsUndef(Orig.isUndef()); 3855 Use.setIsKill(Orig.isKill()); 3856 return; 3857 } 3858 } 3859 } 3860 3861 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3862 unsigned Op32) const { 3863 MachineBasicBlock *MBB = MI.getParent(); 3864 MachineInstrBuilder Inst32 = 3865 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3866 .setMIFlags(MI.getFlags()); 3867 3868 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3869 // For VOPC instructions, this is replaced by an implicit def of vcc. 3870 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst) != -1) { 3871 // dst 3872 Inst32.add(MI.getOperand(0)); 3873 } else if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::sdst) != -1) { 3874 // VOPCX instructions won't be writing to an explicit dst, so this should 3875 // not fail for these instructions. 3876 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3877 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3878 "Unexpected case"); 3879 } 3880 3881 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3882 3883 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3884 if (Src1) 3885 Inst32.add(*Src1); 3886 3887 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3888 3889 if (Src2) { 3890 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3891 if (Op32Src2Idx != -1) { 3892 Inst32.add(*Src2); 3893 } else { 3894 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3895 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3896 // of vcc was already added during the initial BuildMI, but we 3897 // 1) may need to change vcc to vcc_lo to preserve the original register 3898 // 2) have to preserve the original flags. 3899 fixImplicitOperands(*Inst32); 3900 copyFlagsToImplicitVCC(*Inst32, *Src2); 3901 } 3902 } 3903 3904 return Inst32; 3905 } 3906 3907 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3908 const MachineOperand &MO, 3909 const MCOperandInfo &OpInfo) const { 3910 // Literal constants use the constant bus. 3911 //if (isLiteralConstantLike(MO, OpInfo)) 3912 // return true; 3913 if (MO.isImm()) 3914 return !isInlineConstant(MO, OpInfo); 3915 3916 if (!MO.isReg()) 3917 return true; // Misc other operands like FrameIndex 3918 3919 if (!MO.isUse()) 3920 return false; 3921 3922 if (MO.getReg().isVirtual()) 3923 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3924 3925 // Null is free 3926 if (MO.getReg() == AMDGPU::SGPR_NULL) 3927 return false; 3928 3929 // SGPRs use the constant bus 3930 if (MO.isImplicit()) { 3931 return MO.getReg() == AMDGPU::M0 || 3932 MO.getReg() == AMDGPU::VCC || 3933 MO.getReg() == AMDGPU::VCC_LO; 3934 } else { 3935 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3936 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3937 } 3938 } 3939 3940 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3941 for (const MachineOperand &MO : MI.implicit_operands()) { 3942 // We only care about reads. 
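// An implicit def (e.g. a carry-out written to VCC) does not occupy the
// constant bus; only implicit reads do.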
3943 if (MO.isDef()) 3944 continue; 3945 3946 switch (MO.getReg()) { 3947 case AMDGPU::VCC: 3948 case AMDGPU::VCC_LO: 3949 case AMDGPU::VCC_HI: 3950 case AMDGPU::M0: 3951 case AMDGPU::FLAT_SCR: 3952 return MO.getReg(); 3953 3954 default: 3955 break; 3956 } 3957 } 3958 3959 return AMDGPU::NoRegister; 3960 } 3961 3962 static bool shouldReadExec(const MachineInstr &MI) { 3963 if (SIInstrInfo::isVALU(MI)) { 3964 switch (MI.getOpcode()) { 3965 case AMDGPU::V_READLANE_B32: 3966 case AMDGPU::V_WRITELANE_B32: 3967 return false; 3968 } 3969 3970 return true; 3971 } 3972 3973 if (MI.isPreISelOpcode() || 3974 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3975 SIInstrInfo::isSALU(MI) || 3976 SIInstrInfo::isSMRD(MI)) 3977 return false; 3978 3979 return true; 3980 } 3981 3982 static bool isSubRegOf(const SIRegisterInfo &TRI, 3983 const MachineOperand &SuperVec, 3984 const MachineOperand &SubReg) { 3985 if (SubReg.getReg().isPhysical()) 3986 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3987 3988 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3989 SubReg.getReg() == SuperVec.getReg(); 3990 } 3991 3992 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3993 StringRef &ErrInfo) const { 3994 uint16_t Opcode = MI.getOpcode(); 3995 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3996 return true; 3997 3998 const MachineFunction *MF = MI.getParent()->getParent(); 3999 const MachineRegisterInfo &MRI = MF->getRegInfo(); 4000 4001 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 4002 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 4003 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 4004 4005 // Make sure the number of operands is correct. 4006 const MCInstrDesc &Desc = get(Opcode); 4007 if (!Desc.isVariadic() && 4008 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 4009 ErrInfo = "Instruction has wrong number of operands."; 4010 return false; 4011 } 4012 4013 if (MI.isInlineAsm()) { 4014 // Verify register classes for inlineasm constraints. 4015 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 4016 I != E; ++I) { 4017 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 4018 if (!RC) 4019 continue; 4020 4021 const MachineOperand &Op = MI.getOperand(I); 4022 if (!Op.isReg()) 4023 continue; 4024 4025 Register Reg = Op.getReg(); 4026 if (!Reg.isVirtual() && !RC->contains(Reg)) { 4027 ErrInfo = "inlineasm operand has incorrect register class."; 4028 return false; 4029 } 4030 } 4031 4032 return true; 4033 } 4034 4035 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 4036 ErrInfo = "missing memory operand from MIMG instruction."; 4037 return false; 4038 } 4039 4040 // Make sure the register classes are correct. 4041 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 4042 const MachineOperand &MO = MI.getOperand(i); 4043 if (MO.isFPImm()) { 4044 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 4045 "all fp values to integers."; 4046 return false; 4047 } 4048 4049 int RegClass = Desc.OpInfo[i].RegClass; 4050 4051 switch (Desc.OpInfo[i].OperandType) { 4052 case MCOI::OPERAND_REGISTER: 4053 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 4054 ErrInfo = "Illegal immediate value for operand."; 4055 return false; 4056 } 4057 break; 4058 case AMDGPU::OPERAND_REG_IMM_INT32: 4059 case AMDGPU::OPERAND_REG_IMM_FP32: 4060 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 4061 case AMDGPU::OPERAND_REG_IMM_V2FP32: 4062 break; 4063 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 4064 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 4065 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 4066 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 4067 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 4068 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 4069 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 4070 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 4071 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 4072 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 4073 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: { 4074 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 4075 ErrInfo = "Illegal immediate value for operand."; 4076 return false; 4077 } 4078 break; 4079 } 4080 case MCOI::OPERAND_IMMEDIATE: 4081 case AMDGPU::OPERAND_KIMM32: 4082 // Check if this operand is an immediate. 4083 // FrameIndex operands will be replaced by immediates, so they are 4084 // allowed. 4085 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 4086 ErrInfo = "Expected immediate, but got non-immediate"; 4087 return false; 4088 } 4089 LLVM_FALLTHROUGH; 4090 default: 4091 continue; 4092 } 4093 4094 if (!MO.isReg()) 4095 continue; 4096 Register Reg = MO.getReg(); 4097 if (!Reg) 4098 continue; 4099 4100 // FIXME: Ideally we would have separate instruction definitions with the 4101 // aligned register constraint. 4102 // FIXME: We do not verify inline asm operands, but custom inline asm 4103 // verification is broken anyway 4104 if (ST.needsAlignedVGPRs()) { 4105 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); 4106 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) { 4107 const TargetRegisterClass *SubRC = 4108 RI.getSubRegClass(RC, MO.getSubReg()); 4109 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); 4110 if (RC) 4111 RC = SubRC; 4112 } 4113 4114 // Check that this is the aligned version of the class. 
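// e.g. on subtargets that need aligned VGPRs, a 64-bit tuple must start at an
// even register: v[0:1] is valid, v[1:2] is not.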
4115 if (!RC || !RI.isProperlyAlignedRC(*RC)) { 4116 ErrInfo = "Subtarget requires even aligned vector registers"; 4117 return false; 4118 } 4119 } 4120 4121 if (RegClass != -1) { 4122 if (Reg.isVirtual()) 4123 continue; 4124 4125 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 4126 if (!RC->contains(Reg)) { 4127 ErrInfo = "Operand has incorrect register class."; 4128 return false; 4129 } 4130 } 4131 } 4132 4133 // Verify SDWA 4134 if (isSDWA(MI)) { 4135 if (!ST.hasSDWA()) { 4136 ErrInfo = "SDWA is not supported on this target"; 4137 return false; 4138 } 4139 4140 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4141 4142 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) { 4143 if (OpIdx == -1) 4144 continue; 4145 const MachineOperand &MO = MI.getOperand(OpIdx); 4146 4147 if (!ST.hasSDWAScalar()) { 4148 // Only VGPRS on VI 4149 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 4150 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 4151 return false; 4152 } 4153 } else { 4154 // No immediates on GFX9 4155 if (!MO.isReg()) { 4156 ErrInfo = 4157 "Only reg allowed as operands in SDWA instructions on GFX9+"; 4158 return false; 4159 } 4160 } 4161 } 4162 4163 if (!ST.hasSDWAOmod()) { 4164 // No omod allowed on VI 4165 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 4166 if (OMod != nullptr && 4167 (!OMod->isImm() || OMod->getImm() != 0)) { 4168 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 4169 return false; 4170 } 4171 } 4172 4173 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 4174 if (isVOPC(BasicOpcode)) { 4175 if (!ST.hasSDWASdst() && DstIdx != -1) { 4176 // Only vcc allowed as dst on VI for VOPC 4177 const MachineOperand &Dst = MI.getOperand(DstIdx); 4178 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 4179 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 4180 return false; 4181 } 4182 } else if (!ST.hasSDWAOutModsVOPC()) { 4183 // No clamp allowed on GFX9 for VOPC 4184 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 4185 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 4186 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 4187 return false; 4188 } 4189 4190 // No omod allowed on GFX9 for VOPC 4191 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 4192 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 4193 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 4194 return false; 4195 } 4196 } 4197 } 4198 4199 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 4200 if (DstUnused && DstUnused->isImm() && 4201 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 4202 const MachineOperand &Dst = MI.getOperand(DstIdx); 4203 if (!Dst.isReg() || !Dst.isTied()) { 4204 ErrInfo = "Dst register should have tied register"; 4205 return false; 4206 } 4207 4208 const MachineOperand &TiedMO = 4209 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 4210 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 4211 ErrInfo = 4212 "Dst register should be tied to implicit use of preserved register"; 4213 return false; 4214 } else if (TiedMO.getReg().isPhysical() && 4215 Dst.getReg() != TiedMO.getReg()) { 4216 ErrInfo = "Dst register should use same physical register as preserved"; 4217 return false; 4218 } 4219 } 4220 } 4221 4222 // Verify MIMG 4223 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 4224 // Ensure that the return type used is large 
enough for all the options 4225 // being used TFE/LWE require an extra result register. 4226 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 4227 if (DMask) { 4228 uint64_t DMaskImm = DMask->getImm(); 4229 uint32_t RegCount = 4230 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 4231 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 4232 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 4233 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 4234 4235 // Adjust for packed 16 bit values 4236 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 4237 RegCount >>= 1; 4238 4239 // Adjust if using LWE or TFE 4240 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 4241 RegCount += 1; 4242 4243 const uint32_t DstIdx = 4244 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 4245 const MachineOperand &Dst = MI.getOperand(DstIdx); 4246 if (Dst.isReg()) { 4247 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 4248 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 4249 if (RegCount > DstSize) { 4250 ErrInfo = "MIMG instruction returns too many registers for dst " 4251 "register class"; 4252 return false; 4253 } 4254 } 4255 } 4256 } 4257 4258 // Verify VOP*. Ignore multiple sgpr operands on writelane. 4259 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) { 4260 unsigned ConstantBusCount = 0; 4261 bool UsesLiteral = false; 4262 const MachineOperand *LiteralVal = nullptr; 4263 4264 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 4265 ++ConstantBusCount; 4266 4267 SmallVector<Register, 2> SGPRsUsed; 4268 Register SGPRUsed; 4269 4270 // Only look at the true operands. Only a real operand can use the constant 4271 // bus, and we don't want to check pseudo-operands like the source modifier 4272 // flags. 
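// The limit itself comes from ST.getConstantBusLimit(Opcode): typically one
// SGPR-or-literal source before GFX10 and two on GFX10+.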
4273 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 4274 if (OpIdx == -1) 4275 break; 4276 const MachineOperand &MO = MI.getOperand(OpIdx); 4277 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4278 if (MO.isReg()) { 4279 SGPRUsed = MO.getReg(); 4280 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 4281 return SGPRUsed != SGPR; 4282 })) { 4283 ++ConstantBusCount; 4284 SGPRsUsed.push_back(SGPRUsed); 4285 } 4286 } else { 4287 if (!UsesLiteral) { 4288 ++ConstantBusCount; 4289 UsesLiteral = true; 4290 LiteralVal = &MO; 4291 } else if (!MO.isIdenticalTo(*LiteralVal)) { 4292 assert(isVOP3(MI)); 4293 ErrInfo = "VOP3 instruction uses more than one literal"; 4294 return false; 4295 } 4296 } 4297 } 4298 } 4299 4300 SGPRUsed = findImplicitSGPRRead(MI); 4301 if (SGPRUsed != AMDGPU::NoRegister) { 4302 // Implicit uses may safely overlap true operands 4303 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 4304 return !RI.regsOverlap(SGPRUsed, SGPR); 4305 })) { 4306 ++ConstantBusCount; 4307 SGPRsUsed.push_back(SGPRUsed); 4308 } 4309 } 4310 4311 // v_writelane_b32 is an exception from constant bus restriction: 4312 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 4313 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 4314 Opcode != AMDGPU::V_WRITELANE_B32) { 4315 ErrInfo = "VOP* instruction violates constant bus restriction"; 4316 return false; 4317 } 4318 4319 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) { 4320 ErrInfo = "VOP3 instruction uses literal"; 4321 return false; 4322 } 4323 } 4324 4325 // Special case for writelane - this can break the multiple constant bus rule, 4326 // but still can't use more than one SGPR register 4327 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 4328 unsigned SGPRCount = 0; 4329 Register SGPRUsed = AMDGPU::NoRegister; 4330 4331 for (int OpIdx : {Src0Idx, Src1Idx}) { 4332 if (OpIdx == -1) 4333 break; 4334 4335 const MachineOperand &MO = MI.getOperand(OpIdx); 4336 4337 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4338 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 4339 if (MO.getReg() != SGPRUsed) 4340 ++SGPRCount; 4341 SGPRUsed = MO.getReg(); 4342 } 4343 } 4344 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 4345 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 4346 return false; 4347 } 4348 } 4349 } 4350 4351 // Verify misc. restrictions on specific instructions. 
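// v_div_scale_{f32|f64} must repeat one of its other sources in src0, and ABS
// source modifiers are not permitted on these VOP3B opcodes.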
4352 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 || 4353 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) { 4354 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4355 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4356 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 4357 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 4358 if (!compareMachineOp(Src0, Src1) && 4359 !compareMachineOp(Src0, Src2)) { 4360 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 4361 return false; 4362 } 4363 } 4364 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 4365 SISrcMods::ABS) || 4366 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 4367 SISrcMods::ABS) || 4368 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 4369 SISrcMods::ABS)) { 4370 ErrInfo = "ABS not allowed in VOP3B instructions"; 4371 return false; 4372 } 4373 } 4374 4375 if (isSOP2(MI) || isSOPC(MI)) { 4376 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4377 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4378 unsigned Immediates = 0; 4379 4380 if (!Src0.isReg() && 4381 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 4382 Immediates++; 4383 if (!Src1.isReg() && 4384 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 4385 Immediates++; 4386 4387 if (Immediates > 1) { 4388 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 4389 return false; 4390 } 4391 } 4392 4393 if (isSOPK(MI)) { 4394 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 4395 if (Desc.isBranch()) { 4396 if (!Op->isMBB()) { 4397 ErrInfo = "invalid branch target for SOPK instruction"; 4398 return false; 4399 } 4400 } else { 4401 uint64_t Imm = Op->getImm(); 4402 if (sopkIsZext(MI)) { 4403 if (!isUInt<16>(Imm)) { 4404 ErrInfo = "invalid immediate for SOPK instruction"; 4405 return false; 4406 } 4407 } else { 4408 if (!isInt<16>(Imm)) { 4409 ErrInfo = "invalid immediate for SOPK instruction"; 4410 return false; 4411 } 4412 } 4413 } 4414 } 4415 4416 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 4417 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 4418 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4419 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 4420 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4421 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 4422 4423 const unsigned StaticNumOps = Desc.getNumOperands() + 4424 Desc.getNumImplicitUses(); 4425 const unsigned NumImplicitOps = IsDst ? 2 : 1; 4426 4427 // Allow additional implicit operands. This allows a fixup done by the post 4428 // RA scheduler where the main implicit operand is killed and implicit-defs 4429 // are added for sub-registers that remain live after this instruction. 
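// (hence the check below uses '<' rather than requiring an exact operand count).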
4430 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 4431 ErrInfo = "missing implicit register operands"; 4432 return false; 4433 } 4434 4435 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4436 if (IsDst) { 4437 if (!Dst->isUse()) { 4438 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 4439 return false; 4440 } 4441 4442 unsigned UseOpIdx; 4443 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 4444 UseOpIdx != StaticNumOps + 1) { 4445 ErrInfo = "movrel implicit operands should be tied"; 4446 return false; 4447 } 4448 } 4449 4450 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4451 const MachineOperand &ImpUse 4452 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 4453 if (!ImpUse.isReg() || !ImpUse.isUse() || 4454 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 4455 ErrInfo = "src0 should be subreg of implicit vector use"; 4456 return false; 4457 } 4458 } 4459 4460 // Make sure we aren't losing exec uses in the td files. This mostly requires 4461 // being careful when using let Uses to try to add other use registers. 4462 if (shouldReadExec(MI)) { 4463 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 4464 ErrInfo = "VALU instruction does not implicitly read exec mask"; 4465 return false; 4466 } 4467 } 4468 4469 if (isSMRD(MI)) { 4470 if (MI.mayStore()) { 4471 // The register offset form of scalar stores may only use m0 as the 4472 // soffset register. 4473 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 4474 if (Soff && Soff->getReg() != AMDGPU::M0) { 4475 ErrInfo = "scalar stores must use m0 as offset register"; 4476 return false; 4477 } 4478 } 4479 } 4480 4481 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 4482 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4483 if (Offset->getImm() != 0) { 4484 ErrInfo = "subtarget does not support offsets in flat instructions"; 4485 return false; 4486 } 4487 } 4488 4489 if (isMIMG(MI)) { 4490 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4491 if (DimOp) { 4492 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4493 AMDGPU::OpName::vaddr0); 4494 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4495 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4496 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4497 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4498 const AMDGPU::MIMGDimInfo *Dim = 4499 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4500 4501 if (!Dim) { 4502 ErrInfo = "dim is out of range"; 4503 return false; 4504 } 4505 4506 bool IsA16 = false; 4507 if (ST.hasR128A16()) { 4508 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4509 IsA16 = R128A16->getImm() != 0; 4510 } else if (ST.hasGFX10A16()) { 4511 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4512 IsA16 = A16->getImm() != 0; 4513 } 4514 4515 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4516 4517 unsigned AddrWords = 4518 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16()); 4519 4520 unsigned VAddrWords; 4521 if (IsNSA) { 4522 VAddrWords = SRsrcIdx - VAddr0Idx; 4523 } else { 4524 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4525 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4526 if (AddrWords > 8) 4527 AddrWords = 16; 4528 } 4529 4530 if (VAddrWords != AddrWords) { 4531 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4532 << " but got " << VAddrWords << "\n"); 4533 ErrInfo = "bad vaddr size"; 4534 return 
false; 4535 } 4536 } 4537 } 4538 4539 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4540 if (DppCt) { 4541 using namespace AMDGPU::DPP; 4542 4543 unsigned DC = DppCt->getImm(); 4544 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4545 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4546 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4547 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4548 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4549 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4550 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4551 ErrInfo = "Invalid dpp_ctrl value"; 4552 return false; 4553 } 4554 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4555 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4556 ErrInfo = "Invalid dpp_ctrl value: " 4557 "wavefront shifts are not supported on GFX10+"; 4558 return false; 4559 } 4560 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4561 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4562 ErrInfo = "Invalid dpp_ctrl value: " 4563 "broadcasts are not supported on GFX10+"; 4564 return false; 4565 } 4566 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4567 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4568 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST && 4569 DC <= DppCtrl::ROW_NEWBCAST_LAST && 4570 !ST.hasGFX90AInsts()) { 4571 ErrInfo = "Invalid dpp_ctrl value: " 4572 "row_newbroadcast/row_share is not supported before " 4573 "GFX90A/GFX10"; 4574 return false; 4575 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) { 4576 ErrInfo = "Invalid dpp_ctrl value: " 4577 "row_share and row_xmask are not supported before GFX10"; 4578 return false; 4579 } 4580 } 4581 4582 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4583 4584 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO && 4585 ((DstIdx >= 0 && 4586 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || 4587 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) || 4588 ((Src0Idx >= 0 && 4589 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || 4590 Desc.OpInfo[Src0Idx].RegClass == 4591 AMDGPU::VReg_64_Align2RegClassID)))) && 4592 !AMDGPU::isLegal64BitDPPControl(DC)) { 4593 ErrInfo = "Invalid dpp_ctrl value: " 4594 "64 bit dpp only support row_newbcast"; 4595 return false; 4596 } 4597 } 4598 4599 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) { 4600 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4601 uint16_t DataNameIdx = isDS(Opcode) ? 
AMDGPU::OpName::data0 4602 : AMDGPU::OpName::vdata; 4603 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx); 4604 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1); 4605 if (Data && !Data->isReg()) 4606 Data = nullptr; 4607 4608 if (ST.hasGFX90AInsts()) { 4609 if (Dst && Data && 4610 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { 4611 ErrInfo = "Invalid register class: " 4612 "vdata and vdst should be both VGPR or AGPR"; 4613 return false; 4614 } 4615 if (Data && Data2 && 4616 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { 4617 ErrInfo = "Invalid register class: " 4618 "both data operands should be VGPR or AGPR"; 4619 return false; 4620 } 4621 } else { 4622 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || 4623 (Data && RI.isAGPR(MRI, Data->getReg())) || 4624 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { 4625 ErrInfo = "Invalid register class: " 4626 "agpr loads and stores not supported on this GPU"; 4627 return false; 4628 } 4629 } 4630 } 4631 4632 if (ST.needsAlignedVGPRs() && 4633 (MI.getOpcode() == AMDGPU::DS_GWS_INIT || 4634 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR || 4635 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER)) { 4636 const MachineOperand *Op = getNamedOperand(MI, AMDGPU::OpName::data0); 4637 Register Reg = Op->getReg(); 4638 bool Aligned = true; 4639 if (Reg.isPhysical()) { 4640 Aligned = !(RI.getHWRegIndex(Reg) & 1); 4641 } else { 4642 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 4643 Aligned = RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) && 4644 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1); 4645 } 4646 4647 if (!Aligned) { 4648 ErrInfo = "Subtarget requires even aligned vector registers " 4649 "for DS_GWS instructions"; 4650 return false; 4651 } 4652 } 4653 4654 if (MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && 4655 !ST.hasGFX90AInsts()) { 4656 const MachineOperand *Src = getNamedOperand(MI, AMDGPU::OpName::src0); 4657 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) { 4658 ErrInfo = "Invalid register class: " 4659 "v_accvgpr_write with an SGPR is not supported on this GPU"; 4660 return false; 4661 } 4662 } 4663 4664 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) { 4665 const MachineOperand &SrcOp = MI.getOperand(1); 4666 if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) { 4667 ErrInfo = "pseudo expects only physical SGPRs"; 4668 return false; 4669 } 4670 } 4671 4672 return true; 4673 } 4674 4675 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4676 switch (MI.getOpcode()) { 4677 default: return AMDGPU::INSTRUCTION_LIST_END; 4678 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4679 case AMDGPU::COPY: return AMDGPU::COPY; 4680 case AMDGPU::PHI: return AMDGPU::PHI; 4681 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4682 case AMDGPU::WQM: return AMDGPU::WQM; 4683 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4684 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM; 4685 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM; 4686 case AMDGPU::S_MOV_B32: { 4687 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4688 return MI.getOperand(1).isReg() || 4689 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4690 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4691 } 4692 case AMDGPU::S_ADD_I32: 4693 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4694 case AMDGPU::S_ADDC_U32: 4695 return AMDGPU::V_ADDC_U32_e32; 4696 case AMDGPU::S_SUB_I32: 4697 return ST.hasAddNoCarry() ? 
AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4698 // FIXME: These are not consistently handled, and selected when the carry is 4699 // used. 4700 case AMDGPU::S_ADD_U32: 4701 return AMDGPU::V_ADD_CO_U32_e32; 4702 case AMDGPU::S_SUB_U32: 4703 return AMDGPU::V_SUB_CO_U32_e32; 4704 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4705 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64; 4706 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64; 4707 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64; 4708 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4709 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4710 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4711 case AMDGPU::S_XNOR_B32: 4712 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4713 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4714 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4715 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4716 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4717 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4718 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64; 4719 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4720 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64; 4721 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4722 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64; 4723 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64; 4724 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64; 4725 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64; 4726 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64; 4727 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4728 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4729 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4730 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4731 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64; 4732 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64; 4733 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64; 4734 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64; 4735 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64; 4736 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64; 4737 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64; 4738 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64; 4739 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64; 4740 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64; 4741 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64; 4742 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64; 4743 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64; 4744 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64; 4745 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4746 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4747 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4748 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4749 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4750 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4751 } 4752 llvm_unreachable( 4753 "Unexpected scalar opcode without corresponding vector one!"); 4754 } 4755 4756 static const TargetRegisterClass * 4757 adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI, 4758 const MachineRegisterInfo &MRI, 4759 const MCInstrDesc &TID, unsigned RCID, 4760 bool IsAllocatable) { 
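// For memory-like instructions (including DS and MIMG), narrow combined AV_*
// (VGPR-or-AGPR) operand classes to the plain VReg_* class of the same width
// unless AGPR operands are genuinely usable here on this subtarget.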
4761 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4762 (((TID.mayLoad() || TID.mayStore()) && 4763 !(TID.TSFlags & SIInstrFlags::VGPRSpill)) || 4764 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) { 4765 switch (RCID) { 4766 case AMDGPU::AV_32RegClassID: 4767 RCID = AMDGPU::VGPR_32RegClassID; 4768 break; 4769 case AMDGPU::AV_64RegClassID: 4770 RCID = AMDGPU::VReg_64RegClassID; 4771 break; 4772 case AMDGPU::AV_96RegClassID: 4773 RCID = AMDGPU::VReg_96RegClassID; 4774 break; 4775 case AMDGPU::AV_128RegClassID: 4776 RCID = AMDGPU::VReg_128RegClassID; 4777 break; 4778 case AMDGPU::AV_160RegClassID: 4779 RCID = AMDGPU::VReg_160RegClassID; 4780 break; 4781 case AMDGPU::AV_512RegClassID: 4782 RCID = AMDGPU::VReg_512RegClassID; 4783 break; 4784 default: 4785 break; 4786 } 4787 } 4788 4789 return RI.getProperlyAlignedRC(RI.getRegClass(RCID)); 4790 } 4791 4792 const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID, 4793 unsigned OpNum, const TargetRegisterInfo *TRI, 4794 const MachineFunction &MF) 4795 const { 4796 if (OpNum >= TID.getNumOperands()) 4797 return nullptr; 4798 auto RegClass = TID.OpInfo[OpNum].RegClass; 4799 bool IsAllocatable = false; 4800 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) { 4801 // vdst and vdata should be both VGPR or AGPR, same for the DS instructions 4802 // with two data operands. Request register class constrained to VGPR only 4803 // of both operands present as Machine Copy Propagation can not check this 4804 // constraint and possibly other passes too. 4805 // 4806 // The check is limited to FLAT and DS because atomics in non-flat encoding 4807 // have their vdst and vdata tied to be the same register. 4808 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4809 AMDGPU::OpName::vdst); 4810 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4811 (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 4812 : AMDGPU::OpName::vdata); 4813 if (DataIdx != -1) { 4814 IsAllocatable = VDstIdx != -1 || 4815 AMDGPU::getNamedOperandIdx(TID.Opcode, 4816 AMDGPU::OpName::data1) != -1; 4817 } 4818 } 4819 return adjustAllocatableRegClass(ST, RI, MF.getRegInfo(), TID, RegClass, 4820 IsAllocatable); 4821 } 4822 4823 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4824 unsigned OpNo) const { 4825 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4826 const MCInstrDesc &Desc = get(MI.getOpcode()); 4827 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4828 Desc.OpInfo[OpNo].RegClass == -1) { 4829 Register Reg = MI.getOperand(OpNo).getReg(); 4830 4831 if (Reg.isVirtual()) 4832 return MRI.getRegClass(Reg); 4833 return RI.getPhysRegClass(Reg); 4834 } 4835 4836 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4837 return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true); 4838 } 4839 4840 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4841 MachineBasicBlock::iterator I = MI; 4842 MachineBasicBlock *MBB = MI.getParent(); 4843 MachineOperand &MO = MI.getOperand(OpIdx); 4844 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4845 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4846 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4847 unsigned Size = RI.getRegSizeInBits(*RC); 4848 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4849 if (MO.isReg()) 4850 Opcode = AMDGPU::COPY; 4851 else if (RI.isSGPRClass(RC)) 4852 Opcode = (Size == 64) ? 
AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4853 4854 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4855 const TargetRegisterClass *VRC64 = RI.getVGPR64Class(); 4856 if (RI.getCommonSubClass(VRC64, VRC)) 4857 VRC = VRC64; 4858 else 4859 VRC = &AMDGPU::VGPR_32RegClass; 4860 4861 Register Reg = MRI.createVirtualRegister(VRC); 4862 DebugLoc DL = MBB->findDebugLoc(I); 4863 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4864 MO.ChangeToRegister(Reg, false); 4865 } 4866 4867 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4868 MachineRegisterInfo &MRI, 4869 MachineOperand &SuperReg, 4870 const TargetRegisterClass *SuperRC, 4871 unsigned SubIdx, 4872 const TargetRegisterClass *SubRC) 4873 const { 4874 MachineBasicBlock *MBB = MI->getParent(); 4875 DebugLoc DL = MI->getDebugLoc(); 4876 Register SubReg = MRI.createVirtualRegister(SubRC); 4877 4878 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4879 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4880 .addReg(SuperReg.getReg(), 0, SubIdx); 4881 return SubReg; 4882 } 4883 4884 // Just in case the super register is itself a sub-register, copy it to a new 4885 // value so we don't need to worry about merging its subreg index with the 4886 // SubIdx passed to this function. The register coalescer should be able to 4887 // eliminate this extra copy. 4888 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4889 4890 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4891 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4892 4893 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4894 .addReg(NewSuperReg, 0, SubIdx); 4895 4896 return SubReg; 4897 } 4898 4899 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4900 MachineBasicBlock::iterator MII, 4901 MachineRegisterInfo &MRI, 4902 MachineOperand &Op, 4903 const TargetRegisterClass *SuperRC, 4904 unsigned SubIdx, 4905 const TargetRegisterClass *SubRC) const { 4906 if (Op.isImm()) { 4907 if (SubIdx == AMDGPU::sub0) 4908 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4909 if (SubIdx == AMDGPU::sub1) 4910 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4911 4912 llvm_unreachable("Unhandled register index for immediate"); 4913 } 4914 4915 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4916 SubIdx, SubRC); 4917 return MachineOperand::CreateReg(SubReg, false); 4918 } 4919 4920 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4921 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4922 assert(Inst.getNumExplicitOperands() == 3); 4923 MachineOperand Op1 = Inst.getOperand(1); 4924 Inst.removeOperand(1); 4925 Inst.addOperand(Op1); 4926 } 4927 4928 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4929 const MCOperandInfo &OpInfo, 4930 const MachineOperand &MO) const { 4931 if (!MO.isReg()) 4932 return false; 4933 4934 Register Reg = MO.getReg(); 4935 4936 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4937 if (Reg.isPhysical()) 4938 return DRC->contains(Reg); 4939 4940 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4941 4942 if (MO.getSubReg()) { 4943 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4944 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4945 if (!SuperRC) 4946 return false; 4947 4948 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4949 if (!DRC) 4950 return false; 4951 } 4952 return RC->hasSuperClassEq(DRC); 4953 } 4954 4955 
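// Like isLegalRegOperand, but VSrc operands may also be immediate-like values
// (immediates, frame indexes, or global addresses).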
bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4956 const MCOperandInfo &OpInfo, 4957 const MachineOperand &MO) const { 4958 if (MO.isReg()) 4959 return isLegalRegOperand(MRI, OpInfo, MO); 4960 4961 // Handle non-register types that are treated like immediates. 4962 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4963 return true; 4964 } 4965 4966 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4967 const MachineOperand *MO) const { 4968 const MachineFunction &MF = *MI.getParent()->getParent(); 4969 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4970 const MCInstrDesc &InstDesc = MI.getDesc(); 4971 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4972 const TargetRegisterClass *DefinedRC = 4973 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4974 if (!MO) 4975 MO = &MI.getOperand(OpIdx); 4976 4977 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4978 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4979 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4980 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4981 return false; 4982 4983 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4984 if (MO->isReg()) 4985 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4986 4987 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4988 if (i == OpIdx) 4989 continue; 4990 const MachineOperand &Op = MI.getOperand(i); 4991 if (Op.isReg()) { 4992 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4993 if (!SGPRsUsed.count(SGPR) && 4994 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4995 if (--ConstantBusLimit <= 0) 4996 return false; 4997 SGPRsUsed.insert(SGPR); 4998 } 4999 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 5000 if (--ConstantBusLimit <= 0) 5001 return false; 5002 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 5003 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 5004 if (!VOP3LiteralLimit--) 5005 return false; 5006 if (--ConstantBusLimit <= 0) 5007 return false; 5008 } 5009 } 5010 } 5011 5012 if (MO->isReg()) { 5013 assert(DefinedRC); 5014 if (!isLegalRegOperand(MRI, OpInfo, *MO)) 5015 return false; 5016 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); 5017 if (IsAGPR && !ST.hasMAIInsts()) 5018 return false; 5019 unsigned Opc = MI.getOpcode(); 5020 if (IsAGPR && 5021 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 5022 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc))) 5023 return false; 5024 // Atomics should have both vdst and vdata either vgpr or agpr. 5025 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 5026 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc, 5027 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata); 5028 if ((int)OpIdx == VDstIdx && DataIdx != -1 && 5029 MI.getOperand(DataIdx).isReg() && 5030 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) 5031 return false; 5032 if ((int)OpIdx == DataIdx) { 5033 if (VDstIdx != -1 && 5034 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) 5035 return false; 5036 // DS instructions with 2 src operands also must have tied RC. 
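// e.g. for DS_WRITE2_B32 both data0 and data1 must be either VGPRs or AGPRs.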
5037 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, 5038 AMDGPU::OpName::data1); 5039 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() && 5040 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) 5041 return false; 5042 } 5043 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() && 5044 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) && 5045 RI.isSGPRReg(MRI, MO->getReg())) 5046 return false; 5047 return true; 5048 } 5049 5050 // Handle non-register types that are treated like immediates. 5051 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 5052 5053 if (!DefinedRC) { 5054 // This operand expects an immediate. 5055 return true; 5056 } 5057 5058 return isImmOperandLegal(MI, OpIdx, *MO); 5059 } 5060 5061 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 5062 MachineInstr &MI) const { 5063 unsigned Opc = MI.getOpcode(); 5064 const MCInstrDesc &InstrDesc = get(Opc); 5065 5066 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 5067 MachineOperand &Src0 = MI.getOperand(Src0Idx); 5068 5069 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 5070 MachineOperand &Src1 = MI.getOperand(Src1Idx); 5071 5072 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 5073 // we need to only have one constant bus use before GFX10. 5074 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 5075 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 5076 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 5077 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 5078 legalizeOpWithMove(MI, Src0Idx); 5079 5080 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 5081 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 5082 // src0/src1 with V_READFIRSTLANE. 5083 if (Opc == AMDGPU::V_WRITELANE_B32) { 5084 const DebugLoc &DL = MI.getDebugLoc(); 5085 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 5086 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5087 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5088 .add(Src0); 5089 Src0.ChangeToRegister(Reg, false); 5090 } 5091 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 5092 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5093 const DebugLoc &DL = MI.getDebugLoc(); 5094 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5095 .add(Src1); 5096 Src1.ChangeToRegister(Reg, false); 5097 } 5098 return; 5099 } 5100 5101 // No VOP2 instructions support AGPRs. 5102 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 5103 legalizeOpWithMove(MI, Src0Idx); 5104 5105 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 5106 legalizeOpWithMove(MI, Src1Idx); 5107 5108 // VOP2 src0 instructions support all operand types, so we don't need to check 5109 // their legality. If src1 is already legal, we don't need to do anything. 5110 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 5111 return; 5112 5113 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 5114 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 5115 // select is uniform. 
5116 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 5117 RI.isVGPR(MRI, Src1.getReg())) { 5118 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5119 const DebugLoc &DL = MI.getDebugLoc(); 5120 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5121 .add(Src1); 5122 Src1.ChangeToRegister(Reg, false); 5123 return; 5124 } 5125 5126 // We do not use commuteInstruction here because it is too aggressive and will 5127 // commute if it is possible. We only want to commute here if it improves 5128 // legality. This can be called a fairly large number of times so don't waste 5129 // compile time pointlessly swapping and checking legality again. 5130 if (HasImplicitSGPR || !MI.isCommutable()) { 5131 legalizeOpWithMove(MI, Src1Idx); 5132 return; 5133 } 5134 5135 // If src0 can be used as src1, commuting will make the operands legal. 5136 // Otherwise we have to give up and insert a move. 5137 // 5138 // TODO: Other immediate-like operand kinds could be commuted if there was a 5139 // MachineOperand::ChangeTo* for them. 5140 if ((!Src1.isImm() && !Src1.isReg()) || 5141 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 5142 legalizeOpWithMove(MI, Src1Idx); 5143 return; 5144 } 5145 5146 int CommutedOpc = commuteOpcode(MI); 5147 if (CommutedOpc == -1) { 5148 legalizeOpWithMove(MI, Src1Idx); 5149 return; 5150 } 5151 5152 MI.setDesc(get(CommutedOpc)); 5153 5154 Register Src0Reg = Src0.getReg(); 5155 unsigned Src0SubReg = Src0.getSubReg(); 5156 bool Src0Kill = Src0.isKill(); 5157 5158 if (Src1.isImm()) 5159 Src0.ChangeToImmediate(Src1.getImm()); 5160 else if (Src1.isReg()) { 5161 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 5162 Src0.setSubReg(Src1.getSubReg()); 5163 } else 5164 llvm_unreachable("Should only have register or immediate operands"); 5165 5166 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 5167 Src1.setSubReg(Src0SubReg); 5168 fixImplicitOperands(MI); 5169 } 5170 5171 // Legalize VOP3 operands. All operand types are supported for any operand 5172 // but only one literal constant and only starting from GFX10. 5173 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 5174 MachineInstr &MI) const { 5175 unsigned Opc = MI.getOpcode(); 5176 5177 int VOP3Idx[3] = { 5178 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 5179 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 5180 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 5181 }; 5182 5183 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 || 5184 Opc == AMDGPU::V_PERMLANEX16_B32_e64) { 5185 // src1 and src2 must be scalar 5186 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 5187 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 5188 const DebugLoc &DL = MI.getDebugLoc(); 5189 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 5190 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5191 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5192 .add(Src1); 5193 Src1.ChangeToRegister(Reg, false); 5194 } 5195 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 5196 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5197 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5198 .add(Src2); 5199 Src2.ChangeToRegister(Reg, false); 5200 } 5201 } 5202 5203 // Find the one SGPR operand we are allowed to use. 5204 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 5205 int LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0;
5206 SmallDenseSet<unsigned> SGPRsUsed;
5207 Register SGPRReg = findUsedSGPR(MI, VOP3Idx);
5208 if (SGPRReg != AMDGPU::NoRegister) {
5209 SGPRsUsed.insert(SGPRReg);
5210 --ConstantBusLimit;
5211 }
5212
5213 for (int Idx : VOP3Idx) {
5214 if (Idx == -1)
5215 break;
5216 MachineOperand &MO = MI.getOperand(Idx);
5217
5218 if (!MO.isReg()) {
5219 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx]))
5220 continue;
5221
5222 if (LiteralLimit > 0 && ConstantBusLimit > 0) {
5223 --LiteralLimit;
5224 --ConstantBusLimit;
5225 continue;
5226 }
5227
5228 --LiteralLimit;
5229 --ConstantBusLimit;
5230 legalizeOpWithMove(MI, Idx);
5231 continue;
5232 }
5233
5234 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) &&
5235 !isOperandLegal(MI, Idx, &MO)) {
5236 legalizeOpWithMove(MI, Idx);
5237 continue;
5238 }
5239
5240 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg())))
5241 continue; // VGPRs are legal
5242
5243 // We can use one SGPR in each VOP3 instruction prior to GFX10
5244 // and two starting from GFX10.
5245 if (SGPRsUsed.count(MO.getReg()))
5246 continue;
5247 if (ConstantBusLimit > 0) {
5248 SGPRsUsed.insert(MO.getReg());
5249 --ConstantBusLimit;
5250 continue;
5251 }
5252
5253 // If we make it this far, then the operand is not legal and we must
5254 // legalize it.
5255 legalizeOpWithMove(MI, Idx);
5256 }
5257 }
5258
5259 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
5260 MachineRegisterInfo &MRI) const {
5261 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
5262 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
5263 Register DstReg = MRI.createVirtualRegister(SRC);
5264 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
5265
5266 if (RI.hasAGPRs(VRC)) {
5267 VRC = RI.getEquivalentVGPRClass(VRC);
5268 Register NewSrcReg = MRI.createVirtualRegister(VRC);
5269 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5270 get(TargetOpcode::COPY), NewSrcReg)
5271 .addReg(SrcReg);
5272 SrcReg = NewSrcReg;
5273 }
5274
5275 if (SubRegs == 1) {
5276 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5277 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
5278 .addReg(SrcReg);
5279 return DstReg;
5280 }
5281
5282 SmallVector<unsigned, 8> SRegs;
5283 for (unsigned i = 0; i < SubRegs; ++i) {
5284 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5285 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5286 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
5287 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
5288 SRegs.push_back(SGPR);
5289 }
5290
5291 MachineInstrBuilder MIB =
5292 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5293 get(AMDGPU::REG_SEQUENCE), DstReg);
5294 for (unsigned i = 0; i < SubRegs; ++i) {
5295 MIB.addReg(SRegs[i]);
5296 MIB.addImm(RI.getSubRegFromChannel(i));
5297 }
5298 return DstReg;
5299 }
5300
5301 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
5302 MachineInstr &MI) const {
5303
5304 // If the pointer is stored in VGPRs, then we need to move them to
5305 // SGPRs using v_readfirstlane. This is safe because we only select
5306 // loads with uniform pointers to SMRD instructions so we know the
5307 // pointer value is uniform.
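// Both the base pointer (sbase) and the scalar offset (soff), when present,
// are rewritten to SGPRs below.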
5308 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5309 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5310 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5311 SBase->setReg(SGPR);
5312 }
5313 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
5314 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5315 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5316 SOff->setReg(SGPR);
5317 }
5318 }
5319
5320 bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
5321 unsigned Opc = Inst.getOpcode();
5322 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
5323 if (OldSAddrIdx < 0)
5324 return false;
5325
5326 assert(isSegmentSpecificFLAT(Inst));
5327
5328 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
5329 if (NewOpc < 0)
5330 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
5331 if (NewOpc < 0)
5332 return false;
5333
5334 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
5335 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
5336 if (RI.isSGPRReg(MRI, SAddr.getReg()))
5337 return false;
5338
5339 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
5340 if (NewVAddrIdx < 0)
5341 return false;
5342
5343 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
5344
5345 // Check vaddr, it shall be zero or absent.
5346 MachineInstr *VAddrDef = nullptr;
5347 if (OldVAddrIdx >= 0) {
5348 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
5349 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
5350 if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 ||
5351 !VAddrDef->getOperand(1).isImm() ||
5352 VAddrDef->getOperand(1).getImm() != 0)
5353 return false;
5354 }
5355
5356 const MCInstrDesc &NewDesc = get(NewOpc);
5357 Inst.setDesc(NewDesc);
5358
5359 // Callers expect iterator to be valid after this call, so modify the
5360 // instruction in place.
5361 if (OldVAddrIdx == NewVAddrIdx) {
5362 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
5363 // Clear use list from the old vaddr holding a zero register.
5364 MRI.removeRegOperandFromUseList(&NewVAddr);
5365 MRI.moveOperands(&NewVAddr, &SAddr, 1);
5366 Inst.removeOperand(OldSAddrIdx);
5367 // Update the use list with the pointer we have just moved from vaddr to
5368 // saddr position. Otherwise new vaddr will be missing from the use list.
5369 MRI.removeRegOperandFromUseList(&NewVAddr);
5370 MRI.addRegOperandToUseList(&NewVAddr);
5371 } else {
5372 assert(OldSAddrIdx == NewVAddrIdx);
5373
5374 if (OldVAddrIdx >= 0) {
5375 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
5376 AMDGPU::OpName::vdst_in);
5377
5378 // removeOperand doesn't try to fixup tied operand indexes as it goes, so
5379 // it asserts. Untie the operands for now and retie them afterwards.
5380 if (NewVDstIn != -1) {
5381 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
5382 Inst.untieRegOperand(OldVDstIn);
5383 }
5384
5385 Inst.removeOperand(OldVAddrIdx);
5386
5387 if (NewVDstIn != -1) {
5388 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
5389 Inst.tieOperands(NewVDst, NewVDstIn);
5390 }
5391 }
5392 }
5393
5394 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
5395 VAddrDef->eraseFromParent();
5396
5397 return true;
5398 }
5399
5400 // FIXME: Remove this when SelectionDAG is obsoleted.
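// Legalize a segment-specific FLAT instruction whose saddr operand was
// selected into a VGPR: prefer folding the address into the VGPR-addressed
// form via moveFlatAddrToVGPR, and otherwise read the value back to an SGPR
// with readlaneVGPRToSGPR.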
5401 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 5402 MachineInstr &MI) const { 5403 if (!isSegmentSpecificFLAT(MI)) 5404 return; 5405 5406 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 5407 // thinks they are uniform, so a readfirstlane should be valid. 5408 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 5409 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 5410 return; 5411 5412 if (moveFlatAddrToVGPR(MI)) 5413 return; 5414 5415 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 5416 SAddr->setReg(ToSGPR); 5417 } 5418 5419 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 5420 MachineBasicBlock::iterator I, 5421 const TargetRegisterClass *DstRC, 5422 MachineOperand &Op, 5423 MachineRegisterInfo &MRI, 5424 const DebugLoc &DL) const { 5425 Register OpReg = Op.getReg(); 5426 unsigned OpSubReg = Op.getSubReg(); 5427 5428 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 5429 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 5430 5431 // Check if operand is already the correct register class. 5432 if (DstRC == OpRC) 5433 return; 5434 5435 Register DstReg = MRI.createVirtualRegister(DstRC); 5436 auto Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 5437 5438 Op.setReg(DstReg); 5439 Op.setSubReg(0); 5440 5441 MachineInstr *Def = MRI.getVRegDef(OpReg); 5442 if (!Def) 5443 return; 5444 5445 // Try to eliminate the copy if it is copying an immediate value. 5446 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 5447 FoldImmediate(*Copy, *Def, OpReg, &MRI); 5448 5449 bool ImpDef = Def->isImplicitDef(); 5450 while (!ImpDef && Def && Def->isCopy()) { 5451 if (Def->getOperand(1).getReg().isPhysical()) 5452 break; 5453 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 5454 ImpDef = Def && Def->isImplicitDef(); 5455 } 5456 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 5457 !ImpDef) 5458 Copy.addReg(AMDGPU::EXEC, RegState::Implicit); 5459 } 5460 5461 // Emit the actual waterfall loop, executing the wrapped instruction for each 5462 // unique value of \p Rsrc across all lanes. In the best case we execute 1 5463 // iteration, in the worst case we execute 64 (once per lane). 5464 static void 5465 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 5466 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 5467 const DebugLoc &DL, MachineOperand &Rsrc) { 5468 MachineFunction &MF = *OrigBB.getParent(); 5469 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5470 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5471 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5472 unsigned SaveExecOpc = 5473 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 5474 unsigned XorTermOpc = 5475 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 5476 unsigned AndOpc = 5477 ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5478 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5479 5480 MachineBasicBlock::iterator I = LoopBB.begin(); 5481 5482 SmallVector<Register, 8> ReadlanePieces; 5483 Register CondReg = AMDGPU::NoRegister; 5484 5485 Register VRsrc = Rsrc.getReg(); 5486 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 5487 5488 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 5489 unsigned NumSubRegs = RegSize / 32; 5490 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 5491 5492 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 5493 5494 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5495 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5496 5497 // Read the next variant <- also loop target. 5498 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 5499 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 5500 5501 // Read the next variant <- also loop target. 5502 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 5503 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 5504 5505 ReadlanePieces.push_back(CurRegLo); 5506 ReadlanePieces.push_back(CurRegHi); 5507 5508 // Comparison is to be done as 64-bit. 5509 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 5510 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 5511 .addReg(CurRegLo) 5512 .addImm(AMDGPU::sub0) 5513 .addReg(CurRegHi) 5514 .addImm(AMDGPU::sub1); 5515 5516 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 5517 auto Cmp = 5518 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 5519 .addReg(CurReg); 5520 if (NumSubRegs <= 2) 5521 Cmp.addReg(VRsrc); 5522 else 5523 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 5524 5525 // Combine the comparison results with AND. 5526 if (CondReg == AMDGPU::NoRegister) // First. 5527 CondReg = NewCondReg; 5528 else { // If not the first, we create an AND. 5529 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 5530 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 5531 .addReg(CondReg) 5532 .addReg(NewCondReg); 5533 CondReg = AndReg; 5534 } 5535 } // End for loop. 5536 5537 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 5538 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 5539 5540 // Build scalar Rsrc. 5541 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 5542 unsigned Channel = 0; 5543 for (Register Piece : ReadlanePieces) { 5544 Merge.addReg(Piece) 5545 .addImm(TRI->getSubRegFromChannel(Channel++)); 5546 } 5547 5548 // Update Rsrc operand to use the SGPR Rsrc. 5549 Rsrc.setReg(SRsrc); 5550 Rsrc.setIsKill(true); 5551 5552 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5553 MRI.setSimpleHint(SaveExec, CondReg); 5554 5555 // Update EXEC to matching lanes, saving original to SaveExec. 5556 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 5557 .addReg(CondReg, RegState::Kill); 5558 5559 // The original instruction is here; we insert the terminators after it. 5560 I = LoopBB.end(); 5561 5562 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 
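// (The XOR against the exec mask saved by the s_and_saveexec above leaves
// exactly the not-yet-processed lanes set, and SI_WATERFALL_LOOP branches back
// to LoopBB until no such lanes remain.)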
5563 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 5564 .addReg(Exec) 5565 .addReg(SaveExec); 5566 5567 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB); 5568 } 5569 5570 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 5571 // with SGPRs by iterating over all unique values across all lanes. 5572 // Returns the loop basic block that now contains \p MI. 5573 static MachineBasicBlock * 5574 loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 5575 MachineOperand &Rsrc, MachineDominatorTree *MDT, 5576 MachineBasicBlock::iterator Begin = nullptr, 5577 MachineBasicBlock::iterator End = nullptr) { 5578 MachineBasicBlock &MBB = *MI.getParent(); 5579 MachineFunction &MF = *MBB.getParent(); 5580 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5581 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5582 MachineRegisterInfo &MRI = MF.getRegInfo(); 5583 if (!Begin.isValid()) 5584 Begin = &MI; 5585 if (!End.isValid()) { 5586 End = &MI; 5587 ++End; 5588 } 5589 const DebugLoc &DL = MI.getDebugLoc(); 5590 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5591 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 5592 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5593 5594 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5595 5596 // Save the EXEC mask 5597 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 5598 5599 // Killed uses in the instruction we are waterfalling around will be 5600 // incorrect due to the added control-flow. 5601 MachineBasicBlock::iterator AfterMI = MI; 5602 ++AfterMI; 5603 for (auto I = Begin; I != AfterMI; I++) { 5604 for (auto &MO : I->uses()) { 5605 if (MO.isReg() && MO.isUse()) { 5606 MRI.clearKillFlags(MO.getReg()); 5607 } 5608 } 5609 } 5610 5611 // To insert the loop we need to split the block. Move everything after this 5612 // point to a new block, and insert a new empty block between the two. 5613 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 5614 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 5615 MachineFunction::iterator MBBI(MBB); 5616 ++MBBI; 5617 5618 MF.insert(MBBI, LoopBB); 5619 MF.insert(MBBI, RemainderBB); 5620 5621 LoopBB->addSuccessor(LoopBB); 5622 LoopBB->addSuccessor(RemainderBB); 5623 5624 // Move Begin to MI to the LoopBB, and the remainder of the block to 5625 // RemainderBB. 5626 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 5627 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 5628 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end()); 5629 5630 MBB.addSuccessor(LoopBB); 5631 5632 // Update dominators. We know that MBB immediately dominates LoopBB, that 5633 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 5634 // dominates all of the successors transferred to it from MBB that MBB used 5635 // to properly dominate. 
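// (After the split, MBB's only successor is LoopBB and LoopBB is the sole
// predecessor of RemainderBB, which is why RemainderBB's immediate dominator
// is LoopBB rather than MBB.)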
5636 if (MDT) {
5637 MDT->addNewBlock(LoopBB, &MBB);
5638 MDT->addNewBlock(RemainderBB, LoopBB);
5639 for (auto &Succ : RemainderBB->successors()) {
5640 if (MDT->properlyDominates(&MBB, Succ)) {
5641 MDT->changeImmediateDominator(Succ, RemainderBB);
5642 }
5643 }
5644 }
5645
5646 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc);
5647
5648 // Restore the EXEC mask
5649 MachineBasicBlock::iterator First = RemainderBB->begin();
5650 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
5651 return LoopBB;
5652 }
5653
5654 // Extract pointer from Rsrc and return a zero-value Rsrc replacement.
5655 static std::tuple<unsigned, unsigned>
5656 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
5657 MachineBasicBlock &MBB = *MI.getParent();
5658 MachineFunction &MF = *MBB.getParent();
5659 MachineRegisterInfo &MRI = MF.getRegInfo();
5660
5661 // Extract the ptr from the resource descriptor.
5662 unsigned RsrcPtr =
5663 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
5664 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
5665
5666 // Create an empty resource descriptor
5667 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5668 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5669 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5670 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
5671 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
5672
5673 // Zero64 = 0
5674 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
5675 .addImm(0);
5676
5677 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
5678 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
5679 .addImm(RsrcDataFormat & 0xFFFFFFFF);
5680
5681 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
5682 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
5683 .addImm(RsrcDataFormat >> 32);
5684
5685 // NewSRsrc = {Zero64, SRsrcFormat}
5686 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
5687 .addReg(Zero64)
5688 .addImm(AMDGPU::sub0_sub1)
5689 .addReg(SRsrcFormatLo)
5690 .addImm(AMDGPU::sub2)
5691 .addReg(SRsrcFormatHi)
5692 .addImm(AMDGPU::sub3);
5693
5694 return std::make_tuple(RsrcPtr, NewSRsrc);
5695 }
5696
5697 MachineBasicBlock *
5698 SIInstrInfo::legalizeOperands(MachineInstr &MI,
5699 MachineDominatorTree *MDT) const {
5700 MachineFunction &MF = *MI.getParent()->getParent();
5701 MachineRegisterInfo &MRI = MF.getRegInfo();
5702 MachineBasicBlock *CreatedBB = nullptr;
5703
5704 // Legalize VOP2
5705 if (isVOP2(MI) || isVOPC(MI)) {
5706 legalizeOperandsVOP2(MRI, MI);
5707 return CreatedBB;
5708 }
5709
5710 // Legalize VOP3
5711 if (isVOP3(MI)) {
5712 legalizeOperandsVOP3(MRI, MI);
5713 return CreatedBB;
5714 }
5715
5716 // Legalize SMRD
5717 if (isSMRD(MI)) {
5718 legalizeOperandsSMRD(MRI, MI);
5719 return CreatedBB;
5720 }
5721
5722 // Legalize FLAT
5723 if (isFLAT(MI)) {
5724 legalizeOperandsFLAT(MRI, MI);
5725 return CreatedBB;
5726 }
5727
5728 // Legalize REG_SEQUENCE and PHI
5729 // The register class of the operands must be the same type as the register
5730 // class of the output.
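// For a PHI, first scan the incoming values: if any of them already lives in
// a vector register, the whole PHI (result and all inputs) is forced into an
// equivalent VGPR/AGPR class; otherwise the scalar class is kept.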
5731 if (MI.getOpcode() == AMDGPU::PHI) {
5732 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5733 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5734 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5735 continue;
5736 const TargetRegisterClass *OpRC =
5737 MRI.getRegClass(MI.getOperand(i).getReg());
5738 if (RI.hasVectorRegisters(OpRC)) {
5739 VRC = OpRC;
5740 } else {
5741 SRC = OpRC;
5742 }
5743 }
5744
5745 // If any of the operands are VGPR registers, then they all must be VGPRs;
5746 // otherwise we will create illegal VGPR->SGPR copies when legalizing
5747 // them.
5748 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5749 if (!VRC) {
5750 assert(SRC);
5751 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5752 VRC = &AMDGPU::VReg_1RegClass;
5753 } else
5754 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5755 ? RI.getEquivalentAGPRClass(SRC)
5756 : RI.getEquivalentVGPRClass(SRC);
5757 } else {
5758 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5759 ? RI.getEquivalentAGPRClass(VRC)
5760 : RI.getEquivalentVGPRClass(VRC);
5761 }
5762 RC = VRC;
5763 } else {
5764 RC = SRC;
5765 }
5766
5767 // Update all the operands so they have the same type.
5768 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5769 MachineOperand &Op = MI.getOperand(I);
5770 if (!Op.isReg() || !Op.getReg().isVirtual())
5771 continue;
5772
5773 // MI is a PHI instruction.
5774 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5775 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5776
5777 // Avoid creating no-op copies with the same src and dst reg class. These
5778 // confuse some of the machine passes.
5779 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5780 }
5781 }
5782
5783 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5784 // VGPR dest type and SGPR sources, insert copies so all operands are
5785 // VGPRs. This seems to help operand folding / the register coalescer.
5786 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5787 MachineBasicBlock *MBB = MI.getParent();
5788 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5789 if (RI.hasVGPRs(DstRC)) {
5790 // Update all the operands so they are VGPR register classes. These may
5791 // not be the same register class because REG_SEQUENCE supports mixing
5792 // subregister index types e.g.
sub0_sub1 + sub2 + sub3 5793 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5794 MachineOperand &Op = MI.getOperand(I); 5795 if (!Op.isReg() || !Op.getReg().isVirtual()) 5796 continue; 5797 5798 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5799 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5800 if (VRC == OpRC) 5801 continue; 5802 5803 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5804 Op.setIsKill(); 5805 } 5806 } 5807 5808 return CreatedBB; 5809 } 5810 5811 // Legalize INSERT_SUBREG 5812 // src0 must have the same register class as dst 5813 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5814 Register Dst = MI.getOperand(0).getReg(); 5815 Register Src0 = MI.getOperand(1).getReg(); 5816 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5817 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5818 if (DstRC != Src0RC) { 5819 MachineBasicBlock *MBB = MI.getParent(); 5820 MachineOperand &Op = MI.getOperand(1); 5821 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5822 } 5823 return CreatedBB; 5824 } 5825 5826 // Legalize SI_INIT_M0 5827 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5828 MachineOperand &Src = MI.getOperand(0); 5829 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5830 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5831 return CreatedBB; 5832 } 5833 5834 // Legalize MIMG and MUBUF/MTBUF for shaders. 5835 // 5836 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5837 // scratch memory access. In both cases, the legalization never involves 5838 // conversion to the addr64 form. 5839 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) && 5840 (isMUBUF(MI) || isMTBUF(MI)))) { 5841 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 5842 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) 5843 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT); 5844 5845 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 5846 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) 5847 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT); 5848 5849 return CreatedBB; 5850 } 5851 5852 // Legalize SI_CALL 5853 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 5854 MachineOperand *Dest = &MI.getOperand(0); 5855 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { 5856 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and 5857 // following copies, we also need to move copies from and to physical 5858 // registers into the loop block. 5859 unsigned FrameSetupOpcode = getCallFrameSetupOpcode(); 5860 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode(); 5861 5862 // Also move the copies to physical registers into the loop block 5863 MachineBasicBlock &MBB = *MI.getParent(); 5864 MachineBasicBlock::iterator Start(&MI); 5865 while (Start->getOpcode() != FrameSetupOpcode) 5866 --Start; 5867 MachineBasicBlock::iterator End(&MI); 5868 while (End->getOpcode() != FrameDestroyOpcode) 5869 ++End; 5870 // Also include following copies of the return value 5871 ++End; 5872 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() && 5873 MI.definesRegister(End->getOperand(1).getReg())) 5874 ++End; 5875 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End); 5876 } 5877 } 5878 5879 // Legalize MUBUF* instructions. 
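// Everything else reaching this point is a buffer access with an srsrc
// operand. If the resource descriptor is not already in SGPRs, it is either
// folded into an ADDR64 address computation or replaced through a waterfall
// loop below.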
5880 int RsrcIdx =
5881 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5882 if (RsrcIdx != -1) {
5883 // We have an MUBUF instruction
5884 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5885 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5886 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5887 RI.getRegClass(RsrcRC))) {
5888 // The operands are legal.
5889 // FIXME: We may need to legalize operands besides srsrc.
5890 return CreatedBB;
5891 }
5892
5893 // Legalize a VGPR Rsrc.
5894 //
5895 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5896 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5897 // a zero-value SRsrc.
5898 //
5899 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5900 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5901 // above.
5902 //
5903 // Otherwise we are on non-ADDR64 hardware, and/or we have
5904 // idxen/offen/bothen and we fall back to a waterfall loop.
5905
5906 MachineBasicBlock &MBB = *MI.getParent();
5907
5908 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5909 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5910 // This is already an ADDR64 instruction so we need to add the pointer
5911 // extracted from the resource descriptor to the current value of VAddr.
5912 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5913 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5914 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5915
5916 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5917 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5918 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5919
5920 unsigned RsrcPtr, NewSRsrc;
5921 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5922
5923 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5924 const DebugLoc &DL = MI.getDebugLoc();
5925 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5926 .addDef(CondReg0)
5927 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5928 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5929 .addImm(0);
5930
5931 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5932 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5933 .addDef(CondReg1, RegState::Dead)
5934 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5935 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5936 .addReg(CondReg0, RegState::Kill)
5937 .addImm(0);
5938
5939 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5940 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5941 .addReg(NewVAddrLo)
5942 .addImm(AMDGPU::sub0)
5943 .addReg(NewVAddrHi)
5944 .addImm(AMDGPU::sub1);
5945
5946 VAddr->setReg(NewVAddr);
5947 Rsrc->setReg(NewSRsrc);
5948 } else if (!VAddr && ST.hasAddr64()) {
5949 // This instruction is the _OFFSET variant, so we need to convert it to
5950 // ADDR64.
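// ADDR64 addressing only exists on targets before Volcanic Islands, so
// ST.hasAddr64() already rules this path out on newer hardware; the assert
// below documents that assumption.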
5951 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS && 5952 "FIXME: Need to emit flat atomics here"); 5953 5954 unsigned RsrcPtr, NewSRsrc; 5955 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 5956 5957 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5958 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 5959 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 5960 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 5961 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 5962 5963 // Atomics with return have an additional tied operand and are 5964 // missing some of the special bits. 5965 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 5966 MachineInstr *Addr64; 5967 5968 if (!VDataIn) { 5969 // Regular buffer load / store. 5970 MachineInstrBuilder MIB = 5971 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5972 .add(*VData) 5973 .addReg(NewVAddr) 5974 .addReg(NewSRsrc) 5975 .add(*SOffset) 5976 .add(*Offset); 5977 5978 if (const MachineOperand *CPol = 5979 getNamedOperand(MI, AMDGPU::OpName::cpol)) { 5980 MIB.addImm(CPol->getImm()); 5981 } 5982 5983 if (const MachineOperand *TFE = 5984 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 5985 MIB.addImm(TFE->getImm()); 5986 } 5987 5988 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz)); 5989 5990 MIB.cloneMemRefs(MI); 5991 Addr64 = MIB; 5992 } else { 5993 // Atomics with return. 5994 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5995 .add(*VData) 5996 .add(*VDataIn) 5997 .addReg(NewVAddr) 5998 .addReg(NewSRsrc) 5999 .add(*SOffset) 6000 .add(*Offset) 6001 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol)) 6002 .cloneMemRefs(MI); 6003 } 6004 6005 MI.removeFromParent(); 6006 6007 // NewVaddr = {NewVaddrHi, NewVaddrLo} 6008 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 6009 NewVAddr) 6010 .addReg(RsrcPtr, 0, AMDGPU::sub0) 6011 .addImm(AMDGPU::sub0) 6012 .addReg(RsrcPtr, 0, AMDGPU::sub1) 6013 .addImm(AMDGPU::sub1); 6014 } else { 6015 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 6016 // to SGPRs. 6017 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 6018 return CreatedBB; 6019 } 6020 } 6021 return CreatedBB; 6022 } 6023 6024 MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst, 6025 MachineDominatorTree *MDT) const { 6026 SetVectorType Worklist; 6027 Worklist.insert(&TopInst); 6028 MachineBasicBlock *CreatedBB = nullptr; 6029 MachineBasicBlock *CreatedBBTmp = nullptr; 6030 6031 while (!Worklist.empty()) { 6032 MachineInstr &Inst = *Worklist.pop_back_val(); 6033 MachineBasicBlock *MBB = Inst.getParent(); 6034 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 6035 6036 unsigned Opcode = Inst.getOpcode(); 6037 unsigned NewOpcode = getVALUOp(Inst); 6038 6039 // Handle some special cases 6040 switch (Opcode) { 6041 default: 6042 break; 6043 case AMDGPU::S_ADD_U64_PSEUDO: 6044 case AMDGPU::S_SUB_U64_PSEUDO: 6045 splitScalar64BitAddSub(Worklist, Inst, MDT); 6046 Inst.eraseFromParent(); 6047 continue; 6048 case AMDGPU::S_ADD_I32: 6049 case AMDGPU::S_SUB_I32: { 6050 // FIXME: The u32 versions currently selected use the carry. 
6051 bool Changed; 6052 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT); 6053 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6054 CreatedBB = CreatedBBTmp; 6055 if (Changed) 6056 continue; 6057 6058 // Default handling 6059 break; 6060 } 6061 case AMDGPU::S_AND_B64: 6062 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 6063 Inst.eraseFromParent(); 6064 continue; 6065 6066 case AMDGPU::S_OR_B64: 6067 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 6068 Inst.eraseFromParent(); 6069 continue; 6070 6071 case AMDGPU::S_XOR_B64: 6072 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 6073 Inst.eraseFromParent(); 6074 continue; 6075 6076 case AMDGPU::S_NAND_B64: 6077 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 6078 Inst.eraseFromParent(); 6079 continue; 6080 6081 case AMDGPU::S_NOR_B64: 6082 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 6083 Inst.eraseFromParent(); 6084 continue; 6085 6086 case AMDGPU::S_XNOR_B64: 6087 if (ST.hasDLInsts()) 6088 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 6089 else 6090 splitScalar64BitXnor(Worklist, Inst, MDT); 6091 Inst.eraseFromParent(); 6092 continue; 6093 6094 case AMDGPU::S_ANDN2_B64: 6095 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 6096 Inst.eraseFromParent(); 6097 continue; 6098 6099 case AMDGPU::S_ORN2_B64: 6100 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 6101 Inst.eraseFromParent(); 6102 continue; 6103 6104 case AMDGPU::S_BREV_B64: 6105 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true); 6106 Inst.eraseFromParent(); 6107 continue; 6108 6109 case AMDGPU::S_NOT_B64: 6110 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 6111 Inst.eraseFromParent(); 6112 continue; 6113 6114 case AMDGPU::S_BCNT1_I32_B64: 6115 splitScalar64BitBCNT(Worklist, Inst); 6116 Inst.eraseFromParent(); 6117 continue; 6118 6119 case AMDGPU::S_BFE_I64: 6120 splitScalar64BitBFE(Worklist, Inst); 6121 Inst.eraseFromParent(); 6122 continue; 6123 6124 case AMDGPU::S_LSHL_B32: 6125 if (ST.hasOnlyRevVALUShifts()) { 6126 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 6127 swapOperands(Inst); 6128 } 6129 break; 6130 case AMDGPU::S_ASHR_I32: 6131 if (ST.hasOnlyRevVALUShifts()) { 6132 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 6133 swapOperands(Inst); 6134 } 6135 break; 6136 case AMDGPU::S_LSHR_B32: 6137 if (ST.hasOnlyRevVALUShifts()) { 6138 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 6139 swapOperands(Inst); 6140 } 6141 break; 6142 case AMDGPU::S_LSHL_B64: 6143 if (ST.hasOnlyRevVALUShifts()) { 6144 NewOpcode = AMDGPU::V_LSHLREV_B64_e64; 6145 swapOperands(Inst); 6146 } 6147 break; 6148 case AMDGPU::S_ASHR_I64: 6149 if (ST.hasOnlyRevVALUShifts()) { 6150 NewOpcode = AMDGPU::V_ASHRREV_I64_e64; 6151 swapOperands(Inst); 6152 } 6153 break; 6154 case AMDGPU::S_LSHR_B64: 6155 if (ST.hasOnlyRevVALUShifts()) { 6156 NewOpcode = AMDGPU::V_LSHRREV_B64_e64; 6157 swapOperands(Inst); 6158 } 6159 break; 6160 6161 case AMDGPU::S_ABS_I32: 6162 lowerScalarAbs(Worklist, Inst); 6163 Inst.eraseFromParent(); 6164 continue; 6165 6166 case AMDGPU::S_CBRANCH_SCC0: 6167 case AMDGPU::S_CBRANCH_SCC1: { 6168 // Clear unused bits of vcc 6169 Register CondReg = Inst.getOperand(1).getReg(); 6170 bool IsSCC = CondReg == AMDGPU::SCC; 6171 Register VCC = RI.getVCC(); 6172 Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 6173 unsigned Opc = ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 6174 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC) 6175 .addReg(EXEC) 6176 .addReg(IsSCC ? VCC : CondReg); 6177 Inst.removeOperand(1); 6178 } 6179 break; 6180 6181 case AMDGPU::S_BFE_U64: 6182 case AMDGPU::S_BFM_B64: 6183 llvm_unreachable("Moving this op to VALU not implemented"); 6184 6185 case AMDGPU::S_PACK_LL_B32_B16: 6186 case AMDGPU::S_PACK_LH_B32_B16: 6187 case AMDGPU::S_PACK_HH_B32_B16: 6188 movePackToVALU(Worklist, MRI, Inst); 6189 Inst.eraseFromParent(); 6190 continue; 6191 6192 case AMDGPU::S_XNOR_B32: 6193 lowerScalarXnor(Worklist, Inst); 6194 Inst.eraseFromParent(); 6195 continue; 6196 6197 case AMDGPU::S_NAND_B32: 6198 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 6199 Inst.eraseFromParent(); 6200 continue; 6201 6202 case AMDGPU::S_NOR_B32: 6203 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 6204 Inst.eraseFromParent(); 6205 continue; 6206 6207 case AMDGPU::S_ANDN2_B32: 6208 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 6209 Inst.eraseFromParent(); 6210 continue; 6211 6212 case AMDGPU::S_ORN2_B32: 6213 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 6214 Inst.eraseFromParent(); 6215 continue; 6216 6217 // TODO: remove as soon as everything is ready 6218 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 6219 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 6220 // can only be selected from the uniform SDNode. 6221 case AMDGPU::S_ADD_CO_PSEUDO: 6222 case AMDGPU::S_SUB_CO_PSEUDO: { 6223 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 6224 ? AMDGPU::V_ADDC_U32_e64 6225 : AMDGPU::V_SUBB_U32_e64; 6226 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6227 6228 Register CarryInReg = Inst.getOperand(4).getReg(); 6229 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 6230 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 6231 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 6232 .addReg(CarryInReg); 6233 } 6234 6235 Register CarryOutReg = Inst.getOperand(1).getReg(); 6236 6237 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 6238 MRI.getRegClass(Inst.getOperand(0).getReg()))); 6239 MachineInstr *CarryOp = 6240 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 6241 .addReg(CarryOutReg, RegState::Define) 6242 .add(Inst.getOperand(2)) 6243 .add(Inst.getOperand(3)) 6244 .addReg(CarryInReg) 6245 .addImm(0); 6246 CreatedBBTmp = legalizeOperands(*CarryOp); 6247 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6248 CreatedBB = CreatedBBTmp; 6249 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 6250 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 6251 Inst.eraseFromParent(); 6252 } 6253 continue; 6254 case AMDGPU::S_UADDO_PSEUDO: 6255 case AMDGPU::S_USUBO_PSEUDO: { 6256 const DebugLoc &DL = Inst.getDebugLoc(); 6257 MachineOperand &Dest0 = Inst.getOperand(0); 6258 MachineOperand &Dest1 = Inst.getOperand(1); 6259 MachineOperand &Src0 = Inst.getOperand(2); 6260 MachineOperand &Src1 = Inst.getOperand(3); 6261 6262 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 6263 ? 
AMDGPU::V_ADD_CO_U32_e64
6264 : AMDGPU::V_SUB_CO_U32_e64;
6265 const TargetRegisterClass *NewRC =
6266 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg()));
6267 Register DestReg = MRI.createVirtualRegister(NewRC);
6268 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg)
6269 .addReg(Dest1.getReg(), RegState::Define)
6270 .add(Src0)
6271 .add(Src1)
6272 .addImm(0); // clamp bit
6273
6274 CreatedBBTmp = legalizeOperands(*NewInstr, MDT);
6275 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
6276 CreatedBB = CreatedBBTmp;
6277
6278 MRI.replaceRegWith(Dest0.getReg(), DestReg);
6279 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI,
6280 Worklist);
6281 Inst.eraseFromParent();
6282 }
6283 continue;
6284
6285 case AMDGPU::S_CSELECT_B32:
6286 case AMDGPU::S_CSELECT_B64:
6287 lowerSelect(Worklist, Inst, MDT);
6288 Inst.eraseFromParent();
6289 continue;
6290 case AMDGPU::S_CMP_EQ_I32:
6291 case AMDGPU::S_CMP_LG_I32:
6292 case AMDGPU::S_CMP_GT_I32:
6293 case AMDGPU::S_CMP_GE_I32:
6294 case AMDGPU::S_CMP_LT_I32:
6295 case AMDGPU::S_CMP_LE_I32:
6296 case AMDGPU::S_CMP_EQ_U32:
6297 case AMDGPU::S_CMP_LG_U32:
6298 case AMDGPU::S_CMP_GT_U32:
6299 case AMDGPU::S_CMP_GE_U32:
6300 case AMDGPU::S_CMP_LT_U32:
6301 case AMDGPU::S_CMP_LE_U32:
6302 case AMDGPU::S_CMP_EQ_U64:
6303 case AMDGPU::S_CMP_LG_U64: {
6304 const MCInstrDesc &NewDesc = get(NewOpcode);
6305 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass());
6306 MachineInstr *NewInstr =
6307 BuildMI(*MBB, Inst, Inst.getDebugLoc(), NewDesc, CondReg)
6308 .add(Inst.getOperand(0))
6309 .add(Inst.getOperand(1));
6310 legalizeOperands(*NewInstr, MDT);
6311 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC);
6312 MachineOperand SCCOp = Inst.getOperand(SCCIdx);
6313 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg);
6314 Inst.eraseFromParent();
6315 }
6316 continue;
6317 }
6318
6319
6320 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) {
6321 // We cannot move this instruction to the VALU, so we should try to
6322 // legalize its operands instead.
6323 CreatedBBTmp = legalizeOperands(Inst, MDT);
6324 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
6325 CreatedBB = CreatedBBTmp;
6326 continue;
6327 }
6328
6329 // Use the new VALU Opcode.
6330 const MCInstrDesc &NewDesc = get(NewOpcode);
6331 Inst.setDesc(NewDesc);
6332
6333 // Remove any references to SCC. Vector instructions can't read from it, and
6334 // we're just about to add the implicit use / defs of VCC, and we don't want
6335 // both.
6336 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) {
6337 MachineOperand &Op = Inst.getOperand(i);
6338 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) {
6339 // Only propagate through live-def of SCC.
6340 if (Op.isDef() && !Op.isDead())
6341 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist);
6342 if (Op.isUse())
6343 addSCCDefsToVALUWorklist(Op, Worklist);
6344 Inst.removeOperand(i);
6345 }
6346 }
6347
6348 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) {
6349 // We are converting these to a BFE, so we need to add the missing
6350 // operands for the size and offset.
6351 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16;
6352 Inst.addOperand(MachineOperand::CreateImm(0));
6353 Inst.addOperand(MachineOperand::CreateImm(Size));
6354
6355 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) {
6356 // The VALU version adds the second operand to the result, so insert an
6357 // extra 0 operand.
6358 Inst.addOperand(MachineOperand::CreateImm(0));
6359 }
6360
6361 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent());
6362 fixImplicitOperands(Inst);
6363
6364 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) {
6365 const MachineOperand &OffsetWidthOp = Inst.getOperand(2);
6366 // If we need to move this to VGPRs, we need to unpack the second operand
6367 // back into the 2 separate ones for bit offset and width.
6368 assert(OffsetWidthOp.isImm() &&
6369 "Scalar BFE is only implemented for constant width and offset");
6370 uint32_t Imm = OffsetWidthOp.getImm();
6371
6372 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6373 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6374 Inst.removeOperand(2); // Remove old immediate.
6375 Inst.addOperand(MachineOperand::CreateImm(Offset));
6376 Inst.addOperand(MachineOperand::CreateImm(BitWidth));
6377 }
6378
6379 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef();
6380 unsigned NewDstReg = AMDGPU::NoRegister;
6381 if (HasDst) {
6382 Register DstReg = Inst.getOperand(0).getReg();
6383 if (DstReg.isPhysical())
6384 continue;
6385
6386 // Update the destination register class.
6387 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst);
6388 if (!NewDstRC)
6389 continue;
6390
6391 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() &&
6392 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) {
6393 // Instead of creating a copy where src and dst are the same register
6394 // class, we just replace all uses of dst with src. These kinds of
6395 // copies interfere with the heuristics MachineSink uses to decide
6396 // whether or not to split a critical edge, since the pass assumes
6397 // that copies will end up as machine instructions and not be
6398 // eliminated.
6399 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist);
6400 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg());
6401 MRI.clearKillFlags(Inst.getOperand(1).getReg());
6402 Inst.getOperand(0).setReg(DstReg);
6403
6404 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally
6405 // these are deleted later, but at -O0 it would leave a suspicious
6406 // looking illegal copy of an undef register.
6407 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I)
6408 Inst.removeOperand(I);
6409 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF));
6410 continue;
6411 }
6412
6413 NewDstReg = MRI.createVirtualRegister(NewDstRC);
6414 MRI.replaceRegWith(DstReg, NewDstReg);
6415 }
6416
6417 // Legalize the operands
6418 CreatedBBTmp = legalizeOperands(Inst, MDT);
6419 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp)
6420 CreatedBB = CreatedBBTmp;
6421
6422 if (HasDst)
6423 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist);
6424 }
6425 return CreatedBB;
6426 }
6427
6428 // Add/sub require special handling to deal with carry outs.
6429 std::pair<bool, MachineBasicBlock *>
6430 SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst,
6431 MachineDominatorTree *MDT) const {
6432 if (ST.hasAddNoCarry()) {
6433 // Assume there is no user of scc since we don't select this in that case.
6434 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant
6435 // is used.
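// When the no-carry forms exist, the scalar add/sub is rewritten in place to
// V_ADD_U32_e64 / V_SUB_U32_e64: the SCC def operand is dropped, a clamp
// operand is appended, and the result is moved into a fresh VGPR.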
6436 6437 MachineBasicBlock &MBB = *Inst.getParent(); 6438 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6439 6440 Register OldDstReg = Inst.getOperand(0).getReg(); 6441 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6442 6443 unsigned Opc = Inst.getOpcode(); 6444 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 6445 6446 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 6447 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 6448 6449 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 6450 Inst.removeOperand(3); 6451 6452 Inst.setDesc(get(NewOpc)); 6453 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 6454 Inst.addImplicitDefUseOperands(*MBB.getParent()); 6455 MRI.replaceRegWith(OldDstReg, ResultReg); 6456 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT); 6457 6458 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6459 return std::make_pair(true, NewBB); 6460 } 6461 6462 return std::make_pair(false, nullptr); 6463 } 6464 6465 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst, 6466 MachineDominatorTree *MDT) const { 6467 6468 MachineBasicBlock &MBB = *Inst.getParent(); 6469 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6470 MachineBasicBlock::iterator MII = Inst; 6471 DebugLoc DL = Inst.getDebugLoc(); 6472 6473 MachineOperand &Dest = Inst.getOperand(0); 6474 MachineOperand &Src0 = Inst.getOperand(1); 6475 MachineOperand &Src1 = Inst.getOperand(2); 6476 MachineOperand &Cond = Inst.getOperand(3); 6477 6478 Register SCCSource = Cond.getReg(); 6479 bool IsSCC = (SCCSource == AMDGPU::SCC); 6480 6481 // If this is a trivial select where the condition is effectively not SCC 6482 // (SCCSource is a source of copy to SCC), then the select is semantically 6483 // equivalent to copying SCCSource. Hence, there is no need to create 6484 // V_CNDMASK, we can just use that and bail out. 6485 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() && 6486 (Src1.getImm() == 0)) { 6487 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6488 return; 6489 } 6490 6491 const TargetRegisterClass *TC = 6492 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6493 6494 Register CopySCC = MRI.createVirtualRegister(TC); 6495 6496 if (IsSCC) { 6497 // Now look for the closest SCC def if it is a copy 6498 // replacing the SCCSource with the COPY source register 6499 bool CopyFound = false; 6500 for (MachineInstr &CandI : 6501 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 6502 Inst.getParent()->rend())) { 6503 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 6504 -1) { 6505 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 6506 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC) 6507 .addReg(CandI.getOperand(1).getReg()); 6508 CopyFound = true; 6509 } 6510 break; 6511 } 6512 } 6513 if (!CopyFound) { 6514 // SCC def is not a copy 6515 // Insert a trivial select instead of creating a copy, because a copy from 6516 // SCC would semantically mean just copying a single bit, but we may need 6517 // the result to be a vector condition mask that needs preserving. 6518 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64 6519 : AMDGPU::S_CSELECT_B32; 6520 auto NewSelect = 6521 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0); 6522 NewSelect->getOperand(3).setIsUndef(Cond.isUndef()); 6523 } 6524 } 6525 6526 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6527 6528 auto UpdatedInst = 6529 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg) 6530 .addImm(0) 6531 .add(Src1) // False 6532 .addImm(0) 6533 .add(Src0) // True 6534 .addReg(IsSCC ? CopySCC : SCCSource); 6535 6536 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6537 legalizeOperands(*UpdatedInst, MDT); 6538 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6539 } 6540 6541 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 6542 MachineInstr &Inst) const { 6543 MachineBasicBlock &MBB = *Inst.getParent(); 6544 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6545 MachineBasicBlock::iterator MII = Inst; 6546 DebugLoc DL = Inst.getDebugLoc(); 6547 6548 MachineOperand &Dest = Inst.getOperand(0); 6549 MachineOperand &Src = Inst.getOperand(1); 6550 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6551 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6552 6553 unsigned SubOp = ST.hasAddNoCarry() ? 6554 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32; 6555 6556 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 6557 .addImm(0) 6558 .addReg(Src.getReg()); 6559 6560 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 6561 .addReg(Src.getReg()) 6562 .addReg(TmpReg); 6563 6564 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6565 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6566 } 6567 6568 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 6569 MachineInstr &Inst) const { 6570 MachineBasicBlock &MBB = *Inst.getParent(); 6571 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6572 MachineBasicBlock::iterator MII = Inst; 6573 const DebugLoc &DL = Inst.getDebugLoc(); 6574 6575 MachineOperand &Dest = Inst.getOperand(0); 6576 MachineOperand &Src0 = Inst.getOperand(1); 6577 MachineOperand &Src1 = Inst.getOperand(2); 6578 6579 if (ST.hasDLInsts()) { 6580 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6581 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 6582 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 6583 6584 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 6585 .add(Src0) 6586 .add(Src1); 6587 6588 MRI.replaceRegWith(Dest.getReg(), NewDest); 6589 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6590 } else { 6591 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 6592 // invert either source and then perform the XOR. If either source is a 6593 // scalar register, then we can leave the inversion on the scalar unit to 6594 // achieve a better distribution of scalar and vector instructions. 6595 bool Src0IsSGPR = Src0.isReg() && 6596 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 6597 bool Src1IsSGPR = Src1.isReg() && 6598 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 6599 MachineInstr *Xor; 6600 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6601 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6602 6603 // Build a pair of scalar instructions and add them to the work list. 6604 // The next iteration over the work list will lower these to the vector 6605 // unit as necessary. 
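// If either source is already an SGPR, its S_NOT can stay on the scalar unit
// and only the S_XOR is queued for further VALU lowering; when neither source
// is scalar, the S_XOR result is inverted instead and both new instructions
// are queued.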
6606 if (Src0IsSGPR) { 6607 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 6608 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6609 .addReg(Temp) 6610 .add(Src1); 6611 } else if (Src1IsSGPR) { 6612 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 6613 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6614 .add(Src0) 6615 .addReg(Temp); 6616 } else { 6617 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 6618 .add(Src0) 6619 .add(Src1); 6620 MachineInstr *Not = 6621 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 6622 Worklist.insert(Not); 6623 } 6624 6625 MRI.replaceRegWith(Dest.getReg(), NewDest); 6626 6627 Worklist.insert(Xor); 6628 6629 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6630 } 6631 } 6632 6633 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 6634 MachineInstr &Inst, 6635 unsigned Opcode) const { 6636 MachineBasicBlock &MBB = *Inst.getParent(); 6637 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6638 MachineBasicBlock::iterator MII = Inst; 6639 const DebugLoc &DL = Inst.getDebugLoc(); 6640 6641 MachineOperand &Dest = Inst.getOperand(0); 6642 MachineOperand &Src0 = Inst.getOperand(1); 6643 MachineOperand &Src1 = Inst.getOperand(2); 6644 6645 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6646 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6647 6648 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 6649 .add(Src0) 6650 .add(Src1); 6651 6652 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 6653 .addReg(Interm); 6654 6655 Worklist.insert(&Op); 6656 Worklist.insert(&Not); 6657 6658 MRI.replaceRegWith(Dest.getReg(), NewDest); 6659 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6660 } 6661 6662 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 6663 MachineInstr &Inst, 6664 unsigned Opcode) const { 6665 MachineBasicBlock &MBB = *Inst.getParent(); 6666 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6667 MachineBasicBlock::iterator MII = Inst; 6668 const DebugLoc &DL = Inst.getDebugLoc(); 6669 6670 MachineOperand &Dest = Inst.getOperand(0); 6671 MachineOperand &Src0 = Inst.getOperand(1); 6672 MachineOperand &Src1 = Inst.getOperand(2); 6673 6674 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6675 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6676 6677 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 6678 .add(Src1); 6679 6680 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 6681 .add(Src0) 6682 .addReg(Interm); 6683 6684 Worklist.insert(&Not); 6685 Worklist.insert(&Op); 6686 6687 MRI.replaceRegWith(Dest.getReg(), NewDest); 6688 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6689 } 6690 6691 void SIInstrInfo::splitScalar64BitUnaryOp( 6692 SetVectorType &Worklist, MachineInstr &Inst, 6693 unsigned Opcode, bool Swap) const { 6694 MachineBasicBlock &MBB = *Inst.getParent(); 6695 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6696 6697 MachineOperand &Dest = Inst.getOperand(0); 6698 MachineOperand &Src0 = Inst.getOperand(1); 6699 DebugLoc DL = Inst.getDebugLoc(); 6700 6701 MachineBasicBlock::iterator MII = Inst; 6702 6703 const MCInstrDesc &InstDesc = get(Opcode); 6704 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
6705 MRI.getRegClass(Src0.getReg()) : 6706 &AMDGPU::SGPR_32RegClass; 6707 6708 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6709 6710 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6711 AMDGPU::sub0, Src0SubRC); 6712 6713 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6714 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6715 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6716 6717 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6718 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 6719 6720 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6721 AMDGPU::sub1, Src0SubRC); 6722 6723 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6724 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 6725 6726 if (Swap) 6727 std::swap(DestSub0, DestSub1); 6728 6729 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6730 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6731 .addReg(DestSub0) 6732 .addImm(AMDGPU::sub0) 6733 .addReg(DestSub1) 6734 .addImm(AMDGPU::sub1); 6735 6736 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6737 6738 Worklist.insert(&LoHalf); 6739 Worklist.insert(&HiHalf); 6740 6741 // We don't need to legalizeOperands here because for a single operand, src0 6742 // will support any kind of input. 6743 6744 // Move all users of this moved value. 6745 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6746 } 6747 6748 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 6749 MachineInstr &Inst, 6750 MachineDominatorTree *MDT) const { 6751 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 6752 6753 MachineBasicBlock &MBB = *Inst.getParent(); 6754 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6755 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6756 6757 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6758 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6759 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6760 6761 Register CarryReg = MRI.createVirtualRegister(CarryRC); 6762 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 6763 6764 MachineOperand &Dest = Inst.getOperand(0); 6765 MachineOperand &Src0 = Inst.getOperand(1); 6766 MachineOperand &Src1 = Inst.getOperand(2); 6767 const DebugLoc &DL = Inst.getDebugLoc(); 6768 MachineBasicBlock::iterator MII = Inst; 6769 6770 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 6771 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 6772 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6773 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6774 6775 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6776 AMDGPU::sub0, Src0SubRC); 6777 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6778 AMDGPU::sub0, Src1SubRC); 6779 6780 6781 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6782 AMDGPU::sub1, Src0SubRC); 6783 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6784 AMDGPU::sub1, Src1SubRC); 6785 6786 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 6787 MachineInstr *LoHalf = 6788 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) 6789 .addReg(CarryReg, RegState::Define) 6790 .add(SrcReg0Sub0) 6791 .add(SrcReg1Sub0) 6792 .addImm(0); // clamp bit 6793 6794 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 6795 MachineInstr *HiHalf = 6796 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) 6797 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 6798 .add(SrcReg0Sub1) 6799 .add(SrcReg1Sub1) 6800 .addReg(CarryReg, RegState::Kill) 6801 .addImm(0); // clamp bit 6802 6803 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6804 .addReg(DestSub0) 6805 .addImm(AMDGPU::sub0) 6806 .addReg(DestSub1) 6807 .addImm(AMDGPU::sub1); 6808 6809 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6810 6811 // Try to legalize the operands in case we need to swap the order to keep it 6812 // valid. 6813 legalizeOperands(*LoHalf, MDT); 6814 legalizeOperands(*HiHalf, MDT); 6815 6816 // Move all users of this moved value. 6817 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6818 } 6819 6820 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, 6821 MachineInstr &Inst, unsigned Opcode, 6822 MachineDominatorTree *MDT) const { 6823 MachineBasicBlock &MBB = *Inst.getParent(); 6824 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6825 6826 MachineOperand &Dest = Inst.getOperand(0); 6827 MachineOperand &Src0 = Inst.getOperand(1); 6828 MachineOperand &Src1 = Inst.getOperand(2); 6829 DebugLoc DL = Inst.getDebugLoc(); 6830 6831 MachineBasicBlock::iterator MII = Inst; 6832 6833 const MCInstrDesc &InstDesc = get(Opcode); 6834 const TargetRegisterClass *Src0RC = Src0.isReg() ? 6835 MRI.getRegClass(Src0.getReg()) : 6836 &AMDGPU::SGPR_32RegClass; 6837 6838 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6839 const TargetRegisterClass *Src1RC = Src1.isReg() ? 
6840 MRI.getRegClass(Src1.getReg()) : 6841 &AMDGPU::SGPR_32RegClass; 6842 6843 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6844 6845 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6846 AMDGPU::sub0, Src0SubRC); 6847 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6848 AMDGPU::sub0, Src1SubRC); 6849 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6850 AMDGPU::sub1, Src0SubRC); 6851 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6852 AMDGPU::sub1, Src1SubRC); 6853 6854 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6855 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6856 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6857 6858 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6859 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 6860 .add(SrcReg0Sub0) 6861 .add(SrcReg1Sub0); 6862 6863 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6864 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 6865 .add(SrcReg0Sub1) 6866 .add(SrcReg1Sub1); 6867 6868 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6869 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6870 .addReg(DestSub0) 6871 .addImm(AMDGPU::sub0) 6872 .addReg(DestSub1) 6873 .addImm(AMDGPU::sub1); 6874 6875 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6876 6877 Worklist.insert(&LoHalf); 6878 Worklist.insert(&HiHalf); 6879 6880 // Move all users of this moved value. 6881 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6882 } 6883 6884 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 6885 MachineInstr &Inst, 6886 MachineDominatorTree *MDT) const { 6887 MachineBasicBlock &MBB = *Inst.getParent(); 6888 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6889 6890 MachineOperand &Dest = Inst.getOperand(0); 6891 MachineOperand &Src0 = Inst.getOperand(1); 6892 MachineOperand &Src1 = Inst.getOperand(2); 6893 const DebugLoc &DL = Inst.getDebugLoc(); 6894 6895 MachineBasicBlock::iterator MII = Inst; 6896 6897 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6898 6899 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 6900 6901 MachineOperand* Op0; 6902 MachineOperand* Op1; 6903 6904 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 6905 Op0 = &Src0; 6906 Op1 = &Src1; 6907 } else { 6908 Op0 = &Src1; 6909 Op1 = &Src0; 6910 } 6911 6912 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 6913 .add(*Op0); 6914 6915 Register NewDest = MRI.createVirtualRegister(DestRC); 6916 6917 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 6918 .addReg(Interm) 6919 .add(*Op1); 6920 6921 MRI.replaceRegWith(Dest.getReg(), NewDest); 6922 6923 Worklist.insert(&Xor); 6924 } 6925 6926 void SIInstrInfo::splitScalar64BitBCNT( 6927 SetVectorType &Worklist, MachineInstr &Inst) const { 6928 MachineBasicBlock &MBB = *Inst.getParent(); 6929 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6930 6931 MachineBasicBlock::iterator MII = Inst; 6932 const DebugLoc &DL = Inst.getDebugLoc(); 6933 6934 MachineOperand &Dest = Inst.getOperand(0); 6935 MachineOperand &Src = Inst.getOperand(1); 6936 6937 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 6938 const TargetRegisterClass *SrcRC = Src.isReg() ? 
6939 MRI.getRegClass(Src.getReg()) : 6940 &AMDGPU::SGPR_32RegClass; 6941 6942 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6943 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6944 6945 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 6946 6947 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6948 AMDGPU::sub0, SrcSubRC); 6949 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6950 AMDGPU::sub1, SrcSubRC); 6951 6952 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 6953 6954 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 6955 6956 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6957 6958 // We don't need to legalize operands here. src0 for either instruction can be 6959 // an SGPR, and the second input is unused or determined here. 6960 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6961 } 6962 6963 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 6964 MachineInstr &Inst) const { 6965 MachineBasicBlock &MBB = *Inst.getParent(); 6966 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6967 MachineBasicBlock::iterator MII = Inst; 6968 const DebugLoc &DL = Inst.getDebugLoc(); 6969 6970 MachineOperand &Dest = Inst.getOperand(0); 6971 uint32_t Imm = Inst.getOperand(2).getImm(); 6972 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6973 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6974 6975 (void) Offset; 6976 6977 // Only sext_inreg cases handled. 6978 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 6979 Offset == 0 && "Not implemented"); 6980 6981 if (BitWidth < 32) { 6982 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6983 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6984 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6985 6986 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo) 6987 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 6988 .addImm(0) 6989 .addImm(BitWidth); 6990 6991 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 6992 .addImm(31) 6993 .addReg(MidRegLo); 6994 6995 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 6996 .addReg(MidRegLo) 6997 .addImm(AMDGPU::sub0) 6998 .addReg(MidRegHi) 6999 .addImm(AMDGPU::sub1); 7000 7001 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7002 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7003 return; 7004 } 7005 7006 MachineOperand &Src = Inst.getOperand(1); 7007 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7008 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 7009 7010 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 7011 .addImm(31) 7012 .addReg(Src.getReg(), 0, AMDGPU::sub0); 7013 7014 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 7015 .addReg(Src.getReg(), 0, AMDGPU::sub0) 7016 .addImm(AMDGPU::sub0) 7017 .addReg(TmpReg) 7018 .addImm(AMDGPU::sub1); 7019 7020 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7021 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7022 } 7023 7024 void SIInstrInfo::addUsersToMoveToVALUWorklist( 7025 Register DstReg, 7026 MachineRegisterInfo &MRI, 7027 SetVectorType &Worklist) const { 7028 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 7029 E = MRI.use_end(); I != E;) { 7030 MachineInstr &UseMI = *I->getParent(); 7031 7032 unsigned OpNo = 
0; 7033 7034 switch (UseMI.getOpcode()) { 7035 case AMDGPU::COPY: 7036 case AMDGPU::WQM: 7037 case AMDGPU::SOFT_WQM: 7038 case AMDGPU::STRICT_WWM: 7039 case AMDGPU::STRICT_WQM: 7040 case AMDGPU::REG_SEQUENCE: 7041 case AMDGPU::PHI: 7042 case AMDGPU::INSERT_SUBREG: 7043 break; 7044 default: 7045 OpNo = I.getOperandNo(); 7046 break; 7047 } 7048 7049 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 7050 Worklist.insert(&UseMI); 7051 7052 do { 7053 ++I; 7054 } while (I != E && I->getParent() == &UseMI); 7055 } else { 7056 ++I; 7057 } 7058 } 7059 } 7060 7061 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 7062 MachineRegisterInfo &MRI, 7063 MachineInstr &Inst) const { 7064 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7065 MachineBasicBlock *MBB = Inst.getParent(); 7066 MachineOperand &Src0 = Inst.getOperand(1); 7067 MachineOperand &Src1 = Inst.getOperand(2); 7068 const DebugLoc &DL = Inst.getDebugLoc(); 7069 7070 switch (Inst.getOpcode()) { 7071 case AMDGPU::S_PACK_LL_B32_B16: { 7072 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7073 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7074 7075 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 7076 // 0. 7077 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7078 .addImm(0xffff); 7079 7080 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 7081 .addReg(ImmReg, RegState::Kill) 7082 .add(Src0); 7083 7084 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg) 7085 .add(Src1) 7086 .addImm(16) 7087 .addReg(TmpReg, RegState::Kill); 7088 break; 7089 } 7090 case AMDGPU::S_PACK_LH_B32_B16: { 7091 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7092 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7093 .addImm(0xffff); 7094 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg) 7095 .addReg(ImmReg, RegState::Kill) 7096 .add(Src0) 7097 .add(Src1); 7098 break; 7099 } 7100 case AMDGPU::S_PACK_HH_B32_B16: { 7101 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7102 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7103 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 7104 .addImm(16) 7105 .add(Src0); 7106 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7107 .addImm(0xffff0000); 7108 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg) 7109 .add(Src1) 7110 .addReg(ImmReg, RegState::Kill) 7111 .addReg(TmpReg, RegState::Kill); 7112 break; 7113 } 7114 default: 7115 llvm_unreachable("unhandled s_pack_* instruction"); 7116 } 7117 7118 MachineOperand &Dest = Inst.getOperand(0); 7119 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7120 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7121 } 7122 7123 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 7124 MachineInstr &SCCDefInst, 7125 SetVectorType &Worklist, 7126 Register NewCond) const { 7127 7128 // Ensure that def inst defines SCC, which is still live. 7129 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 7130 !Op.isDead() && Op.getParent() == &SCCDefInst); 7131 SmallVector<MachineInstr *, 4> CopyToDelete; 7132 // This assumes that all the users of SCC are in the same block 7133 // as the SCC def. 7134 for (MachineInstr &MI : // Skip the def inst itself. 7135 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 7136 SCCDefInst.getParent()->end())) { 7137 // Check if SCC is used first. 
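// A COPY of SCC is redirected to read NewCond and deleted below; any other
// user has its SCC operand rewritten to NewCond (when one is provided) and
// is queued for conversion to VALU.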
7138 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI); 7139 if (SCCIdx != -1) { 7140 if (MI.isCopy()) { 7141 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 7142 Register DestReg = MI.getOperand(0).getReg(); 7143 7144 MRI.replaceRegWith(DestReg, NewCond); 7145 CopyToDelete.push_back(&MI); 7146 } else { 7147 7148 if (NewCond.isValid()) 7149 MI.getOperand(SCCIdx).setReg(NewCond); 7150 7151 Worklist.insert(&MI); 7152 } 7153 } 7154 // Exit if we find another SCC def. 7155 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 7156 break; 7157 } 7158 for (auto &Copy : CopyToDelete) 7159 Copy->eraseFromParent(); 7160 } 7161 7162 // Instructions that use SCC may be converted to VALU instructions. When that 7163 // happens, the SCC register is changed to VCC_LO. The instruction that defines 7164 // SCC must be changed to an instruction that defines VCC. This function makes 7165 // sure that the instruction that defines SCC is added to the moveToVALU 7166 // worklist. 7167 void SIInstrInfo::addSCCDefsToVALUWorklist(MachineOperand &Op, 7168 SetVectorType &Worklist) const { 7169 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isUse()); 7170 7171 MachineInstr *SCCUseInst = Op.getParent(); 7172 // Look for a preceding instruction that either defines VCC or SCC. If VCC 7173 // then there is nothing to do because the defining instruction has been 7174 // converted to a VALU already. If SCC then that instruction needs to be 7175 // converted to a VALU. 7176 for (MachineInstr &MI : 7177 make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)), 7178 SCCUseInst->getParent()->rend())) { 7179 if (MI.modifiesRegister(AMDGPU::VCC, &RI)) 7180 break; 7181 if (MI.definesRegister(AMDGPU::SCC, &RI)) { 7182 Worklist.insert(&MI); 7183 break; 7184 } 7185 } 7186 } 7187 7188 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 7189 const MachineInstr &Inst) const { 7190 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 7191 7192 switch (Inst.getOpcode()) { 7193 // For target instructions, getOpRegClass just returns the virtual register 7194 // class associated with the operand, so we need to find an equivalent VGPR 7195 // register class in order to move the instruction to the VALU. 7196 case AMDGPU::COPY: 7197 case AMDGPU::PHI: 7198 case AMDGPU::REG_SEQUENCE: 7199 case AMDGPU::INSERT_SUBREG: 7200 case AMDGPU::WQM: 7201 case AMDGPU::SOFT_WQM: 7202 case AMDGPU::STRICT_WWM: 7203 case AMDGPU::STRICT_WQM: { 7204 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 7205 if (RI.isAGPRClass(SrcRC)) { 7206 if (RI.isAGPRClass(NewDstRC)) 7207 return nullptr; 7208 7209 switch (Inst.getOpcode()) { 7210 case AMDGPU::PHI: 7211 case AMDGPU::REG_SEQUENCE: 7212 case AMDGPU::INSERT_SUBREG: 7213 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 7214 break; 7215 default: 7216 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 7217 } 7218 7219 if (!NewDstRC) 7220 return nullptr; 7221 } else { 7222 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 7223 return nullptr; 7224 7225 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 7226 if (!NewDstRC) 7227 return nullptr; 7228 } 7229 7230 return NewDstRC; 7231 } 7232 default: 7233 return NewDstRC; 7234 } 7235 } 7236 7237 // Find the one SGPR operand we are allowed to use. 7238 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 7239 int OpIndices[3]) const { 7240 const MCInstrDesc &Desc = MI.getDesc(); 7241 7242 // Find the one SGPR operand we are allowed to use. 
7243 //
7244 // First we need to consider the instruction's operand requirements before
7245 // legalizing. Some operands are required to be SGPRs, such as implicit uses
7246 // of VCC, but we are still bound by the constant bus requirement to only use
7247 // one.
7248 //
7249 // If the operand's class is an SGPR, we can never move it.
7250
7251 Register SGPRReg = findImplicitSGPRRead(MI);
7252 if (SGPRReg != AMDGPU::NoRegister)
7253 return SGPRReg;
7254
7255 Register UsedSGPRs[3] = { AMDGPU::NoRegister };
7256 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
7257
7258 for (unsigned i = 0; i < 3; ++i) {
7259 int Idx = OpIndices[i];
7260 if (Idx == -1)
7261 break;
7262
7263 const MachineOperand &MO = MI.getOperand(Idx);
7264 if (!MO.isReg())
7265 continue;
7266
7267 // Is this operand statically required to be an SGPR based on the operand
7268 // constraints?
7269 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
7270 bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
7271 if (IsRequiredSGPR)
7272 return MO.getReg();
7273
7274 // If this could be a VGPR or an SGPR, check the dynamic register class.
7275 Register Reg = MO.getReg();
7276 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
7277 if (RI.isSGPRClass(RegRC))
7278 UsedSGPRs[i] = Reg;
7279 }
7280
7281 // We don't have a required SGPR operand, so we have a bit more freedom in
7282 // selecting operands to move.
7283
7284 // Try to select the most used SGPR. If an SGPR is equal to one of the
7285 // others, we choose that.
7286 //
7287 // e.g.
7288 // V_FMA_F32 v0, s0, s0, s0 -> No moves
7289 // V_FMA_F32 v0, s0, s1, s0 -> Move s1
7290
7291 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
7292 // prefer those.
7293
7294 if (UsedSGPRs[0] != AMDGPU::NoRegister) {
7295 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
7296 SGPRReg = UsedSGPRs[0];
7297 }
7298
7299 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
7300 if (UsedSGPRs[1] == UsedSGPRs[2])
7301 SGPRReg = UsedSGPRs[1];
7302 }
7303
7304 return SGPRReg;
7305 }
7306
7307 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
7308 unsigned OperandName) const {
7309 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
7310 if (Idx == -1)
7311 return nullptr;
7312
7313 return &MI.getOperand(Idx);
7314 }
7315
7316 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
7317 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
7318 return (AMDGPU::MTBUFFormat::UFMT_32_FLOAT << 44) |
7319 (1ULL << 56) | // RESOURCE_LEVEL = 1
7320 (3ULL << 60); // OOB_SELECT = 3
7321 }
7322
7323 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
7324 if (ST.isAmdHsaOS()) {
7325 // Set ATC = 1. GFX9 doesn't have this bit.
7326 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
7327 RsrcDataFormat |= (1ULL << 56);
7328
7329 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
7330 // Note that it disables TC L2 and therefore decreases performance.
7331 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
7332 RsrcDataFormat |= (2ULL << 59);
7333 }
7334
7335 return RsrcDataFormat;
7336 }
7337
7338 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
7339 uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
7340 AMDGPU::RSRC_TID_ENABLE |
7341 0xffffffff; // Size
7342
7343 // GFX9 doesn't have ELEMENT_SIZE.
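// ELEMENT_SIZE is encoded as log2(element size in bytes) - 1, hence the
// subtraction below.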
7344 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 7345 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1; 7346 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 7347 } 7348 7349 // IndexStride = 64 / 32. 7350 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 7351 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 7352 7353 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 7354 // Clear them unless we want a huge stride. 7355 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 7356 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 7357 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 7358 7359 return Rsrc23; 7360 } 7361 7362 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 7363 unsigned Opc = MI.getOpcode(); 7364 7365 return isSMRD(Opc); 7366 } 7367 7368 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 7369 return get(Opc).mayLoad() && 7370 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 7371 } 7372 7373 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 7374 int &FrameIndex) const { 7375 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 7376 if (!Addr || !Addr->isFI()) 7377 return AMDGPU::NoRegister; 7378 7379 assert(!MI.memoperands_empty() && 7380 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 7381 7382 FrameIndex = Addr->getIndex(); 7383 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 7384 } 7385 7386 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 7387 int &FrameIndex) const { 7388 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 7389 assert(Addr && Addr->isFI()); 7390 FrameIndex = Addr->getIndex(); 7391 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 7392 } 7393 7394 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 7395 int &FrameIndex) const { 7396 if (!MI.mayLoad()) 7397 return AMDGPU::NoRegister; 7398 7399 if (isMUBUF(MI) || isVGPRSpill(MI)) 7400 return isStackAccess(MI, FrameIndex); 7401 7402 if (isSGPRSpill(MI)) 7403 return isSGPRStackAccess(MI, FrameIndex); 7404 7405 return AMDGPU::NoRegister; 7406 } 7407 7408 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 7409 int &FrameIndex) const { 7410 if (!MI.mayStore()) 7411 return AMDGPU::NoRegister; 7412 7413 if (isMUBUF(MI) || isVGPRSpill(MI)) 7414 return isStackAccess(MI, FrameIndex); 7415 7416 if (isSGPRSpill(MI)) 7417 return isSGPRStackAccess(MI, FrameIndex); 7418 7419 return AMDGPU::NoRegister; 7420 } 7421 7422 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 7423 unsigned Size = 0; 7424 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 7425 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 7426 while (++I != E && I->isInsideBundle()) { 7427 assert(!I->isBundle() && "No nested bundle!"); 7428 Size += getInstSizeInBytes(*I); 7429 } 7430 7431 return Size; 7432 } 7433 7434 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 7435 unsigned Opc = MI.getOpcode(); 7436 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 7437 unsigned DescSize = Desc.getSize(); 7438 7439 // If we have a definitive size, we can use it. Otherwise we need to inspect 7440 // the operands to know the size. 7441 if (isFixedSize(MI)) { 7442 unsigned Size = DescSize; 7443 7444 // If we hit the buggy offset, an extra nop will be inserted in MC so 7445 // estimate the worst case. 
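// The extra nop is 4 bytes, so pad the branch size accordingly.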
7446 if (MI.isBranch() && ST.hasOffset3fBug()) 7447 Size += 4; 7448 7449 return Size; 7450 } 7451 7452 // Instructions may have a 32-bit literal encoded after them. Check 7453 // operands that could ever be literals. 7454 if (isVALU(MI) || isSALU(MI)) { 7455 if (isDPP(MI)) 7456 return DescSize; 7457 bool HasLiteral = false; 7458 for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) { 7459 if (isLiteralConstant(MI, I)) { 7460 HasLiteral = true; 7461 break; 7462 } 7463 } 7464 return HasLiteral ? DescSize + 4 : DescSize; 7465 } 7466 7467 // Check whether we have extra NSA words. 7468 if (isMIMG(MI)) { 7469 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 7470 if (VAddr0Idx < 0) 7471 return 8; 7472 7473 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 7474 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 7475 } 7476 7477 switch (Opc) { 7478 case TargetOpcode::BUNDLE: 7479 return getInstBundleSize(MI); 7480 case TargetOpcode::INLINEASM: 7481 case TargetOpcode::INLINEASM_BR: { 7482 const MachineFunction *MF = MI.getParent()->getParent(); 7483 const char *AsmStr = MI.getOperand(0).getSymbolName(); 7484 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST); 7485 } 7486 default: 7487 if (MI.isMetaInstruction()) 7488 return 0; 7489 return DescSize; 7490 } 7491 } 7492 7493 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 7494 if (!isFLAT(MI)) 7495 return false; 7496 7497 if (MI.memoperands_empty()) 7498 return true; 7499 7500 for (const MachineMemOperand *MMO : MI.memoperands()) { 7501 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 7502 return true; 7503 } 7504 return false; 7505 } 7506 7507 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 7508 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 7509 } 7510 7511 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 7512 MachineBasicBlock *IfEnd) const { 7513 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 7514 assert(TI != IfEntry->end()); 7515 7516 MachineInstr *Branch = &(*TI); 7517 MachineFunction *MF = IfEntry->getParent(); 7518 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 7519 7520 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7521 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7522 MachineInstr *SIIF = 7523 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 7524 .add(Branch->getOperand(0)) 7525 .add(Branch->getOperand(1)); 7526 MachineInstr *SIEND = 7527 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 7528 .addReg(DstReg); 7529 7530 IfEntry->erase(TI); 7531 IfEntry->insert(IfEntry->end(), SIIF); 7532 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 7533 } 7534 } 7535 7536 void SIInstrInfo::convertNonUniformLoopRegion( 7537 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 7538 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 7539 // We expect 2 terminators, one conditional and one unconditional. 
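// Only the conditional branch (SI_NON_UNIFORM_BRCOND_PSEUDO) is rewritten
// below: a PHI in the loop header feeds SI_IF_BREAK, whose result drives the
// SI_LOOP inserted at the end of the loop block.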
7540 assert(TI != LoopEnd->end()); 7541 7542 MachineInstr *Branch = &(*TI); 7543 MachineFunction *MF = LoopEnd->getParent(); 7544 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 7545 7546 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7547 7548 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7549 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 7550 MachineInstrBuilder HeaderPHIBuilder = 7551 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 7552 for (MachineBasicBlock *PMBB : LoopEntry->predecessors()) { 7553 if (PMBB == LoopEnd) { 7554 HeaderPHIBuilder.addReg(BackEdgeReg); 7555 } else { 7556 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 7557 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 7558 ZeroReg, 0); 7559 HeaderPHIBuilder.addReg(ZeroReg); 7560 } 7561 HeaderPHIBuilder.addMBB(PMBB); 7562 } 7563 MachineInstr *HeaderPhi = HeaderPHIBuilder; 7564 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 7565 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 7566 .addReg(DstReg) 7567 .add(Branch->getOperand(0)); 7568 MachineInstr *SILOOP = 7569 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 7570 .addReg(BackEdgeReg) 7571 .addMBB(LoopEntry); 7572 7573 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 7574 LoopEnd->erase(TI); 7575 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 7576 LoopEnd->insert(LoopEnd->end(), SILOOP); 7577 } 7578 } 7579 7580 ArrayRef<std::pair<int, const char *>> 7581 SIInstrInfo::getSerializableTargetIndices() const { 7582 static const std::pair<int, const char *> TargetIndices[] = { 7583 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 7584 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 7585 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 7586 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 7587 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 7588 return makeArrayRef(TargetIndices); 7589 } 7590 7591 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 7592 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 7593 ScheduleHazardRecognizer * 7594 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 7595 const ScheduleDAG *DAG) const { 7596 return new GCNHazardRecognizer(DAG->MF); 7597 } 7598 7599 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 7600 /// pass. 7601 ScheduleHazardRecognizer * 7602 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 7603 return new GCNHazardRecognizer(MF); 7604 } 7605 7606 // Called during: 7607 // - pre-RA scheduling and post-RA scheduling 7608 ScheduleHazardRecognizer * 7609 SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II, 7610 const ScheduleDAGMI *DAG) const { 7611 // Borrowed from Arm Target 7612 // We would like to restrict this hazard recognizer to only 7613 // post-RA scheduling; we can tell that we're post-RA because we don't 7614 // track VRegLiveness. 
7615 if (!DAG->hasVRegLiveness()) 7616 return new GCNHazardRecognizer(DAG->MF); 7617 return TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG); 7618 } 7619 7620 std::pair<unsigned, unsigned> 7621 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7622 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 7623 } 7624 7625 ArrayRef<std::pair<unsigned, const char *>> 7626 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7627 static const std::pair<unsigned, const char *> TargetFlags[] = { 7628 { MO_GOTPCREL, "amdgpu-gotprel" }, 7629 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 7630 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 7631 { MO_REL32_LO, "amdgpu-rel32-lo" }, 7632 { MO_REL32_HI, "amdgpu-rel32-hi" }, 7633 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 7634 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 7635 }; 7636 7637 return makeArrayRef(TargetFlags); 7638 } 7639 7640 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> 7641 SIInstrInfo::getSerializableMachineMemOperandTargetFlags() const { 7642 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] = 7643 { 7644 {MONoClobber, "amdgpu-noclobber"}, 7645 }; 7646 7647 return makeArrayRef(TargetFlags); 7648 } 7649 7650 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 7651 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 7652 MI.modifiesRegister(AMDGPU::EXEC, &RI); 7653 } 7654 7655 MachineInstrBuilder 7656 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7657 MachineBasicBlock::iterator I, 7658 const DebugLoc &DL, 7659 Register DestReg) const { 7660 if (ST.hasAddNoCarry()) 7661 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 7662 7663 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 7664 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 7665 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 7666 7667 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7668 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7669 } 7670 7671 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7672 MachineBasicBlock::iterator I, 7673 const DebugLoc &DL, 7674 Register DestReg, 7675 RegScavenger &RS) const { 7676 if (ST.hasAddNoCarry()) 7677 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 7678 7679 // If available, prefer to use vcc. 7680 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 7681 ? Register(RI.getVCC()) 7682 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 7683 7684 // TODO: Users need to deal with this. 
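// If no register could be scavenged for the carry, return an empty builder
// so the caller can detect the failure.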
7685 if (!UnusedCarry.isValid()) 7686 return MachineInstrBuilder(); 7687 7688 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7689 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7690 } 7691 7692 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 7693 switch (Opcode) { 7694 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 7695 case AMDGPU::SI_KILL_I1_TERMINATOR: 7696 return true; 7697 default: 7698 return false; 7699 } 7700 } 7701 7702 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 7703 switch (Opcode) { 7704 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 7705 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 7706 case AMDGPU::SI_KILL_I1_PSEUDO: 7707 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 7708 default: 7709 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 7710 } 7711 } 7712 7713 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 7714 if (!ST.isWave32()) 7715 return; 7716 7717 for (auto &Op : MI.implicit_operands()) { 7718 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 7719 Op.setReg(AMDGPU::VCC_LO); 7720 } 7721 } 7722 7723 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 7724 if (!isSMRD(MI)) 7725 return false; 7726 7727 // Check that it is using a buffer resource. 7728 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 7729 if (Idx == -1) // e.g. s_memtime 7730 return false; 7731 7732 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 7733 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 7734 } 7735 7736 // Depending on the used address space and instructions, some immediate offsets 7737 // are allowed and some are not. 7738 // In general, flat instruction offsets can only be non-negative, global and 7739 // scratch instruction offsets can also be negative. 7740 // 7741 // There are several bugs related to these offsets: 7742 // On gfx10.1, flat instructions that go into the global address space cannot 7743 // use an offset. 7744 // 7745 // For scratch instructions, the address can be either an SGPR or a VGPR. 7746 // The following offsets can be used, depending on the architecture (x means 7747 // cannot be used): 7748 // +----------------------------+------+------+ 7749 // | Address-Mode | SGPR | VGPR | 7750 // +----------------------------+------+------+ 7751 // | gfx9 | | | 7752 // | negative, 4-aligned offset | x | ok | 7753 // | negative, unaligned offset | x | ok | 7754 // +----------------------------+------+------+ 7755 // | gfx10 | | | 7756 // | negative, 4-aligned offset | ok | ok | 7757 // | negative, unaligned offset | ok | x | 7758 // +----------------------------+------+------+ 7759 // | gfx10.3 | | | 7760 // | negative, 4-aligned offset | ok | ok | 7761 // | negative, unaligned offset | ok | ok | 7762 // +----------------------------+------+------+ 7763 // 7764 // This function ignores the addressing mode, so if an offset cannot be used in 7765 // one addressing mode, it is considered illegal. 7766 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 7767 uint64_t FlatVariant) const { 7768 // TODO: Should 0 be special cased? 
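// Subtargets without flat instruction offsets reject every offset here,
// including 0 (hence the TODO above).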
7769 if (!ST.hasFlatInstOffsets()) 7770 return false; 7771 7772 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT && 7773 (AddrSpace == AMDGPUAS::FLAT_ADDRESS || 7774 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS)) 7775 return false; 7776 7777 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7778 if (ST.hasNegativeScratchOffsetBug() && 7779 FlatVariant == SIInstrFlags::FlatScratch) 7780 Signed = false; 7781 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7782 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 && 7783 (Offset % 4) != 0) { 7784 return false; 7785 } 7786 7787 unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7788 return Signed ? isIntN(N, Offset) : isUIntN(N, Offset); 7789 } 7790 7791 // See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not. 7792 std::pair<int64_t, int64_t> 7793 SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, 7794 uint64_t FlatVariant) const { 7795 int64_t RemainderOffset = COffsetVal; 7796 int64_t ImmField = 0; 7797 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7798 if (ST.hasNegativeScratchOffsetBug() && 7799 FlatVariant == SIInstrFlags::FlatScratch) 7800 Signed = false; 7801 7802 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7803 if (Signed) { 7804 // Use signed division by a power of two to truncate towards 0. 7805 int64_t D = 1LL << (NumBits - 1); 7806 RemainderOffset = (COffsetVal / D) * D; 7807 ImmField = COffsetVal - RemainderOffset; 7808 7809 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7810 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 && 7811 (ImmField % 4) != 0) { 7812 // Make ImmField a multiple of 4 7813 RemainderOffset += ImmField % 4; 7814 ImmField -= ImmField % 4; 7815 } 7816 } else if (COffsetVal >= 0) { 7817 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits); 7818 RemainderOffset = COffsetVal - ImmField; 7819 } 7820 7821 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant)); 7822 assert(RemainderOffset + ImmField == COffsetVal); 7823 return {ImmField, RemainderOffset}; 7824 } 7825 7826 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 7827 enum SIEncodingFamily { 7828 SI = 0, 7829 VI = 1, 7830 SDWA = 2, 7831 SDWA9 = 3, 7832 GFX80 = 4, 7833 GFX9 = 5, 7834 GFX10 = 6, 7835 SDWA10 = 7, 7836 GFX90A = 8, 7837 GFX940 = 9 7838 }; 7839 7840 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 7841 switch (ST.getGeneration()) { 7842 default: 7843 break; 7844 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 7845 case AMDGPUSubtarget::SEA_ISLANDS: 7846 return SIEncodingFamily::SI; 7847 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 7848 case AMDGPUSubtarget::GFX9: 7849 return SIEncodingFamily::VI; 7850 case AMDGPUSubtarget::GFX10: 7851 return SIEncodingFamily::GFX10; 7852 } 7853 llvm_unreachable("Unknown subtarget generation!"); 7854 } 7855 7856 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 7857 switch(MCOp) { 7858 // These opcodes use indirect register addressing so 7859 // they need special handling by codegen (currently missing). 7860 // Therefore it is too risky to allow these opcodes 7861 // to be selected by dpp combiner or sdwa peepholer. 
7862 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 7863 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 7864 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 7865 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 7866 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 7867 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 7868 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 7869 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 7870 return true; 7871 default: 7872 return false; 7873 } 7874 } 7875 7876 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 7877 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 7878 7879 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 7880 ST.getGeneration() == AMDGPUSubtarget::GFX9) 7881 Gen = SIEncodingFamily::GFX9; 7882 7883 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 7884 // subtarget has UnpackedD16VMem feature. 7885 // TODO: remove this when we discard GFX80 encoding. 7886 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 7887 Gen = SIEncodingFamily::GFX80; 7888 7889 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 7890 switch (ST.getGeneration()) { 7891 default: 7892 Gen = SIEncodingFamily::SDWA; 7893 break; 7894 case AMDGPUSubtarget::GFX9: 7895 Gen = SIEncodingFamily::SDWA9; 7896 break; 7897 case AMDGPUSubtarget::GFX10: 7898 Gen = SIEncodingFamily::SDWA10; 7899 break; 7900 } 7901 } 7902 7903 if (isMAI(Opcode)) { 7904 int MFMAOp = AMDGPU::getMFMAEarlyClobberOp(Opcode); 7905 if (MFMAOp != -1) 7906 Opcode = MFMAOp; 7907 } 7908 7909 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 7910 7911 // -1 means that Opcode is already a native instruction. 7912 if (MCOp == -1) 7913 return Opcode; 7914 7915 if (ST.hasGFX90AInsts()) { 7916 uint16_t NMCOp = (uint16_t)-1; 7917 if (ST.hasGFX940Insts()) 7918 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX940); 7919 if (NMCOp == (uint16_t)-1) 7920 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A); 7921 if (NMCOp == (uint16_t)-1) 7922 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX9); 7923 if (NMCOp != (uint16_t)-1) 7924 MCOp = NMCOp; 7925 } 7926 7927 // (uint16_t)-1 means that Opcode is a pseudo instruction that has 7928 // no encoding in the given subtarget generation. 7929 if (MCOp == (uint16_t)-1) 7930 return -1; 7931 7932 if (isAsmOnlyOpcode(MCOp)) 7933 return -1; 7934 7935 return MCOp; 7936 } 7937 7938 static 7939 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) { 7940 assert(RegOpnd.isReg()); 7941 return RegOpnd.isUndef() ? 
TargetInstrInfo::RegSubRegPair() :
7942 getRegSubRegPair(RegOpnd);
7943 }
7944
7945 TargetInstrInfo::RegSubRegPair
7946 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7947 assert(MI.isRegSequence());
7948 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
7949 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7950 auto &RegOp = MI.getOperand(1 + 2 * I);
7951 return getRegOrUndef(RegOp);
7952 }
7953 return TargetInstrInfo::RegSubRegPair();
7954 }
7955
7956 // Try to find the definition of reg:subreg in subreg-manipulation pseudos.
7957 // Following a subreg of reg:subreg isn't supported.
7958 static bool followSubRegDef(MachineInstr &MI,
7959 TargetInstrInfo::RegSubRegPair &RSR) {
7960 if (!RSR.SubReg)
7961 return false;
7962 switch (MI.getOpcode()) {
7963 default: break;
7964 case AMDGPU::REG_SEQUENCE:
7965 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7966 return true;
7967 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
7968 case AMDGPU::INSERT_SUBREG:
7969 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7970 // inserted the subreg we're looking for
7971 RSR = getRegOrUndef(MI.getOperand(2));
7972 else { // the subreg in the rest of the reg
7973 auto R1 = getRegOrUndef(MI.getOperand(1));
7974 if (R1.SubReg) // subreg of subreg isn't supported
7975 return false;
7976 RSR.Reg = R1.Reg;
7977 }
7978 return true;
7979 }
7980 return false;
7981 }
7982
7983 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
7984 MachineRegisterInfo &MRI) {
7985 assert(MRI.isSSA());
7986 if (!P.Reg.isVirtual())
7987 return nullptr;
7988
7989 auto RSR = P;
7990 auto *DefInst = MRI.getVRegDef(RSR.Reg);
7991 while (auto *MI = DefInst) {
7992 DefInst = nullptr;
7993 switch (MI->getOpcode()) {
7994 case AMDGPU::COPY:
7995 case AMDGPU::V_MOV_B32_e32: {
7996 auto &Op1 = MI->getOperand(1);
7997 if (Op1.isReg() && Op1.getReg().isVirtual()) {
7998 if (Op1.isUndef())
7999 return nullptr;
8000 RSR = getRegSubRegPair(Op1);
8001 DefInst = MRI.getVRegDef(RSR.Reg);
8002 }
8003 break;
8004 }
8005 default:
8006 if (followSubRegDef(*MI, RSR)) {
8007 if (!RSR.Reg)
8008 return nullptr;
8009 DefInst = MRI.getVRegDef(RSR.Reg);
8010 }
8011 }
8012 if (!DefInst)
8013 return MI;
8014 }
8015 return nullptr;
8016 }
8017
8018 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
8019 Register VReg,
8020 const MachineInstr &DefMI,
8021 const MachineInstr &UseMI) {
8022 assert(MRI.isSSA() && "Must be run on SSA");
8023
8024 auto *TRI = MRI.getTargetRegisterInfo();
8025 auto *DefBB = DefMI.getParent();
8026
8027 // Don't bother searching between blocks, although it is possible this block
8028 // doesn't modify exec.
8029 if (UseMI.getParent() != DefBB)
8030 return true;
8031
8032 const int MaxInstScan = 20;
8033 int NumInst = 0;
8034
8035 // Stop scan at the use.
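// If the scan window (MaxInstScan) is exceeded, conservatively assume EXEC
// may be modified.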
8036 auto E = UseMI.getIterator(); 8037 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 8038 if (I->isDebugInstr()) 8039 continue; 8040 8041 if (++NumInst > MaxInstScan) 8042 return true; 8043 8044 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 8045 return true; 8046 } 8047 8048 return false; 8049 } 8050 8051 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 8052 Register VReg, 8053 const MachineInstr &DefMI) { 8054 assert(MRI.isSSA() && "Must be run on SSA"); 8055 8056 auto *TRI = MRI.getTargetRegisterInfo(); 8057 auto *DefBB = DefMI.getParent(); 8058 8059 const int MaxUseScan = 10; 8060 int NumUse = 0; 8061 8062 for (auto &Use : MRI.use_nodbg_operands(VReg)) { 8063 auto &UseInst = *Use.getParent(); 8064 // Don't bother searching between blocks, although it is possible this block 8065 // doesn't modify exec. 8066 if (UseInst.getParent() != DefBB) 8067 return true; 8068 8069 if (++NumUse > MaxUseScan) 8070 return true; 8071 } 8072 8073 if (NumUse == 0) 8074 return false; 8075 8076 const int MaxInstScan = 20; 8077 int NumInst = 0; 8078 8079 // Stop scan when we have seen all the uses. 8080 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 8081 assert(I != DefBB->end()); 8082 8083 if (I->isDebugInstr()) 8084 continue; 8085 8086 if (++NumInst > MaxInstScan) 8087 return true; 8088 8089 for (const MachineOperand &Op : I->operands()) { 8090 // We don't check reg masks here as they're used only on calls: 8091 // 1. EXEC is only considered const within one BB 8092 // 2. Call should be a terminator instruction if present in a BB 8093 8094 if (!Op.isReg()) 8095 continue; 8096 8097 Register Reg = Op.getReg(); 8098 if (Op.isUse()) { 8099 if (Reg == VReg && --NumUse == 0) 8100 return false; 8101 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC)) 8102 return true; 8103 } 8104 } 8105 } 8106 8107 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 8108 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 8109 const DebugLoc &DL, Register Src, Register Dst) const { 8110 auto Cur = MBB.begin(); 8111 if (Cur != MBB.end()) 8112 do { 8113 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 8114 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 8115 ++Cur; 8116 } while (Cur != MBB.end() && Cur != LastPHIIt); 8117 8118 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 8119 Dst); 8120 } 8121 8122 MachineInstr *SIInstrInfo::createPHISourceCopy( 8123 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 8124 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const { 8125 if (InsPt != MBB.end() && 8126 (InsPt->getOpcode() == AMDGPU::SI_IF || 8127 InsPt->getOpcode() == AMDGPU::SI_ELSE || 8128 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 8129 InsPt->definesRegister(Src)) { 8130 InsPt++; 8131 return BuildMI(MBB, InsPt, DL, 8132 get(ST.isWave32() ? AMDGPU::S_MOV_B32_term 8133 : AMDGPU::S_MOV_B64_term), 8134 Dst) 8135 .addReg(Src, 0, SrcSubReg) 8136 .addReg(AMDGPU::EXEC, RegState::Implicit); 8137 } 8138 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 8139 Dst); 8140 } 8141 8142 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 8143 8144 MachineInstr *SIInstrInfo::foldMemoryOperandImpl( 8145 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 8146 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, 8147 VirtRegMap *VRM) const { 8148 // This is a bit of a hack (copied from AArch64). 
Consider this instruction: 8149 // 8150 // %0:sreg_32 = COPY $m0 8151 // 8152 // We explicitly chose SReg_32 for the virtual register so such a copy might 8153 // be eliminated by RegisterCoalescer. However, that may not be possible, and 8154 // %0 may even spill. We can't spill $m0 normally (it would require copying to 8155 // a numbered SGPR anyway), and since it is in the SReg_32 register class, 8156 // TargetInstrInfo::foldMemoryOperand() is going to try. 8157 // A similar issue also exists with spilling and reloading $exec registers. 8158 // 8159 // To prevent that, constrain the %0 register class here. 8160 if (MI.isFullCopy()) { 8161 Register DstReg = MI.getOperand(0).getReg(); 8162 Register SrcReg = MI.getOperand(1).getReg(); 8163 if ((DstReg.isVirtual() || SrcReg.isVirtual()) && 8164 (DstReg.isVirtual() != SrcReg.isVirtual())) { 8165 MachineRegisterInfo &MRI = MF.getRegInfo(); 8166 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg; 8167 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg); 8168 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) { 8169 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 8170 return nullptr; 8171 } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) { 8172 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass); 8173 return nullptr; 8174 } 8175 } 8176 } 8177 8178 return nullptr; 8179 } 8180 8181 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 8182 const MachineInstr &MI, 8183 unsigned *PredCost) const { 8184 if (MI.isBundle()) { 8185 MachineBasicBlock::const_instr_iterator I(MI.getIterator()); 8186 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end()); 8187 unsigned Lat = 0, Count = 0; 8188 for (++I; I != E && I->isBundledWithPred(); ++I) { 8189 ++Count; 8190 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I)); 8191 } 8192 return Lat + Count - 1; 8193 } 8194 8195 return SchedModel.computeInstrLatency(&MI); 8196 } 8197 8198 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) { 8199 switch (MF.getFunction().getCallingConv()) { 8200 case CallingConv::AMDGPU_PS: 8201 return 1; 8202 case CallingConv::AMDGPU_VS: 8203 return 2; 8204 case CallingConv::AMDGPU_GS: 8205 return 3; 8206 case CallingConv::AMDGPU_HS: 8207 case CallingConv::AMDGPU_LS: 8208 case CallingConv::AMDGPU_ES: 8209 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 8210 case CallingConv::AMDGPU_CS: 8211 case CallingConv::AMDGPU_KERNEL: 8212 case CallingConv::C: 8213 case CallingConv::Fast: 8214 default: 8215 // Assume other calling conventions are various compute callable functions 8216 return 0; 8217 } 8218 } 8219 8220 bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, 8221 Register &SrcReg2, int64_t &CmpMask, 8222 int64_t &CmpValue) const { 8223 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg()) 8224 return false; 8225 8226 switch (MI.getOpcode()) { 8227 default: 8228 break; 8229 case AMDGPU::S_CMP_EQ_U32: 8230 case AMDGPU::S_CMP_EQ_I32: 8231 case AMDGPU::S_CMP_LG_U32: 8232 case AMDGPU::S_CMP_LG_I32: 8233 case AMDGPU::S_CMP_LT_U32: 8234 case AMDGPU::S_CMP_LT_I32: 8235 case AMDGPU::S_CMP_GT_U32: 8236 case AMDGPU::S_CMP_GT_I32: 8237 case AMDGPU::S_CMP_LE_U32: 8238 case AMDGPU::S_CMP_LE_I32: 8239 case AMDGPU::S_CMP_GE_U32: 8240 case AMDGPU::S_CMP_GE_I32: 8241 case AMDGPU::S_CMP_EQ_U64: 8242 case AMDGPU::S_CMP_LG_U64: 8243 SrcReg = MI.getOperand(0).getReg(); 8244 if (MI.getOperand(1).isReg()) { 8245 if (MI.getOperand(1).getSubReg()) 
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}

unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions
    return 0;
  }
}

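// Illustrative example (the register and constant here are hypothetical): for
// a compare such as
//   S_CMP_EQ_U32 %5, 12
// analyzeCompare() returns true with SrcReg = %5, SrcReg2 = <no register>,
// CmpValue = 12 and CmpMask = ~0, which is the form optimizeCompareInstr()
// consumes.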
bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                 Register &SrcReg2, int64_t &CmpMask,
                                 int64_t &CmpValue) const {
  if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMP_LT_U32:
  case AMDGPU::S_CMP_LT_I32:
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMP_LE_U32:
  case AMDGPU::S_CMP_LE_I32:
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMP_EQ_U64:
  case AMDGPU::S_CMP_LG_U64:
    SrcReg = MI.getOperand(0).getReg();
    if (MI.getOperand(1).isReg()) {
      if (MI.getOperand(1).getSubReg())
        return false;
      SrcReg2 = MI.getOperand(1).getReg();
      CmpValue = 0;
    } else if (MI.getOperand(1).isImm()) {
      SrcReg2 = Register();
      CmpValue = MI.getOperand(1).getImm();
    } else {
      return false;
    }
    CmpMask = ~0;
    return true;
  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
  case AMDGPU::S_CMPK_LT_U32:
  case AMDGPU::S_CMPK_LT_I32:
  case AMDGPU::S_CMPK_GT_U32:
  case AMDGPU::S_CMPK_GT_I32:
  case AMDGPU::S_CMPK_LE_U32:
  case AMDGPU::S_CMPK_LE_I32:
  case AMDGPU::S_CMPK_GE_U32:
  case AMDGPU::S_CMPK_GE_I32:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = Register();
    CmpValue = MI.getOperand(1).getImm();
    CmpMask = ~0;
    return true;
  }

  return false;
}

bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                       Register SrcReg2, int64_t CmpMask,
                                       int64_t CmpValue,
                                       const MachineRegisterInfo *MRI) const {
  if (!SrcReg || SrcReg.isPhysical())
    return false;

  if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue))
    return false;

  const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
                               this](int64_t ExpectedValue, unsigned SrcSize,
                                     bool IsReversible, bool IsSigned) -> bool {
    // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n
    // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n
    //
    // Signed ge/gt are not used for the sign bit.
    //
    // If the result of the AND is unused except in the compare:
    // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n
    //
    // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
    // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
    // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n
    // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
    // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
    // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n
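    //
    // Worked example (illustrative; the registers and value of n are made
    // up): with n = 3 the mask is 8, so
    //   %1 = S_AND_B32 %0, 8
    //   S_CMP_LG_U32 %1, 0
    // folds to the S_AND_B32 alone if %1 has other uses, or to
    //   S_BITCMP1_B32 %0, 3
    // if the compare was the only user of %1. Either way SCC ends up holding
    // bit 3 of %0.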

    MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg);
    if (!Def || Def->getParent() != CmpInstr.getParent())
      return false;

    if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
        Def->getOpcode() != AMDGPU::S_AND_B64)
      return false;

    int64_t Mask;
    const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {
      if (MO->isImm())
        Mask = MO->getImm();
      else if (!getFoldableImm(MO, Mask))
        return false;
      Mask &= maxUIntN(SrcSize);
      return isPowerOf2_64(Mask);
    };

    MachineOperand *SrcOp = &Def->getOperand(1);
    if (isMask(SrcOp))
      SrcOp = &Def->getOperand(2);
    else if (isMask(&Def->getOperand(2)))
      SrcOp = &Def->getOperand(1);
    else
      return false;

    unsigned BitNo = countTrailingZeros((uint64_t)Mask);
    if (IsSigned && BitNo == SrcSize - 1)
      return false;

    ExpectedValue <<= BitNo;

    bool IsReversedCC = false;
    if (CmpValue != ExpectedValue) {
      if (!IsReversible)
        return false;
      IsReversedCC = CmpValue == (ExpectedValue ^ Mask);
      if (!IsReversedCC)
        return false;
    }

    Register DefReg = Def->getOperand(0).getReg();
    if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
      return false;

    for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator();
         I != E; ++I) {
      if (I->modifiesRegister(AMDGPU::SCC, &RI) ||
          I->killsRegister(AMDGPU::SCC, &RI))
        return false;
    }

    MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC);
    SccDef->setIsDead(false);
    CmpInstr.eraseFromParent();

    if (!MRI->use_nodbg_empty(DefReg)) {
      assert(!IsReversedCC);
      return true;
    }

    // Replace the AND, whose result is now unused, with an S_BITCMP.
    MachineBasicBlock *MBB = Def->getParent();

    unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
                                                     : AMDGPU::S_BITCMP1_B32
                                      : IsReversedCC ? AMDGPU::S_BITCMP0_B64
                                                     : AMDGPU::S_BITCMP1_B64;

    BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc))
        .add(*SrcOp)
        .addImm(BitNo);
    Def->eraseFromParent();

    return true;
  };

  switch (CmpInstr.getOpcode()) {
  default:
    break;
  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
    return optimizeCmpAnd(1, 32, true, false);
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMPK_GE_U32:
    return optimizeCmpAnd(1, 32, false, false);
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMPK_GE_I32:
    return optimizeCmpAnd(1, 32, false, true);
  case AMDGPU::S_CMP_EQ_U64:
    return optimizeCmpAnd(1, 64, true, false);
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
    return optimizeCmpAnd(0, 32, true, false);
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMPK_GT_U32:
    return optimizeCmpAnd(0, 32, false, false);
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMPK_GT_I32:
    return optimizeCmpAnd(0, 32, false, true);
  case AMDGPU::S_CMP_LG_U64:
    return optimizeCmpAnd(0, 64, true, false);
  }

  return false;
}
