1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// SI Implementation of TargetInstrInfo. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIInstrInfo.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUInstrInfo.h" 17 #include "GCNHazardRecognizer.h" 18 #include "GCNSubtarget.h" 19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/LiveIntervals.h" 23 #include "llvm/CodeGen/LiveVariables.h" 24 #include "llvm/CodeGen/MachineDominators.h" 25 #include "llvm/CodeGen/MachineFrameInfo.h" 26 #include "llvm/CodeGen/MachineScheduler.h" 27 #include "llvm/CodeGen/RegisterScavenging.h" 28 #include "llvm/CodeGen/ScheduleDAG.h" 29 #include "llvm/IR/DiagnosticInfo.h" 30 #include "llvm/IR/IntrinsicsAMDGPU.h" 31 #include "llvm/MC/MCContext.h" 32 #include "llvm/Support/CommandLine.h" 33 #include "llvm/Target/TargetMachine.h" 34 35 using namespace llvm; 36 37 #define DEBUG_TYPE "si-instr-info" 38 39 #define GET_INSTRINFO_CTOR_DTOR 40 #include "AMDGPUGenInstrInfo.inc" 41 42 namespace llvm { 43 44 class AAResults; 45 46 namespace AMDGPU { 47 #define GET_D16ImageDimIntrinsics_IMPL 48 #define GET_ImageDimIntrinsicTable_IMPL 49 #define GET_RsrcIntrinsics_IMPL 50 #include "AMDGPUGenSearchableTables.inc" 51 } 52 } 53 54 55 // Must be at least 4 to be able to branch over minimum unconditional branch 56 // code. This is only for making it possible to write reasonably small tests for 57 // long branches. 58 static cl::opt<unsigned> 59 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), 60 cl::desc("Restrict range of branch instructions (DEBUG)")); 61 62 static cl::opt<bool> Fix16BitCopies( 63 "amdgpu-fix-16-bit-physreg-copies", 64 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), 65 cl::init(true), 66 cl::ReallyHidden); 67 68 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST) 69 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), 70 RI(ST), ST(ST) { 71 SchedModel.init(&ST); 72 } 73 74 //===----------------------------------------------------------------------===// 75 // TargetInstrInfo callbacks 76 //===----------------------------------------------------------------------===// 77 78 static unsigned getNumOperandsNoGlue(SDNode *Node) { 79 unsigned N = Node->getNumOperands(); 80 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 81 --N; 82 return N; 83 } 84 85 /// Returns true if both nodes have the same value for the given 86 /// operand \p Op, or if both nodes do not have this operand. 
87 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { 88 unsigned Opc0 = N0->getMachineOpcode(); 89 unsigned Opc1 = N1->getMachineOpcode(); 90 91 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); 92 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); 93 94 if (Op0Idx == -1 && Op1Idx == -1) 95 return true; 96 97 98 if ((Op0Idx == -1 && Op1Idx != -1) || 99 (Op1Idx == -1 && Op0Idx != -1)) 100 return false; 101 102 // getNamedOperandIdx returns the index for the MachineInstr's operands, 103 // which includes the result as the first operand. We are indexing into the 104 // MachineSDNode's operands, so we need to skip the result operand to get 105 // the real index. 106 --Op0Idx; 107 --Op1Idx; 108 109 return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx); 110 } 111 112 bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI, 113 AAResults *AA) const { 114 if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI) || isSALU(MI)) { 115 // Normally VALU use of exec would block the rematerialization, but that 116 // is OK in this case to have an implicit exec read as all VALU do. 117 // We really want all of the generic logic for this except for this. 118 119 // Another potential implicit use is mode register. The core logic of 120 // the RA will not attempt rematerialization if mode is set anywhere 121 // in the function, otherwise it is safe since mode is not changed. 122 123 // There is difference to generic method which does not allow 124 // rematerialization if there are virtual register uses. We allow this, 125 // therefore this method includes SOP instructions as well. 126 return !MI.hasImplicitDef() && 127 MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() && 128 !MI.mayRaiseFPException(); 129 } 130 131 return false; 132 } 133 134 // Returns true if the scalar result of a VALU instruction depends on exec. 135 static bool resultDependsOnExec(const MachineInstr &MI) { 136 // Ignore comparisons which are only used masked with exec. 137 // This allows some hoisting/sinking of VALU comparisons. 138 if (MI.isCompare()) { 139 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 140 Register DstReg = MI.getOperand(0).getReg(); 141 if (!DstReg.isVirtual()) 142 return true; 143 for (MachineInstr &Use : MRI.use_nodbg_instructions(DstReg)) { 144 switch (Use.getOpcode()) { 145 case AMDGPU::S_AND_SAVEEXEC_B32: 146 case AMDGPU::S_AND_SAVEEXEC_B64: 147 break; 148 case AMDGPU::S_AND_B32: 149 case AMDGPU::S_AND_B64: 150 if (!Use.readsRegister(AMDGPU::EXEC)) 151 return true; 152 break; 153 default: 154 return true; 155 } 156 } 157 return false; 158 } 159 160 switch (MI.getOpcode()) { 161 default: 162 break; 163 case AMDGPU::V_READFIRSTLANE_B32: 164 return true; 165 } 166 167 return false; 168 } 169 170 bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const { 171 // Any implicit use of exec by VALU is not a real register read. 172 return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() && 173 isVALU(*MO.getParent()) && !resultDependsOnExec(*MO.getParent()); 174 } 175 176 bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1, 177 int64_t &Offset0, 178 int64_t &Offset1) const { 179 if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode()) 180 return false; 181 182 unsigned Opc0 = Load0->getMachineOpcode(); 183 unsigned Opc1 = Load1->getMachineOpcode(); 184 185 // Make sure both are actually loads. 
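  // Only DS, SMRD, and MUBUF/MTBUF loads are recognized below; FLAT/global
  // loads are not handled here and fall through to the final return false.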
186 if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad()) 187 return false; 188 189 if (isDS(Opc0) && isDS(Opc1)) { 190 191 // FIXME: Handle this case: 192 if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1)) 193 return false; 194 195 // Check base reg. 196 if (Load0->getOperand(0) != Load1->getOperand(0)) 197 return false; 198 199 // Skip read2 / write2 variants for simplicity. 200 // TODO: We should report true if the used offsets are adjacent (excluded 201 // st64 versions). 202 int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 203 int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 204 if (Offset0Idx == -1 || Offset1Idx == -1) 205 return false; 206 207 // XXX - be careful of dataless loads 208 // getNamedOperandIdx returns the index for MachineInstrs. Since they 209 // include the output in the operand list, but SDNodes don't, we need to 210 // subtract the index by one. 211 Offset0Idx -= get(Opc0).NumDefs; 212 Offset1Idx -= get(Opc1).NumDefs; 213 Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue(); 214 Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue(); 215 return true; 216 } 217 218 if (isSMRD(Opc0) && isSMRD(Opc1)) { 219 // Skip time and cache invalidation instructions. 220 if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 || 221 AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1) 222 return false; 223 224 assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1)); 225 226 // Check base reg. 227 if (Load0->getOperand(0) != Load1->getOperand(0)) 228 return false; 229 230 const ConstantSDNode *Load0Offset = 231 dyn_cast<ConstantSDNode>(Load0->getOperand(1)); 232 const ConstantSDNode *Load1Offset = 233 dyn_cast<ConstantSDNode>(Load1->getOperand(1)); 234 235 if (!Load0Offset || !Load1Offset) 236 return false; 237 238 Offset0 = Load0Offset->getZExtValue(); 239 Offset1 = Load1Offset->getZExtValue(); 240 return true; 241 } 242 243 // MUBUF and MTBUF can access the same addresses. 244 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) { 245 246 // MUBUF and MTBUF have vaddr at different indices. 247 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || 248 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || 249 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) 250 return false; 251 252 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 253 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 254 255 if (OffIdx0 == -1 || OffIdx1 == -1) 256 return false; 257 258 // getNamedOperandIdx returns the index for MachineInstrs. Since they 259 // include the output in the operand list, but SDNodes don't, we need to 260 // subtract the index by one. 261 OffIdx0 -= get(Opc0).NumDefs; 262 OffIdx1 -= get(Opc1).NumDefs; 263 264 SDValue Off0 = Load0->getOperand(OffIdx0); 265 SDValue Off1 = Load1->getOperand(OffIdx1); 266 267 // The offset might be a FrameIndexSDNode. 
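    // If either offset is still a frame index (a stack object whose final
    // offset is not yet known), it cannot be compared as a constant, so
    // conservatively report the two loads as unrelated.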
268 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 269 return false; 270 271 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 272 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 273 return true; 274 } 275 276 return false; 277 } 278 279 static bool isStride64(unsigned Opc) { 280 switch (Opc) { 281 case AMDGPU::DS_READ2ST64_B32: 282 case AMDGPU::DS_READ2ST64_B64: 283 case AMDGPU::DS_WRITE2ST64_B32: 284 case AMDGPU::DS_WRITE2ST64_B64: 285 return true; 286 default: 287 return false; 288 } 289 } 290 291 bool SIInstrInfo::getMemOperandsWithOffsetWidth( 292 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 293 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, 294 const TargetRegisterInfo *TRI) const { 295 if (!LdSt.mayLoadOrStore()) 296 return false; 297 298 unsigned Opc = LdSt.getOpcode(); 299 OffsetIsScalable = false; 300 const MachineOperand *BaseOp, *OffsetOp; 301 int DataOpIdx; 302 303 if (isDS(LdSt)) { 304 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 305 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 306 if (OffsetOp) { 307 // Normal, single offset LDS instruction. 308 if (!BaseOp) { 309 // DS_CONSUME/DS_APPEND use M0 for the base address. 310 // TODO: find the implicit use operand for M0 and use that as BaseOp? 311 return false; 312 } 313 BaseOps.push_back(BaseOp); 314 Offset = OffsetOp->getImm(); 315 // Get appropriate operand, and compute width accordingly. 316 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 317 if (DataOpIdx == -1) 318 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 319 Width = getOpSize(LdSt, DataOpIdx); 320 } else { 321 // The 2 offset instructions use offset0 and offset1 instead. We can treat 322 // these as a load with a single offset if the 2 offsets are consecutive. 323 // We will use this for some partially aligned loads. 324 const MachineOperand *Offset0Op = 325 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 326 const MachineOperand *Offset1Op = 327 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 328 329 unsigned Offset0 = Offset0Op->getImm(); 330 unsigned Offset1 = Offset1Op->getImm(); 331 if (Offset0 + 1 != Offset1) 332 return false; 333 334 // Each of these offsets is in element sized units, so we need to convert 335 // to bytes of the individual reads. 336 337 unsigned EltSize; 338 if (LdSt.mayLoad()) 339 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 340 else { 341 assert(LdSt.mayStore()); 342 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 343 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 344 } 345 346 if (isStride64(Opc)) 347 EltSize *= 64; 348 349 BaseOps.push_back(BaseOp); 350 Offset = EltSize * Offset0; 351 // Get appropriate operand(s), and compute width accordingly. 352 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 353 if (DataOpIdx == -1) { 354 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 355 Width = getOpSize(LdSt, DataOpIdx); 356 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1); 357 Width += getOpSize(LdSt, DataOpIdx); 358 } else { 359 Width = getOpSize(LdSt, DataOpIdx); 360 } 361 } 362 return true; 363 } 364 365 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 366 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 367 if (!RSrc) // e.g. 
BUFFER_WBINVL1_VOL 368 return false; 369 BaseOps.push_back(RSrc); 370 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 371 if (BaseOp && !BaseOp->isFI()) 372 BaseOps.push_back(BaseOp); 373 const MachineOperand *OffsetImm = 374 getNamedOperand(LdSt, AMDGPU::OpName::offset); 375 Offset = OffsetImm->getImm(); 376 const MachineOperand *SOffset = 377 getNamedOperand(LdSt, AMDGPU::OpName::soffset); 378 if (SOffset) { 379 if (SOffset->isReg()) 380 BaseOps.push_back(SOffset); 381 else 382 Offset += SOffset->getImm(); 383 } 384 // Get appropriate operand, and compute width accordingly. 385 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 386 if (DataOpIdx == -1) 387 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 388 if (DataOpIdx == -1) // LDS DMA 389 return false; 390 Width = getOpSize(LdSt, DataOpIdx); 391 return true; 392 } 393 394 if (isMIMG(LdSt)) { 395 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 396 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx)); 397 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 398 if (VAddr0Idx >= 0) { 399 // GFX10 possible NSA encoding. 400 for (int I = VAddr0Idx; I < SRsrcIdx; ++I) 401 BaseOps.push_back(&LdSt.getOperand(I)); 402 } else { 403 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr)); 404 } 405 Offset = 0; 406 // Get appropriate operand, and compute width accordingly. 407 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 408 Width = getOpSize(LdSt, DataOpIdx); 409 return true; 410 } 411 412 if (isSMRD(LdSt)) { 413 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 414 if (!BaseOp) // e.g. S_MEMTIME 415 return false; 416 BaseOps.push_back(BaseOp); 417 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 418 Offset = OffsetOp ? OffsetOp->getImm() : 0; 419 // Get appropriate operand, and compute width accordingly. 420 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst); 421 Width = getOpSize(LdSt, DataOpIdx); 422 return true; 423 } 424 425 if (isFLAT(LdSt)) { 426 // Instructions have either vaddr or saddr or both or none. 427 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 428 if (BaseOp) 429 BaseOps.push_back(BaseOp); 430 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr); 431 if (BaseOp) 432 BaseOps.push_back(BaseOp); 433 Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm(); 434 // Get appropriate operand, and compute width accordingly. 435 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 436 if (DataOpIdx == -1) 437 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 438 if (DataOpIdx == -1) // LDS DMA 439 return false; 440 Width = getOpSize(LdSt, DataOpIdx); 441 return true; 442 } 443 444 return false; 445 } 446 447 static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, 448 ArrayRef<const MachineOperand *> BaseOps1, 449 const MachineInstr &MI2, 450 ArrayRef<const MachineOperand *> BaseOps2) { 451 // Only examine the first "base" operand of each instruction, on the 452 // assumption that it represents the real base address of the memory access. 453 // Other operands are typically offsets or indices from this base address. 
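  // First try a cheap syntactic comparison of the base operands themselves;
  // if that fails, fall back to comparing the underlying IR objects of the
  // single memory operands below.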
454 if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front())) 455 return true; 456 457 if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand()) 458 return false; 459 460 auto MO1 = *MI1.memoperands_begin(); 461 auto MO2 = *MI2.memoperands_begin(); 462 if (MO1->getAddrSpace() != MO2->getAddrSpace()) 463 return false; 464 465 auto Base1 = MO1->getValue(); 466 auto Base2 = MO2->getValue(); 467 if (!Base1 || !Base2) 468 return false; 469 Base1 = getUnderlyingObject(Base1); 470 Base2 = getUnderlyingObject(Base2); 471 472 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2)) 473 return false; 474 475 return Base1 == Base2; 476 } 477 478 bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1, 479 ArrayRef<const MachineOperand *> BaseOps2, 480 unsigned NumLoads, 481 unsigned NumBytes) const { 482 // If the mem ops (to be clustered) do not have the same base ptr, then they 483 // should not be clustered 484 if (!BaseOps1.empty() && !BaseOps2.empty()) { 485 const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent(); 486 const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent(); 487 if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2)) 488 return false; 489 } else if (!BaseOps1.empty() || !BaseOps2.empty()) { 490 // If only one base op is empty, they do not have the same base ptr 491 return false; 492 } 493 494 // In order to avoid register pressure, on an average, the number of DWORDS 495 // loaded together by all clustered mem ops should not exceed 8. This is an 496 // empirical value based on certain observations and performance related 497 // experiments. 498 // The good thing about this heuristic is - it avoids clustering of too many 499 // sub-word loads, and also avoids clustering of wide loads. Below is the 500 // brief summary of how the heuristic behaves for various `LoadSize`. 501 // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops 502 // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops 503 // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops 504 // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops 505 // (5) LoadSize >= 17: do not cluster 506 const unsigned LoadSize = NumBytes / NumLoads; 507 const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads; 508 return NumDWORDs <= 8; 509 } 510 511 // FIXME: This behaves strangely. If, for example, you have 32 load + stores, 512 // the first 16 loads will be interleaved with the stores, and the next 16 will 513 // be clustered as expected. It should really split into 2 16 store batches. 514 // 515 // Loads are clustered until this returns false, rather than trying to schedule 516 // groups of stores. This also means we have to deal with saying different 517 // address space loads should be clustered, and ones which might cause bank 518 // conflicts. 519 // 520 // This might be deprecated so it might not be worth that much effort to fix. 521 bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1, 522 int64_t Offset0, int64_t Offset1, 523 unsigned NumLoads) const { 524 assert(Offset1 > Offset0 && 525 "Second offset should be larger than first offset!"); 526 // If we have less than 16 loads in a row, and the offsets are within 64 527 // bytes, then schedule together. 528 529 // A cacheline is 64 bytes (for global memory). 
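  // Note this only bounds the distance between the two offsets; it does not
  // prove both accesses hit the same cacheline, which also depends on the
  // alignment of the base address. E.g. offsets 0 and 60 pass, 0 and 64 do
  // not.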
530 return (NumLoads <= 16 && (Offset1 - Offset0) < 64); 531 } 532 533 static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB, 534 MachineBasicBlock::iterator MI, 535 const DebugLoc &DL, MCRegister DestReg, 536 MCRegister SrcReg, bool KillSrc, 537 const char *Msg = "illegal SGPR to VGPR copy") { 538 MachineFunction *MF = MBB.getParent(); 539 DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error); 540 LLVMContext &C = MF->getFunction().getContext(); 541 C.diagnose(IllegalCopy); 542 543 BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg) 544 .addReg(SrcReg, getKillRegState(KillSrc)); 545 } 546 547 /// Handle copying from SGPR to AGPR, or from AGPR to AGPR on GFX908. It is not 548 /// possible to have a direct copy in these cases on GFX908, so an intermediate 549 /// VGPR copy is required. 550 static void indirectCopyToAGPR(const SIInstrInfo &TII, 551 MachineBasicBlock &MBB, 552 MachineBasicBlock::iterator MI, 553 const DebugLoc &DL, MCRegister DestReg, 554 MCRegister SrcReg, bool KillSrc, 555 RegScavenger &RS, 556 Register ImpDefSuperReg = Register(), 557 Register ImpUseSuperReg = Register()) { 558 assert((TII.getSubtarget().hasMAIInsts() && 559 !TII.getSubtarget().hasGFX90AInsts()) && 560 "Expected GFX908 subtarget."); 561 562 assert((AMDGPU::SReg_32RegClass.contains(SrcReg) || 563 AMDGPU::AGPR_32RegClass.contains(SrcReg)) && 564 "Source register of the copy should be either an SGPR or an AGPR."); 565 566 assert(AMDGPU::AGPR_32RegClass.contains(DestReg) && 567 "Destination register of the copy should be an AGPR."); 568 569 const SIRegisterInfo &RI = TII.getRegisterInfo(); 570 571 // First try to find defining accvgpr_write to avoid temporary registers. 572 for (auto Def = MI, E = MBB.begin(); Def != E; ) { 573 --Def; 574 if (!Def->definesRegister(SrcReg, &RI)) 575 continue; 576 if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64) 577 break; 578 579 MachineOperand &DefOp = Def->getOperand(1); 580 assert(DefOp.isReg() || DefOp.isImm()); 581 582 if (DefOp.isReg()) { 583 // Check that register source operand if not clobbered before MI. 584 // Immediate operands are always safe to propagate. 585 bool SafeToPropagate = true; 586 for (auto I = Def; I != MI && SafeToPropagate; ++I) 587 if (I->modifiesRegister(DefOp.getReg(), &RI)) 588 SafeToPropagate = false; 589 590 if (!SafeToPropagate) 591 break; 592 593 DefOp.setIsKill(false); 594 } 595 596 MachineInstrBuilder Builder = 597 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 598 .add(DefOp); 599 if (ImpDefSuperReg) 600 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 601 602 if (ImpUseSuperReg) { 603 Builder.addReg(ImpUseSuperReg, 604 getKillRegState(KillSrc) | RegState::Implicit); 605 } 606 607 return; 608 } 609 610 RS.enterBasicBlock(MBB); 611 RS.forward(MI); 612 613 // Ideally we want to have three registers for a long reg_sequence copy 614 // to hide 2 waitstates between v_mov_b32 and accvgpr_write. 615 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, 616 *MBB.getParent()); 617 618 // Registers in the sequence are allocated contiguously so we can just 619 // use register number to pick one of three round-robin temps. 
620 unsigned RegNo = DestReg % 3; 621 Register Tmp = 622 MBB.getParent()->getInfo<SIMachineFunctionInfo>()->getVGPRForAGPRCopy(); 623 assert(MBB.getParent()->getRegInfo().isReserved(Tmp) && 624 "VGPR used for an intermediate copy should have been reserved."); 625 626 // Only loop through if there are any free registers left, otherwise 627 // scavenger may report a fatal error without emergency spill slot 628 // or spill with the slot. 629 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) { 630 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 631 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) 632 break; 633 Tmp = Tmp2; 634 RS.setRegUsed(Tmp); 635 } 636 637 // Insert copy to temporary VGPR. 638 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32; 639 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) { 640 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64; 641 } else { 642 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 643 } 644 645 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp) 646 .addReg(SrcReg, getKillRegState(KillSrc)); 647 if (ImpUseSuperReg) { 648 UseBuilder.addReg(ImpUseSuperReg, 649 getKillRegState(KillSrc) | RegState::Implicit); 650 } 651 652 MachineInstrBuilder DefBuilder 653 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 654 .addReg(Tmp, RegState::Kill); 655 656 if (ImpDefSuperReg) 657 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 658 } 659 660 static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, 661 MachineBasicBlock::iterator MI, const DebugLoc &DL, 662 MCRegister DestReg, MCRegister SrcReg, bool KillSrc, 663 const TargetRegisterClass *RC, bool Forward) { 664 const SIRegisterInfo &RI = TII.getRegisterInfo(); 665 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4); 666 MachineBasicBlock::iterator I = MI; 667 MachineInstr *FirstMI = nullptr, *LastMI = nullptr; 668 669 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) { 670 int16_t SubIdx = BaseIndices[Idx]; 671 Register Reg = RI.getSubReg(DestReg, SubIdx); 672 unsigned Opcode = AMDGPU::S_MOV_B32; 673 674 // Is SGPR aligned? If so try to combine with next. 675 Register Src = RI.getSubReg(SrcReg, SubIdx); 676 bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0; 677 bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0; 678 if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) { 679 // Can use SGPR64 copy 680 unsigned Channel = RI.getChannelFromSubReg(SubIdx); 681 SubIdx = RI.getSubRegFromChannel(Channel, 2); 682 Opcode = AMDGPU::S_MOV_B64; 683 Idx++; 684 } 685 686 LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx)) 687 .addReg(RI.getSubReg(SrcReg, SubIdx)) 688 .addReg(SrcReg, RegState::Implicit); 689 690 if (!FirstMI) 691 FirstMI = LastMI; 692 693 if (!Forward) 694 I--; 695 } 696 697 assert(FirstMI && LastMI); 698 if (!Forward) 699 std::swap(FirstMI, LastMI); 700 701 FirstMI->addOperand( 702 MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/)); 703 704 if (KillSrc) 705 LastMI->addRegisterKilled(SrcReg, &RI); 706 } 707 708 void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB, 709 MachineBasicBlock::iterator MI, 710 const DebugLoc &DL, MCRegister DestReg, 711 MCRegister SrcReg, bool KillSrc) const { 712 const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg); 713 714 // FIXME: This is hack to resolve copies between 16 bit and 32 bit 715 // registers until all patterns are fixed. 
716 if (Fix16BitCopies && 717 ((RI.getRegSizeInBits(*RC) == 16) ^ 718 (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) { 719 MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg; 720 MCRegister Super = RI.get32BitRegister(RegToFix); 721 assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix); 722 RegToFix = Super; 723 724 if (DestReg == SrcReg) { 725 // Insert empty bundle since ExpandPostRA expects an instruction here. 726 BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE)); 727 return; 728 } 729 730 RC = RI.getPhysRegClass(DestReg); 731 } 732 733 if (RC == &AMDGPU::VGPR_32RegClass) { 734 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) || 735 AMDGPU::SReg_32RegClass.contains(SrcReg) || 736 AMDGPU::AGPR_32RegClass.contains(SrcReg)); 737 unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ? 738 AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32; 739 BuildMI(MBB, MI, DL, get(Opc), DestReg) 740 .addReg(SrcReg, getKillRegState(KillSrc)); 741 return; 742 } 743 744 if (RC == &AMDGPU::SReg_32_XM0RegClass || 745 RC == &AMDGPU::SReg_32RegClass) { 746 if (SrcReg == AMDGPU::SCC) { 747 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg) 748 .addImm(1) 749 .addImm(0); 750 return; 751 } 752 753 if (DestReg == AMDGPU::VCC_LO) { 754 if (AMDGPU::SReg_32RegClass.contains(SrcReg)) { 755 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO) 756 .addReg(SrcReg, getKillRegState(KillSrc)); 757 } else { 758 // FIXME: Hack until VReg_1 removed. 759 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 760 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 761 .addImm(0) 762 .addReg(SrcReg, getKillRegState(KillSrc)); 763 } 764 765 return; 766 } 767 768 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 769 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 770 return; 771 } 772 773 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 774 .addReg(SrcReg, getKillRegState(KillSrc)); 775 return; 776 } 777 778 if (RC == &AMDGPU::SReg_64RegClass) { 779 if (SrcReg == AMDGPU::SCC) { 780 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg) 781 .addImm(1) 782 .addImm(0); 783 return; 784 } 785 786 if (DestReg == AMDGPU::VCC) { 787 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 788 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 789 .addReg(SrcReg, getKillRegState(KillSrc)); 790 } else { 791 // FIXME: Hack until VReg_1 removed. 792 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 793 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 794 .addImm(0) 795 .addReg(SrcReg, getKillRegState(KillSrc)); 796 } 797 798 return; 799 } 800 801 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 802 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 803 return; 804 } 805 806 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 807 .addReg(SrcReg, getKillRegState(KillSrc)); 808 return; 809 } 810 811 if (DestReg == AMDGPU::SCC) { 812 // Copying 64-bit or 32-bit sources to SCC barely makes sense, 813 // but SelectionDAG emits such copies for i1 sources. 814 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 815 // This copy can only be produced by patterns 816 // with explicit SCC, which are known to be enabled 817 // only for subtargets with S_CMP_LG_U64 present. 
818 assert(ST.hasScalarCompareEq64()); 819 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64)) 820 .addReg(SrcReg, getKillRegState(KillSrc)) 821 .addImm(0); 822 } else { 823 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 824 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 825 .addReg(SrcReg, getKillRegState(KillSrc)) 826 .addImm(0); 827 } 828 829 return; 830 } 831 832 if (RC == &AMDGPU::AGPR_32RegClass) { 833 if (AMDGPU::VGPR_32RegClass.contains(SrcReg) || 834 (ST.hasGFX90AInsts() && AMDGPU::SReg_32RegClass.contains(SrcReg))) { 835 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 836 .addReg(SrcReg, getKillRegState(KillSrc)); 837 return; 838 } 839 840 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) { 841 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg) 842 .addReg(SrcReg, getKillRegState(KillSrc)); 843 return; 844 } 845 846 // FIXME: Pass should maintain scavenger to avoid scan through the block on 847 // every AGPR spill. 848 RegScavenger RS; 849 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS); 850 return; 851 } 852 853 const unsigned Size = RI.getRegSizeInBits(*RC); 854 if (Size == 16) { 855 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 856 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) || 857 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 858 AMDGPU::AGPR_LO16RegClass.contains(SrcReg)); 859 860 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg); 861 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg); 862 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg); 863 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 864 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) || 865 AMDGPU::SReg_LO16RegClass.contains(DestReg) || 866 AMDGPU::AGPR_LO16RegClass.contains(DestReg); 867 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 868 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 869 AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 870 MCRegister NewDestReg = RI.get32BitRegister(DestReg); 871 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg); 872 873 if (IsSGPRDst) { 874 if (!IsSGPRSrc) { 875 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 876 return; 877 } 878 879 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg) 880 .addReg(NewSrcReg, getKillRegState(KillSrc)); 881 return; 882 } 883 884 if (IsAGPRDst || IsAGPRSrc) { 885 if (!DstLow || !SrcLow) { 886 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 887 "Cannot use hi16 subreg with an AGPR!"); 888 } 889 890 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc); 891 return; 892 } 893 894 if (IsSGPRSrc && !ST.hasSDWAScalar()) { 895 if (!DstLow || !SrcLow) { 896 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 897 "Cannot use hi16 subreg on VI!"); 898 } 899 900 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg) 901 .addReg(NewSrcReg, getKillRegState(KillSrc)); 902 return; 903 } 904 905 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg) 906 .addImm(0) // src0_modifiers 907 .addReg(NewSrcReg) 908 .addImm(0) // clamp 909 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0 910 : AMDGPU::SDWA::SdwaSel::WORD_1) 911 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) 912 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0 913 : AMDGPU::SDWA::SdwaSel::WORD_1) 914 .addReg(NewDestReg, RegState::Implicit | RegState::Undef); 915 // First implicit operand is $exec. 
916 MIB->tieOperands(0, MIB->getNumOperands() - 1); 917 return; 918 } 919 920 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg); 921 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) { 922 if (ST.hasMovB64()) { 923 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_e32), DestReg) 924 .addReg(SrcReg, getKillRegState(KillSrc)); 925 return; 926 } 927 if (ST.hasPackedFP32Ops()) { 928 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg) 929 .addImm(SISrcMods::OP_SEL_1) 930 .addReg(SrcReg) 931 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 932 .addReg(SrcReg) 933 .addImm(0) // op_sel_lo 934 .addImm(0) // op_sel_hi 935 .addImm(0) // neg_lo 936 .addImm(0) // neg_hi 937 .addImm(0) // clamp 938 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit); 939 return; 940 } 941 } 942 943 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 944 if (RI.isSGPRClass(RC)) { 945 if (!RI.isSGPRClass(SrcRC)) { 946 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 947 return; 948 } 949 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); 950 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, CanKillSuperReg, RC, 951 Forward); 952 return; 953 } 954 955 unsigned EltSize = 4; 956 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 957 if (RI.isAGPRClass(RC)) { 958 if (ST.hasGFX90AInsts() && RI.isAGPRClass(SrcRC)) 959 Opcode = AMDGPU::V_ACCVGPR_MOV_B32; 960 else if (RI.hasVGPRs(SrcRC) || 961 (ST.hasGFX90AInsts() && RI.isSGPRClass(SrcRC))) 962 Opcode = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 963 else 964 Opcode = AMDGPU::INSTRUCTION_LIST_END; 965 } else if (RI.hasVGPRs(RC) && RI.isAGPRClass(SrcRC)) { 966 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64; 967 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) && 968 (RI.isProperlyAlignedRC(*RC) && 969 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) { 970 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov. 971 if (ST.hasMovB64()) { 972 Opcode = AMDGPU::V_MOV_B64_e32; 973 EltSize = 8; 974 } else if (ST.hasPackedFP32Ops()) { 975 Opcode = AMDGPU::V_PK_MOV_B32; 976 EltSize = 8; 977 } 978 } 979 980 // For the cases where we need an intermediate instruction/temporary register 981 // (destination is an AGPR), we need a scavenger. 982 // 983 // FIXME: The pass should maintain this for us so we don't have to re-scan the 984 // whole block for every handled copy. 985 std::unique_ptr<RegScavenger> RS; 986 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) 987 RS.reset(new RegScavenger()); 988 989 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 990 991 // If there is an overlap, we can't kill the super-register on the last 992 // instruction, since it will also kill the components made live by this def. 993 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); 994 995 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 996 unsigned SubIdx; 997 if (Forward) 998 SubIdx = SubIndices[Idx]; 999 else 1000 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 1001 1002 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1; 1003 1004 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) { 1005 Register ImpDefSuper = Idx == 0 ? 
Register(DestReg) : Register(); 1006 Register ImpUseSuper = SrcReg; 1007 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 1008 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS, 1009 ImpDefSuper, ImpUseSuper); 1010 } else if (Opcode == AMDGPU::V_PK_MOV_B32) { 1011 Register DstSubReg = RI.getSubReg(DestReg, SubIdx); 1012 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); 1013 MachineInstrBuilder MIB = 1014 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg) 1015 .addImm(SISrcMods::OP_SEL_1) 1016 .addReg(SrcSubReg) 1017 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 1018 .addReg(SrcSubReg) 1019 .addImm(0) // op_sel_lo 1020 .addImm(0) // op_sel_hi 1021 .addImm(0) // neg_lo 1022 .addImm(0) // neg_hi 1023 .addImm(0) // clamp 1024 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 1025 if (Idx == 0) 1026 MIB.addReg(DestReg, RegState::Define | RegState::Implicit); 1027 } else { 1028 MachineInstrBuilder Builder = 1029 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx)) 1030 .addReg(RI.getSubReg(SrcReg, SubIdx)); 1031 if (Idx == 0) 1032 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 1033 1034 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 1035 } 1036 } 1037 } 1038 1039 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 1040 int NewOpc; 1041 1042 // Try to map original to commuted opcode 1043 NewOpc = AMDGPU::getCommuteRev(Opcode); 1044 if (NewOpc != -1) 1045 // Check if the commuted (REV) opcode exists on the target. 1046 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 1047 1048 // Try to map commuted to original opcode 1049 NewOpc = AMDGPU::getCommuteOrig(Opcode); 1050 if (NewOpc != -1) 1051 // Check if the original (non-REV) opcode exists on the target. 1052 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 1053 1054 return Opcode; 1055 } 1056 1057 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 1058 MachineBasicBlock::iterator MI, 1059 const DebugLoc &DL, unsigned DestReg, 1060 int64_t Value) const { 1061 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1062 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 1063 if (RegClass == &AMDGPU::SReg_32RegClass || 1064 RegClass == &AMDGPU::SGPR_32RegClass || 1065 RegClass == &AMDGPU::SReg_32_XM0RegClass || 1066 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 1067 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 1068 .addImm(Value); 1069 return; 1070 } 1071 1072 if (RegClass == &AMDGPU::SReg_64RegClass || 1073 RegClass == &AMDGPU::SGPR_64RegClass || 1074 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 1075 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 1076 .addImm(Value); 1077 return; 1078 } 1079 1080 if (RegClass == &AMDGPU::VGPR_32RegClass) { 1081 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 1082 .addImm(Value); 1083 return; 1084 } 1085 if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) { 1086 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 1087 .addImm(Value); 1088 return; 1089 } 1090 1091 unsigned EltSize = 4; 1092 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 1093 if (RI.isSGPRClass(RegClass)) { 1094 if (RI.getRegSizeInBits(*RegClass) > 32) { 1095 Opcode = AMDGPU::S_MOV_B64; 1096 EltSize = 8; 1097 } else { 1098 Opcode = AMDGPU::S_MOV_B32; 1099 EltSize = 4; 1100 } 1101 } 1102 1103 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 1104 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 1105 int64_t IdxValue = Idx == 0 ? 
Value : 0; 1106 1107 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 1108 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 1109 Builder.addImm(IdxValue); 1110 } 1111 } 1112 1113 const TargetRegisterClass * 1114 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 1115 return &AMDGPU::VGPR_32RegClass; 1116 } 1117 1118 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 1119 MachineBasicBlock::iterator I, 1120 const DebugLoc &DL, Register DstReg, 1121 ArrayRef<MachineOperand> Cond, 1122 Register TrueReg, 1123 Register FalseReg) const { 1124 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1125 const TargetRegisterClass *BoolXExecRC = 1126 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 1127 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 1128 "Not a VGPR32 reg"); 1129 1130 if (Cond.size() == 1) { 1131 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1132 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1133 .add(Cond[0]); 1134 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1135 .addImm(0) 1136 .addReg(FalseReg) 1137 .addImm(0) 1138 .addReg(TrueReg) 1139 .addReg(SReg); 1140 } else if (Cond.size() == 2) { 1141 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 1142 switch (Cond[0].getImm()) { 1143 case SIInstrInfo::SCC_TRUE: { 1144 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1145 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1146 : AMDGPU::S_CSELECT_B64), SReg) 1147 .addImm(1) 1148 .addImm(0); 1149 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1150 .addImm(0) 1151 .addReg(FalseReg) 1152 .addImm(0) 1153 .addReg(TrueReg) 1154 .addReg(SReg); 1155 break; 1156 } 1157 case SIInstrInfo::SCC_FALSE: { 1158 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1159 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1160 : AMDGPU::S_CSELECT_B64), SReg) 1161 .addImm(0) 1162 .addImm(1); 1163 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1164 .addImm(0) 1165 .addReg(FalseReg) 1166 .addImm(0) 1167 .addReg(TrueReg) 1168 .addReg(SReg); 1169 break; 1170 } 1171 case SIInstrInfo::VCCNZ: { 1172 MachineOperand RegOp = Cond[1]; 1173 RegOp.setImplicit(false); 1174 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1175 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1176 .add(RegOp); 1177 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1178 .addImm(0) 1179 .addReg(FalseReg) 1180 .addImm(0) 1181 .addReg(TrueReg) 1182 .addReg(SReg); 1183 break; 1184 } 1185 case SIInstrInfo::VCCZ: { 1186 MachineOperand RegOp = Cond[1]; 1187 RegOp.setImplicit(false); 1188 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1189 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1190 .add(RegOp); 1191 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1192 .addImm(0) 1193 .addReg(TrueReg) 1194 .addImm(0) 1195 .addReg(FalseReg) 1196 .addReg(SReg); 1197 break; 1198 } 1199 case SIInstrInfo::EXECNZ: { 1200 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1201 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1202 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1203 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1204 .addImm(0); 1205 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 1206 : AMDGPU::S_CSELECT_B64), SReg) 1207 .addImm(1) 1208 .addImm(0); 1209 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1210 .addImm(0) 1211 .addReg(FalseReg) 1212 .addImm(0) 1213 .addReg(TrueReg) 1214 .addReg(SReg); 1215 break; 1216 } 1217 case SIInstrInfo::EXECZ: { 1218 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1219 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1220 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1221 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1222 .addImm(0); 1223 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1224 : AMDGPU::S_CSELECT_B64), SReg) 1225 .addImm(0) 1226 .addImm(1); 1227 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1228 .addImm(0) 1229 .addReg(FalseReg) 1230 .addImm(0) 1231 .addReg(TrueReg) 1232 .addReg(SReg); 1233 llvm_unreachable("Unhandled branch predicate EXECZ"); 1234 break; 1235 } 1236 default: 1237 llvm_unreachable("invalid branch predicate"); 1238 } 1239 } else { 1240 llvm_unreachable("Can only handle Cond size 1 or 2"); 1241 } 1242 } 1243 1244 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 1245 MachineBasicBlock::iterator I, 1246 const DebugLoc &DL, 1247 Register SrcReg, int Value) const { 1248 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1249 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1250 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 1251 .addImm(Value) 1252 .addReg(SrcReg); 1253 1254 return Reg; 1255 } 1256 1257 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB, 1258 MachineBasicBlock::iterator I, 1259 const DebugLoc &DL, 1260 Register SrcReg, int Value) const { 1261 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1262 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1263 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 1264 .addImm(Value) 1265 .addReg(SrcReg); 1266 1267 return Reg; 1268 } 1269 1270 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 1271 1272 if (RI.isAGPRClass(DstRC)) 1273 return AMDGPU::COPY; 1274 if (RI.getRegSizeInBits(*DstRC) == 32) { 1275 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1276 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 1277 return AMDGPU::S_MOV_B64; 1278 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 1279 return AMDGPU::V_MOV_B64_PSEUDO; 1280 } 1281 return AMDGPU::COPY; 1282 } 1283 1284 const MCInstrDesc & 1285 SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize, 1286 bool IsIndirectSrc) const { 1287 if (IsIndirectSrc) { 1288 if (VecSize <= 32) // 4 bytes 1289 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1); 1290 if (VecSize <= 64) // 8 bytes 1291 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2); 1292 if (VecSize <= 96) // 12 bytes 1293 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3); 1294 if (VecSize <= 128) // 16 bytes 1295 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4); 1296 if (VecSize <= 160) // 20 bytes 1297 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5); 1298 if (VecSize <= 256) // 32 bytes 1299 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8); 1300 if (VecSize <= 512) // 64 bytes 1301 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16); 1302 if (VecSize <= 1024) // 128 bytes 1303 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32); 1304 1305 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos"); 1306 } 1307 1308 if (VecSize <= 32) // 4 bytes 1309 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1); 1310 if (VecSize <= 64) // 8 bytes 1311 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2); 1312 if (VecSize <= 96) // 12 bytes 1313 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3); 1314 if (VecSize <= 128) // 16 bytes 1315 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4); 1316 if (VecSize <= 160) // 20 bytes 1317 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5); 1318 if (VecSize <= 256) // 32 bytes 1319 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8); 1320 if (VecSize <= 512) // 64 bytes 1321 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16); 1322 if (VecSize <= 1024) // 128 bytes 1323 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32); 1324 1325 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos"); 1326 } 1327 1328 static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) { 1329 if (VecSize <= 32) // 4 bytes 1330 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1331 if (VecSize <= 64) // 8 bytes 1332 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1333 if (VecSize <= 96) // 12 bytes 1334 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1335 if (VecSize <= 128) // 16 bytes 1336 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1337 if (VecSize <= 160) // 20 bytes 1338 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1339 if (VecSize <= 256) // 32 bytes 1340 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1341 if (VecSize <= 512) // 64 bytes 1342 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1343 if (VecSize <= 1024) // 128 bytes 1344 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1345 1346 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1347 } 1348 1349 static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) { 1350 if (VecSize <= 32) // 4 bytes 1351 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1352 if (VecSize <= 64) // 8 bytes 1353 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1354 if (VecSize <= 96) // 12 bytes 1355 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1356 if (VecSize <= 128) // 16 bytes 1357 return 
AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1358 if (VecSize <= 160) // 20 bytes 1359 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1360 if (VecSize <= 256) // 32 bytes 1361 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1362 if (VecSize <= 512) // 64 bytes 1363 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1364 if (VecSize <= 1024) // 128 bytes 1365 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1366 1367 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1368 } 1369 1370 static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) { 1371 if (VecSize <= 64) // 8 bytes 1372 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1; 1373 if (VecSize <= 128) // 16 bytes 1374 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2; 1375 if (VecSize <= 256) // 32 bytes 1376 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4; 1377 if (VecSize <= 512) // 64 bytes 1378 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8; 1379 if (VecSize <= 1024) // 128 bytes 1380 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16; 1381 1382 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1383 } 1384 1385 const MCInstrDesc & 1386 SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, 1387 bool IsSGPR) const { 1388 if (IsSGPR) { 1389 switch (EltSize) { 1390 case 32: 1391 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize)); 1392 case 64: 1393 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize)); 1394 default: 1395 llvm_unreachable("invalid reg indexing elt size"); 1396 } 1397 } 1398 1399 assert(EltSize == 32 && "invalid reg indexing elt size"); 1400 return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize)); 1401 } 1402 1403 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1404 switch (Size) { 1405 case 4: 1406 return AMDGPU::SI_SPILL_S32_SAVE; 1407 case 8: 1408 return AMDGPU::SI_SPILL_S64_SAVE; 1409 case 12: 1410 return AMDGPU::SI_SPILL_S96_SAVE; 1411 case 16: 1412 return AMDGPU::SI_SPILL_S128_SAVE; 1413 case 20: 1414 return AMDGPU::SI_SPILL_S160_SAVE; 1415 case 24: 1416 return AMDGPU::SI_SPILL_S192_SAVE; 1417 case 28: 1418 return AMDGPU::SI_SPILL_S224_SAVE; 1419 case 32: 1420 return AMDGPU::SI_SPILL_S256_SAVE; 1421 case 64: 1422 return AMDGPU::SI_SPILL_S512_SAVE; 1423 case 128: 1424 return AMDGPU::SI_SPILL_S1024_SAVE; 1425 default: 1426 llvm_unreachable("unknown register size"); 1427 } 1428 } 1429 1430 static unsigned getVGPRSpillSaveOpcode(unsigned Size) { 1431 switch (Size) { 1432 case 4: 1433 return AMDGPU::SI_SPILL_V32_SAVE; 1434 case 8: 1435 return AMDGPU::SI_SPILL_V64_SAVE; 1436 case 12: 1437 return AMDGPU::SI_SPILL_V96_SAVE; 1438 case 16: 1439 return AMDGPU::SI_SPILL_V128_SAVE; 1440 case 20: 1441 return AMDGPU::SI_SPILL_V160_SAVE; 1442 case 24: 1443 return AMDGPU::SI_SPILL_V192_SAVE; 1444 case 28: 1445 return AMDGPU::SI_SPILL_V224_SAVE; 1446 case 32: 1447 return AMDGPU::SI_SPILL_V256_SAVE; 1448 case 64: 1449 return AMDGPU::SI_SPILL_V512_SAVE; 1450 case 128: 1451 return AMDGPU::SI_SPILL_V1024_SAVE; 1452 default: 1453 llvm_unreachable("unknown register size"); 1454 } 1455 } 1456 1457 static unsigned getAGPRSpillSaveOpcode(unsigned Size) { 1458 switch (Size) { 1459 case 4: 1460 return AMDGPU::SI_SPILL_A32_SAVE; 1461 case 8: 1462 return AMDGPU::SI_SPILL_A64_SAVE; 1463 case 12: 1464 return AMDGPU::SI_SPILL_A96_SAVE; 1465 case 16: 1466 return AMDGPU::SI_SPILL_A128_SAVE; 1467 case 20: 1468 return AMDGPU::SI_SPILL_A160_SAVE; 1469 case 24: 1470 return AMDGPU::SI_SPILL_A192_SAVE; 1471 case 28: 1472 return AMDGPU::SI_SPILL_A224_SAVE; 
1473 case 32: 1474 return AMDGPU::SI_SPILL_A256_SAVE; 1475 case 64: 1476 return AMDGPU::SI_SPILL_A512_SAVE; 1477 case 128: 1478 return AMDGPU::SI_SPILL_A1024_SAVE; 1479 default: 1480 llvm_unreachable("unknown register size"); 1481 } 1482 } 1483 1484 static unsigned getAVSpillSaveOpcode(unsigned Size) { 1485 switch (Size) { 1486 case 4: 1487 return AMDGPU::SI_SPILL_AV32_SAVE; 1488 case 8: 1489 return AMDGPU::SI_SPILL_AV64_SAVE; 1490 case 12: 1491 return AMDGPU::SI_SPILL_AV96_SAVE; 1492 case 16: 1493 return AMDGPU::SI_SPILL_AV128_SAVE; 1494 case 20: 1495 return AMDGPU::SI_SPILL_AV160_SAVE; 1496 case 24: 1497 return AMDGPU::SI_SPILL_AV192_SAVE; 1498 case 28: 1499 return AMDGPU::SI_SPILL_AV224_SAVE; 1500 case 32: 1501 return AMDGPU::SI_SPILL_AV256_SAVE; 1502 case 64: 1503 return AMDGPU::SI_SPILL_AV512_SAVE; 1504 case 128: 1505 return AMDGPU::SI_SPILL_AV1024_SAVE; 1506 default: 1507 llvm_unreachable("unknown register size"); 1508 } 1509 } 1510 1511 void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, 1512 MachineBasicBlock::iterator MI, 1513 Register SrcReg, bool isKill, 1514 int FrameIndex, 1515 const TargetRegisterClass *RC, 1516 const TargetRegisterInfo *TRI) const { 1517 MachineFunction *MF = MBB.getParent(); 1518 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1519 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1520 const DebugLoc &DL = MBB.findDebugLoc(MI); 1521 1522 MachinePointerInfo PtrInfo 1523 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1524 MachineMemOperand *MMO = MF->getMachineMemOperand( 1525 PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex), 1526 FrameInfo.getObjectAlign(FrameIndex)); 1527 unsigned SpillSize = TRI->getSpillSize(*RC); 1528 1529 MachineRegisterInfo &MRI = MF->getRegInfo(); 1530 if (RI.isSGPRClass(RC)) { 1531 MFI->setHasSpilledSGPRs(); 1532 assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled"); 1533 assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI && 1534 SrcReg != AMDGPU::EXEC && "exec should not be spilled"); 1535 1536 // We are only allowed to create one new instruction when spilling 1537 // registers, so we need to use pseudo instruction for spilling SGPRs. 1538 const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize)); 1539 1540 // The SGPR spill/restore instructions only work on number sgprs, so we need 1541 // to make sure we are using the correct register class. 1542 if (SrcReg.isVirtual() && SpillSize == 4) { 1543 MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1544 } 1545 1546 BuildMI(MBB, MI, DL, OpDesc) 1547 .addReg(SrcReg, getKillRegState(isKill)) // data 1548 .addFrameIndex(FrameIndex) // addr 1549 .addMemOperand(MMO) 1550 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1551 1552 if (RI.spillSGPRToVGPR()) 1553 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1554 return; 1555 } 1556 1557 unsigned Opcode = RI.isVectorSuperClass(RC) ? getAVSpillSaveOpcode(SpillSize) 1558 : RI.isAGPRClass(RC) ? 
getAGPRSpillSaveOpcode(SpillSize) 1559 : getVGPRSpillSaveOpcode(SpillSize); 1560 MFI->setHasSpilledVGPRs(); 1561 1562 BuildMI(MBB, MI, DL, get(Opcode)) 1563 .addReg(SrcReg, getKillRegState(isKill)) // data 1564 .addFrameIndex(FrameIndex) // addr 1565 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1566 .addImm(0) // offset 1567 .addMemOperand(MMO); 1568 } 1569 1570 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1571 switch (Size) { 1572 case 4: 1573 return AMDGPU::SI_SPILL_S32_RESTORE; 1574 case 8: 1575 return AMDGPU::SI_SPILL_S64_RESTORE; 1576 case 12: 1577 return AMDGPU::SI_SPILL_S96_RESTORE; 1578 case 16: 1579 return AMDGPU::SI_SPILL_S128_RESTORE; 1580 case 20: 1581 return AMDGPU::SI_SPILL_S160_RESTORE; 1582 case 24: 1583 return AMDGPU::SI_SPILL_S192_RESTORE; 1584 case 28: 1585 return AMDGPU::SI_SPILL_S224_RESTORE; 1586 case 32: 1587 return AMDGPU::SI_SPILL_S256_RESTORE; 1588 case 64: 1589 return AMDGPU::SI_SPILL_S512_RESTORE; 1590 case 128: 1591 return AMDGPU::SI_SPILL_S1024_RESTORE; 1592 default: 1593 llvm_unreachable("unknown register size"); 1594 } 1595 } 1596 1597 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1598 switch (Size) { 1599 case 4: 1600 return AMDGPU::SI_SPILL_V32_RESTORE; 1601 case 8: 1602 return AMDGPU::SI_SPILL_V64_RESTORE; 1603 case 12: 1604 return AMDGPU::SI_SPILL_V96_RESTORE; 1605 case 16: 1606 return AMDGPU::SI_SPILL_V128_RESTORE; 1607 case 20: 1608 return AMDGPU::SI_SPILL_V160_RESTORE; 1609 case 24: 1610 return AMDGPU::SI_SPILL_V192_RESTORE; 1611 case 28: 1612 return AMDGPU::SI_SPILL_V224_RESTORE; 1613 case 32: 1614 return AMDGPU::SI_SPILL_V256_RESTORE; 1615 case 64: 1616 return AMDGPU::SI_SPILL_V512_RESTORE; 1617 case 128: 1618 return AMDGPU::SI_SPILL_V1024_RESTORE; 1619 default: 1620 llvm_unreachable("unknown register size"); 1621 } 1622 } 1623 1624 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1625 switch (Size) { 1626 case 4: 1627 return AMDGPU::SI_SPILL_A32_RESTORE; 1628 case 8: 1629 return AMDGPU::SI_SPILL_A64_RESTORE; 1630 case 12: 1631 return AMDGPU::SI_SPILL_A96_RESTORE; 1632 case 16: 1633 return AMDGPU::SI_SPILL_A128_RESTORE; 1634 case 20: 1635 return AMDGPU::SI_SPILL_A160_RESTORE; 1636 case 24: 1637 return AMDGPU::SI_SPILL_A192_RESTORE; 1638 case 28: 1639 return AMDGPU::SI_SPILL_A224_RESTORE; 1640 case 32: 1641 return AMDGPU::SI_SPILL_A256_RESTORE; 1642 case 64: 1643 return AMDGPU::SI_SPILL_A512_RESTORE; 1644 case 128: 1645 return AMDGPU::SI_SPILL_A1024_RESTORE; 1646 default: 1647 llvm_unreachable("unknown register size"); 1648 } 1649 } 1650 1651 static unsigned getAVSpillRestoreOpcode(unsigned Size) { 1652 switch (Size) { 1653 case 4: 1654 return AMDGPU::SI_SPILL_AV32_RESTORE; 1655 case 8: 1656 return AMDGPU::SI_SPILL_AV64_RESTORE; 1657 case 12: 1658 return AMDGPU::SI_SPILL_AV96_RESTORE; 1659 case 16: 1660 return AMDGPU::SI_SPILL_AV128_RESTORE; 1661 case 20: 1662 return AMDGPU::SI_SPILL_AV160_RESTORE; 1663 case 24: 1664 return AMDGPU::SI_SPILL_AV192_RESTORE; 1665 case 28: 1666 return AMDGPU::SI_SPILL_AV224_RESTORE; 1667 case 32: 1668 return AMDGPU::SI_SPILL_AV256_RESTORE; 1669 case 64: 1670 return AMDGPU::SI_SPILL_AV512_RESTORE; 1671 case 128: 1672 return AMDGPU::SI_SPILL_AV1024_RESTORE; 1673 default: 1674 llvm_unreachable("unknown register size"); 1675 } 1676 } 1677 1678 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1679 MachineBasicBlock::iterator MI, 1680 Register DestReg, int FrameIndex, 1681 const TargetRegisterClass *RC, 1682 const TargetRegisterInfo *TRI) const { 1683 MachineFunction 
*MF = MBB.getParent(); 1684 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1685 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1686 const DebugLoc &DL = MBB.findDebugLoc(MI); 1687 unsigned SpillSize = TRI->getSpillSize(*RC); 1688 1689 MachinePointerInfo PtrInfo 1690 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1691 1692 MachineMemOperand *MMO = MF->getMachineMemOperand( 1693 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1694 FrameInfo.getObjectAlign(FrameIndex)); 1695 1696 if (RI.isSGPRClass(RC)) { 1697 MFI->setHasSpilledSGPRs(); 1698 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1699 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI && 1700 DestReg != AMDGPU::EXEC && "exec should not be spilled"); 1701 1702 // FIXME: Maybe this should not include a memoperand because it will be 1703 // lowered to non-memory instructions. 1704 const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize)); 1705 if (DestReg.isVirtual() && SpillSize == 4) { 1706 MachineRegisterInfo &MRI = MF->getRegInfo(); 1707 MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 1708 } 1709 1710 if (RI.spillSGPRToVGPR()) 1711 FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill); 1712 BuildMI(MBB, MI, DL, OpDesc, DestReg) 1713 .addFrameIndex(FrameIndex) // addr 1714 .addMemOperand(MMO) 1715 .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit); 1716 1717 return; 1718 } 1719 1720 unsigned Opcode = RI.isVectorSuperClass(RC) 1721 ? getAVSpillRestoreOpcode(SpillSize) 1722 : RI.isAGPRClass(RC) ? getAGPRSpillRestoreOpcode(SpillSize) 1723 : getVGPRSpillRestoreOpcode(SpillSize); 1724 BuildMI(MBB, MI, DL, get(Opcode), DestReg) 1725 .addFrameIndex(FrameIndex) // vaddr 1726 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1727 .addImm(0) // offset 1728 .addMemOperand(MMO); 1729 } 1730 1731 void SIInstrInfo::insertNoop(MachineBasicBlock &MBB, 1732 MachineBasicBlock::iterator MI) const { 1733 insertNoops(MBB, MI, 1); 1734 } 1735 1736 void SIInstrInfo::insertNoops(MachineBasicBlock &MBB, 1737 MachineBasicBlock::iterator MI, 1738 unsigned Quantity) const { 1739 DebugLoc DL = MBB.findDebugLoc(MI); 1740 while (Quantity > 0) { 1741 unsigned Arg = std::min(Quantity, 8u); 1742 Quantity -= Arg; 1743 BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1); 1744 } 1745 } 1746 1747 void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const { 1748 auto MF = MBB.getParent(); 1749 SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 1750 1751 assert(Info->isEntryFunction()); 1752 1753 if (MBB.succ_empty()) { 1754 bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end(); 1755 if (HasNoTerminator) { 1756 if (Info->returnsVoid()) { 1757 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0); 1758 } else { 1759 BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG)); 1760 } 1761 } 1762 } 1763 } 1764 1765 unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) { 1766 switch (MI.getOpcode()) { 1767 default: 1768 if (MI.isMetaInstruction()) 1769 return 0; 1770 return 1; // FIXME: Do wait states equal cycles? 1771 1772 case AMDGPU::S_NOP: 1773 return MI.getOperand(0).getImm() + 1; 1774 1775 // FIXME: Any other pseudo instruction? 1776 // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The 1777 // hazard, even if one exists, won't really be visible. Should we handle it?
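// The pseudos below expand to no real machine code, so they provide no wait
// states.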
1778 case AMDGPU::SI_MASKED_UNREACHABLE: 1779 case AMDGPU::WAVE_BARRIER: 1780 case AMDGPU::SCHED_BARRIER: 1781 return 0; 1782 } 1783 } 1784 1785 bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { 1786 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 1787 MachineBasicBlock &MBB = *MI.getParent(); 1788 DebugLoc DL = MBB.findDebugLoc(MI); 1789 switch (MI.getOpcode()) { 1790 default: return TargetInstrInfo::expandPostRAPseudo(MI); 1791 case AMDGPU::S_MOV_B64_term: 1792 // This is only a terminator to get the correct spill code placement during 1793 // register allocation. 1794 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1795 break; 1796 1797 case AMDGPU::S_MOV_B32_term: 1798 // This is only a terminator to get the correct spill code placement during 1799 // register allocation. 1800 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1801 break; 1802 1803 case AMDGPU::S_XOR_B64_term: 1804 // This is only a terminator to get the correct spill code placement during 1805 // register allocation. 1806 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1807 break; 1808 1809 case AMDGPU::S_XOR_B32_term: 1810 // This is only a terminator to get the correct spill code placement during 1811 // register allocation. 1812 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1813 break; 1814 case AMDGPU::S_OR_B64_term: 1815 // This is only a terminator to get the correct spill code placement during 1816 // register allocation. 1817 MI.setDesc(get(AMDGPU::S_OR_B64)); 1818 break; 1819 case AMDGPU::S_OR_B32_term: 1820 // This is only a terminator to get the correct spill code placement during 1821 // register allocation. 1822 MI.setDesc(get(AMDGPU::S_OR_B32)); 1823 break; 1824 1825 case AMDGPU::S_ANDN2_B64_term: 1826 // This is only a terminator to get the correct spill code placement during 1827 // register allocation. 1828 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1829 break; 1830 1831 case AMDGPU::S_ANDN2_B32_term: 1832 // This is only a terminator to get the correct spill code placement during 1833 // register allocation. 1834 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1835 break; 1836 1837 case AMDGPU::S_AND_B64_term: 1838 // This is only a terminator to get the correct spill code placement during 1839 // register allocation. 1840 MI.setDesc(get(AMDGPU::S_AND_B64)); 1841 break; 1842 1843 case AMDGPU::S_AND_B32_term: 1844 // This is only a terminator to get the correct spill code placement during 1845 // register allocation. 1846 MI.setDesc(get(AMDGPU::S_AND_B32)); 1847 break; 1848 1849 case AMDGPU::V_MOV_B64_PSEUDO: { 1850 Register Dst = MI.getOperand(0).getReg(); 1851 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1852 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1853 1854 const MachineOperand &SrcOp = MI.getOperand(1); 1855 // FIXME: Will this work for 64-bit floating point immediates? 
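// Expansion sketch (illustrative): when the subtarget has no real 64-bit move,
// the pseudo is split into two 32-bit halves, e.g.
//   v_mov_b32_e32 vDst.sub0, <lo32>
//   v_mov_b32_e32 vDst.sub1, <hi32>
// or becomes a single v_pk_mov_b32 / v_mov_b64 where the subtarget allows it.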
1856 assert(!SrcOp.isFPImm()); 1857 if (ST.hasMovB64()) { 1858 MI.setDesc(get(AMDGPU::V_MOV_B64_e32)); 1859 if (!isLiteralConstant(MI, 1) || isUInt<32>(SrcOp.getImm())) 1860 break; 1861 } 1862 if (SrcOp.isImm()) { 1863 APInt Imm(64, SrcOp.getImm()); 1864 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1865 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1866 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) { 1867 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1868 .addImm(SISrcMods::OP_SEL_1) 1869 .addImm(Lo.getSExtValue()) 1870 .addImm(SISrcMods::OP_SEL_1) 1871 .addImm(Lo.getSExtValue()) 1872 .addImm(0) // op_sel_lo 1873 .addImm(0) // op_sel_hi 1874 .addImm(0) // neg_lo 1875 .addImm(0) // neg_hi 1876 .addImm(0); // clamp 1877 } else { 1878 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1879 .addImm(Lo.getSExtValue()) 1880 .addReg(Dst, RegState::Implicit | RegState::Define); 1881 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1882 .addImm(Hi.getSExtValue()) 1883 .addReg(Dst, RegState::Implicit | RegState::Define); 1884 } 1885 } else { 1886 assert(SrcOp.isReg()); 1887 if (ST.hasPackedFP32Ops() && 1888 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { 1889 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1890 .addImm(SISrcMods::OP_SEL_1) // src0_mod 1891 .addReg(SrcOp.getReg()) 1892 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod 1893 .addReg(SrcOp.getReg()) 1894 .addImm(0) // op_sel_lo 1895 .addImm(0) // op_sel_hi 1896 .addImm(0) // neg_lo 1897 .addImm(0) // neg_hi 1898 .addImm(0); // clamp 1899 } else { 1900 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1901 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1902 .addReg(Dst, RegState::Implicit | RegState::Define); 1903 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1904 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1905 .addReg(Dst, RegState::Implicit | RegState::Define); 1906 } 1907 } 1908 MI.eraseFromParent(); 1909 break; 1910 } 1911 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1912 expandMovDPP64(MI); 1913 break; 1914 } 1915 case AMDGPU::S_MOV_B64_IMM_PSEUDO: { 1916 const MachineOperand &SrcOp = MI.getOperand(1); 1917 assert(!SrcOp.isFPImm()); 1918 APInt Imm(64, SrcOp.getImm()); 1919 if (Imm.isIntN(32) || isInlineConstant(Imm)) { 1920 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1921 break; 1922 } 1923 1924 Register Dst = MI.getOperand(0).getReg(); 1925 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1926 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1927 1928 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1929 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1930 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo) 1931 .addImm(Lo.getSExtValue()) 1932 .addReg(Dst, RegState::Implicit | RegState::Define); 1933 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi) 1934 .addImm(Hi.getSExtValue()) 1935 .addReg(Dst, RegState::Implicit | RegState::Define); 1936 MI.eraseFromParent(); 1937 break; 1938 } 1939 case AMDGPU::V_SET_INACTIVE_B32: { 1940 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1941 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1942 // FIXME: We may possibly optimize the COPY once we find ways to make LLVM 1943 // optimizations (mainly Register Coalescer) aware of WWM register liveness. 
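// Expansion sketch (wave64 flavour shown, illustrative):
//   v_mov_b32 vDst, <active value>
//   s_not_b64 exec, exec
//   v_mov_b32 vDst, <inactive value>
//   s_not_b64 exec, exec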
1944 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1945 .add(MI.getOperand(1)); 1946 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1947 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1948 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1949 .add(MI.getOperand(2)); 1950 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1951 .addReg(Exec); 1952 MI.eraseFromParent(); 1953 break; 1954 } 1955 case AMDGPU::V_SET_INACTIVE_B64: { 1956 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1957 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1958 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1959 MI.getOperand(0).getReg()) 1960 .add(MI.getOperand(1)); 1961 expandPostRAPseudo(*Copy); 1962 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1963 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1964 Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1965 MI.getOperand(0).getReg()) 1966 .add(MI.getOperand(2)); 1967 expandPostRAPseudo(*Copy); 1968 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1969 .addReg(Exec); 1970 MI.eraseFromParent(); 1971 break; 1972 } 1973 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1974 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1975 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1976 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1977 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1978 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1979 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1980 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1981 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1982 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1983 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1984 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1985 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1986 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1987 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1988 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1989 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1: 1990 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2: 1991 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4: 1992 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8: 1993 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: { 1994 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1995 1996 unsigned Opc; 1997 if (RI.hasVGPRs(EltRC)) { 1998 Opc = AMDGPU::V_MOVRELD_B32_e32; 1999 } else { 2000 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 2001 : AMDGPU::S_MOVRELD_B32; 2002 } 2003 2004 const MCInstrDesc &OpDesc = get(Opc); 2005 Register VecReg = MI.getOperand(0).getReg(); 2006 bool IsUndef = MI.getOperand(1).isUndef(); 2007 unsigned SubReg = MI.getOperand(3).getImm(); 2008 assert(VecReg == MI.getOperand(1).getReg()); 2009 2010 MachineInstrBuilder MIB = 2011 BuildMI(MBB, MI, DL, OpDesc) 2012 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2013 .add(MI.getOperand(2)) 2014 .addReg(VecReg, RegState::ImplicitDefine) 2015 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)); 2016 2017 const int ImpDefIdx = 2018 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 2019 const int ImpUseIdx = ImpDefIdx + 1; 2020 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 2021 MI.eraseFromParent(); 2022 break; 2023 } 2024 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1: 2025 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2: 2026 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3: 2027 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4: 2028 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5: 2029 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8: 2030 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16: 2031 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: { 2032 assert(ST.useVGPRIndexMode()); 2033 Register VecReg = MI.getOperand(0).getReg(); 2034 bool IsUndef = MI.getOperand(1).isUndef(); 2035 Register Idx = MI.getOperand(3).getReg(); 2036 Register SubReg = MI.getOperand(4).getImm(); 2037 2038 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 2039 .addReg(Idx) 2040 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE); 2041 SetOn->getOperand(3).setIsUndef(); 2042 2043 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect_write); 2044 MachineInstrBuilder MIB = 2045 BuildMI(MBB, MI, DL, OpDesc) 2046 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2047 .add(MI.getOperand(2)) 2048 .addReg(VecReg, RegState::ImplicitDefine) 2049 .addReg(VecReg, 2050 RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 2051 2052 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 2053 const int ImpUseIdx = ImpDefIdx + 1; 2054 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 2055 2056 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 2057 2058 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 2059 2060 MI.eraseFromParent(); 2061 break; 2062 } 2063 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1: 2064 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2: 2065 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3: 2066 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4: 2067 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5: 2068 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8: 2069 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16: 2070 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: { 2071 assert(ST.useVGPRIndexMode()); 2072 Register Dst = MI.getOperand(0).getReg(); 2073 Register VecReg = MI.getOperand(1).getReg(); 2074 bool IsUndef = MI.getOperand(1).isUndef(); 2075 Register Idx = MI.getOperand(2).getReg(); 2076 Register SubReg = MI.getOperand(3).getImm(); 2077 2078 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 2079 .addReg(Idx) 2080 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE); 2081 SetOn->getOperand(3).setIsUndef(); 2082 2083 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_indirect_read)) 2084 .addDef(Dst) 2085 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 2086 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)); 2087 2088 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 2089 2090 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 2091 2092 MI.eraseFromParent(); 2093 break; 2094 } 2095 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 2096 MachineFunction &MF = *MBB.getParent(); 2097 Register Reg = MI.getOperand(0).getReg(); 2098 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 2099 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 2100 2101 // Create a bundle so these instructions won't be re-ordered by the 2102 // post-RA scheduler. 2103 MIBundleBuilder Bundler(MBB, MI); 2104 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 2105 2106 // Add 32-bit offset from this instruction to the start of the 2107 // constant data. 2108 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 2109 .addReg(RegLo) 2110 .add(MI.getOperand(1))); 2111 2112 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 2113 .addReg(RegHi); 2114 MIB.add(MI.getOperand(2)); 2115 2116 Bundler.append(MIB); 2117 finalizeBundle(MBB, Bundler.begin()); 2118 2119 MI.eraseFromParent(); 2120 break; 2121 } 2122 case AMDGPU::ENTER_STRICT_WWM: { 2123 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 2124 // Whole Wave Mode is entered. 2125 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 2126 : AMDGPU::S_OR_SAVEEXEC_B64)); 2127 break; 2128 } 2129 case AMDGPU::ENTER_STRICT_WQM: { 2130 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 2131 // STRICT_WQM is entered. 2132 const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 2133 const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64; 2134 const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2135 BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec); 2136 BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec); 2137 2138 MI.eraseFromParent(); 2139 break; 2140 } 2141 case AMDGPU::EXIT_STRICT_WWM: 2142 case AMDGPU::EXIT_STRICT_WQM: { 2143 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 2144 // WWM/STRICT_WQM is exited. 2145 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 2146 break; 2147 } 2148 case AMDGPU::SI_RETURN: { 2149 const MachineFunction *MF = MBB.getParent(); 2150 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 2151 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 2152 // Hiding the return address use with SI_RETURN may lead to extra kills in 2153 // the function and missing live-ins. We are fine in practice because callee 2154 // saved register handling ensures the register value is restored before 2155 // RET, but we need the undef flag here to appease the MachineVerifier 2156 // liveness checks.
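// The pseudo becomes an indirect jump through the return address register,
// e.g. s_setpc_b64 s[30:31].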
2157 MachineInstrBuilder MIB = 2158 BuildMI(MBB, MI, DL, get(AMDGPU::S_SETPC_B64_return)) 2159 .addReg(TRI->getReturnAddressReg(*MF), RegState::Undef); 2160 2161 MIB.copyImplicitOps(MI); 2162 MI.eraseFromParent(); 2163 break; 2164 } 2165 } 2166 return true; 2167 } 2168 2169 std::pair<MachineInstr*, MachineInstr*> 2170 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 2171 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 2172 2173 if (ST.hasMovB64() && 2174 AMDGPU::isLegal64BitDPPControl( 2175 getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl)->getImm())) { 2176 MI.setDesc(get(AMDGPU::V_MOV_B64_dpp)); 2177 return std::make_pair(&MI, nullptr); 2178 } 2179 2180 MachineBasicBlock &MBB = *MI.getParent(); 2181 DebugLoc DL = MBB.findDebugLoc(MI); 2182 MachineFunction *MF = MBB.getParent(); 2183 MachineRegisterInfo &MRI = MF->getRegInfo(); 2184 Register Dst = MI.getOperand(0).getReg(); 2185 unsigned Part = 0; 2186 MachineInstr *Split[2]; 2187 2188 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 2189 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 2190 if (Dst.isPhysical()) { 2191 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 2192 } else { 2193 assert(MRI.isSSA()); 2194 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2195 MovDPP.addDef(Tmp); 2196 } 2197 2198 for (unsigned I = 1; I <= 2; ++I) { // old and src operands. 2199 const MachineOperand &SrcOp = MI.getOperand(I); 2200 assert(!SrcOp.isFPImm()); 2201 if (SrcOp.isImm()) { 2202 APInt Imm(64, SrcOp.getImm()); 2203 Imm.ashrInPlace(Part * 32); 2204 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 2205 } else { 2206 assert(SrcOp.isReg()); 2207 Register Src = SrcOp.getReg(); 2208 if (Src.isPhysical()) 2209 MovDPP.addReg(RI.getSubReg(Src, Sub)); 2210 else 2211 MovDPP.addReg(Src, SrcOp.isUndef() ? 
RegState::Undef : 0, Sub); 2212 } 2213 } 2214 2215 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 2216 MovDPP.addImm(MI.getOperand(I).getImm()); 2217 2218 Split[Part] = MovDPP; 2219 ++Part; 2220 } 2221 2222 if (Dst.isVirtual()) 2223 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 2224 .addReg(Split[0]->getOperand(0).getReg()) 2225 .addImm(AMDGPU::sub0) 2226 .addReg(Split[1]->getOperand(0).getReg()) 2227 .addImm(AMDGPU::sub1); 2228 2229 MI.eraseFromParent(); 2230 return std::make_pair(Split[0], Split[1]); 2231 } 2232 2233 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 2234 MachineOperand &Src0, 2235 unsigned Src0OpName, 2236 MachineOperand &Src1, 2237 unsigned Src1OpName) const { 2238 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 2239 if (!Src0Mods) 2240 return false; 2241 2242 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 2243 assert(Src1Mods && 2244 "All commutable instructions have both src0 and src1 modifiers"); 2245 2246 int Src0ModsVal = Src0Mods->getImm(); 2247 int Src1ModsVal = Src1Mods->getImm(); 2248 2249 Src1Mods->setImm(Src0ModsVal); 2250 Src0Mods->setImm(Src1ModsVal); 2251 return true; 2252 } 2253 2254 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 2255 MachineOperand &RegOp, 2256 MachineOperand &NonRegOp) { 2257 Register Reg = RegOp.getReg(); 2258 unsigned SubReg = RegOp.getSubReg(); 2259 bool IsKill = RegOp.isKill(); 2260 bool IsDead = RegOp.isDead(); 2261 bool IsUndef = RegOp.isUndef(); 2262 bool IsDebug = RegOp.isDebug(); 2263 2264 if (NonRegOp.isImm()) 2265 RegOp.ChangeToImmediate(NonRegOp.getImm()); 2266 else if (NonRegOp.isFI()) 2267 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 2268 else if (NonRegOp.isGlobal()) { 2269 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 2270 NonRegOp.getTargetFlags()); 2271 } else 2272 return nullptr; 2273 2274 // Make sure we don't reinterpret a subreg index in the target flags. 2275 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 2276 2277 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 2278 NonRegOp.setSubReg(SubReg); 2279 2280 return &MI; 2281 } 2282 2283 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 2284 unsigned Src0Idx, 2285 unsigned Src1Idx) const { 2286 assert(!NewMI && "this should never be used"); 2287 2288 unsigned Opc = MI.getOpcode(); 2289 int CommutedOpcode = commuteOpcode(Opc); 2290 if (CommutedOpcode == -1) 2291 return nullptr; 2292 2293 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 2294 static_cast<int>(Src0Idx) && 2295 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 2296 static_cast<int>(Src1Idx) && 2297 "inconsistency with findCommutedOpIndices"); 2298 2299 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2300 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2301 2302 MachineInstr *CommutedMI = nullptr; 2303 if (Src0.isReg() && Src1.isReg()) { 2304 if (isOperandLegal(MI, Src1Idx, &Src0)) { 2305 // Be sure to copy the source modifiers to the right place. 2306 CommutedMI 2307 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 2308 } 2309 2310 } else if (Src0.isReg() && !Src1.isReg()) { 2311 // src0 should always be able to support any operand type, so no need to 2312 // check operand legality. 
2313 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 2314 } else if (!Src0.isReg() && Src1.isReg()) { 2315 if (isOperandLegal(MI, Src1Idx, &Src0)) 2316 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 2317 } else { 2318 // FIXME: Found two non registers to commute. This does happen. 2319 return nullptr; 2320 } 2321 2322 if (CommutedMI) { 2323 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 2324 Src1, AMDGPU::OpName::src1_modifiers); 2325 2326 CommutedMI->setDesc(get(CommutedOpcode)); 2327 } 2328 2329 return CommutedMI; 2330 } 2331 2332 // This needs to be implemented because the source modifiers may be inserted 2333 // between the true commutable operands, and the base 2334 // TargetInstrInfo::commuteInstruction uses it. 2335 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 2336 unsigned &SrcOpIdx0, 2337 unsigned &SrcOpIdx1) const { 2338 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 2339 } 2340 2341 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 2342 unsigned &SrcOpIdx1) const { 2343 if (!Desc.isCommutable()) 2344 return false; 2345 2346 unsigned Opc = Desc.getOpcode(); 2347 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2348 if (Src0Idx == -1) 2349 return false; 2350 2351 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2352 if (Src1Idx == -1) 2353 return false; 2354 2355 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 2356 } 2357 2358 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 2359 int64_t BrOffset) const { 2360 // BranchRelaxation should never have to check s_setpc_b64 because its dest 2361 // block is unanalyzable. 2362 assert(BranchOp != AMDGPU::S_SETPC_B64); 2363 2364 // Convert to dwords. 2365 BrOffset /= 4; 2366 2367 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 2368 // from the next instruction. 2369 BrOffset -= 1; 2370 2371 return isIntN(BranchOffsetBits, BrOffset); 2372 } 2373 2374 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 2375 const MachineInstr &MI) const { 2376 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 2377 // This would be a difficult analysis to perform, but can always be legal so 2378 // there's no need to analyze it. 2379 return nullptr; 2380 } 2381 2382 return MI.getOperand(0).getMBB(); 2383 } 2384 2385 void SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 2386 MachineBasicBlock &DestBB, 2387 MachineBasicBlock &RestoreBB, 2388 const DebugLoc &DL, int64_t BrOffset, 2389 RegScavenger *RS) const { 2390 assert(RS && "RegScavenger required for long branching"); 2391 assert(MBB.empty() && 2392 "new block should be inserted for expanding unconditional branch"); 2393 assert(MBB.pred_size() == 1); 2394 assert(RestoreBB.empty() && 2395 "restore block should be inserted for restoring clobbered registers"); 2396 2397 MachineFunction *MF = MBB.getParent(); 2398 MachineRegisterInfo &MRI = MF->getRegInfo(); 2399 2400 // FIXME: Virtual register workaround for RegScavenger not working with empty 2401 // blocks. 2402 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2403 2404 auto I = MBB.end(); 2405 2406 // We need to compute the offset relative to the instruction immediately after 2407 // s_getpc_b64. Insert pc arithmetic code before last terminator. 
2408 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2409 2410 auto &MCCtx = MF->getContext(); 2411 MCSymbol *PostGetPCLabel = 2412 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true); 2413 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel); 2414 2415 MCSymbol *OffsetLo = 2416 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true); 2417 MCSymbol *OffsetHi = 2418 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true); 2419 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2420 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2421 .addReg(PCReg, 0, AMDGPU::sub0) 2422 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET); 2423 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2424 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2425 .addReg(PCReg, 0, AMDGPU::sub1) 2426 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET); 2427 2428 // Insert the indirect branch after the other terminator. 2429 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2430 .addReg(PCReg); 2431 2432 // FIXME: If spilling is necessary, this will fail because this scavenger has 2433 // no emergency stack slots. It is non-trivial to spill in this situation, 2434 // because the restore code needs to be specially placed after the 2435 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2436 // block. 2437 // 2438 // If a spill is needed for the pc register pair, we need to insert a spill 2439 // restore block right before the destination block, and insert a short branch 2440 // into the old destination block's fallthrough predecessor. 2441 // e.g.: 2442 // 2443 // s_cbranch_scc0 skip_long_branch: 2444 // 2445 // long_branch_bb: 2446 // spill s[8:9] 2447 // s_getpc_b64 s[8:9] 2448 // s_add_u32 s8, s8, restore_bb 2449 // s_addc_u32 s9, s9, 0 2450 // s_setpc_b64 s[8:9] 2451 // 2452 // skip_long_branch: 2453 // foo; 2454 // 2455 // ..... 2456 // 2457 // dest_bb_fallthrough_predecessor: 2458 // bar; 2459 // s_branch dest_bb 2460 // 2461 // restore_bb: 2462 // restore s[8:9] 2463 // fallthrough dest_bb 2464 /// 2465 // dest_bb: 2466 // buzz; 2467 2468 RS->enterBasicBlockEnd(MBB); 2469 Register Scav = RS->scavengeRegisterBackwards( 2470 AMDGPU::SReg_64RegClass, MachineBasicBlock::iterator(GetPC), 2471 /* RestoreAfter */ false, 0, /* AllowSpill */ false); 2472 if (Scav) { 2473 RS->setRegUsed(Scav); 2474 MRI.replaceRegWith(PCReg, Scav); 2475 MRI.clearVirtRegs(); 2476 } else { 2477 // As SGPR needs VGPR to be spilled, we reuse the slot of temporary VGPR for 2478 // SGPR spill. 2479 const GCNSubtarget &ST = MF->getSubtarget<GCNSubtarget>(); 2480 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 2481 TRI->spillEmergencySGPR(GetPC, RestoreBB, AMDGPU::SGPR0_SGPR1, RS); 2482 MRI.replaceRegWith(PCReg, AMDGPU::SGPR0_SGPR1); 2483 MRI.clearVirtRegs(); 2484 } 2485 2486 MCSymbol *DestLabel = Scav ? DestBB.getSymbol() : RestoreBB.getSymbol(); 2487 // Now, the distance could be defined. 2488 auto *Offset = MCBinaryExpr::createSub( 2489 MCSymbolRefExpr::create(DestLabel, MCCtx), 2490 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx); 2491 // Add offset assignments. 
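// Roughly, the sequence emitted above resolves to:
//   s_getpc_b64 s[N:N+1]
//   s_add_u32   sN,   sN,   (DestLabel - PostGetPCLabel) & 0xffffffff
//   s_addc_u32  sN+1, sN+1, (DestLabel - PostGetPCLabel) >> 32 (arithmetic)
//   s_setpc_b64 s[N:N+1]
// with N standing in for whichever SGPR pair ends up being used; the two
// offset symbols below carry the low and high halves of the distance.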
2492 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx); 2493 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx)); 2494 auto *ShAmt = MCConstantExpr::create(32, MCCtx); 2495 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx)); 2496 } 2497 2498 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2499 switch (Cond) { 2500 case SIInstrInfo::SCC_TRUE: 2501 return AMDGPU::S_CBRANCH_SCC1; 2502 case SIInstrInfo::SCC_FALSE: 2503 return AMDGPU::S_CBRANCH_SCC0; 2504 case SIInstrInfo::VCCNZ: 2505 return AMDGPU::S_CBRANCH_VCCNZ; 2506 case SIInstrInfo::VCCZ: 2507 return AMDGPU::S_CBRANCH_VCCZ; 2508 case SIInstrInfo::EXECNZ: 2509 return AMDGPU::S_CBRANCH_EXECNZ; 2510 case SIInstrInfo::EXECZ: 2511 return AMDGPU::S_CBRANCH_EXECZ; 2512 default: 2513 llvm_unreachable("invalid branch predicate"); 2514 } 2515 } 2516 2517 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2518 switch (Opcode) { 2519 case AMDGPU::S_CBRANCH_SCC0: 2520 return SCC_FALSE; 2521 case AMDGPU::S_CBRANCH_SCC1: 2522 return SCC_TRUE; 2523 case AMDGPU::S_CBRANCH_VCCNZ: 2524 return VCCNZ; 2525 case AMDGPU::S_CBRANCH_VCCZ: 2526 return VCCZ; 2527 case AMDGPU::S_CBRANCH_EXECNZ: 2528 return EXECNZ; 2529 case AMDGPU::S_CBRANCH_EXECZ: 2530 return EXECZ; 2531 default: 2532 return INVALID_BR; 2533 } 2534 } 2535 2536 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2537 MachineBasicBlock::iterator I, 2538 MachineBasicBlock *&TBB, 2539 MachineBasicBlock *&FBB, 2540 SmallVectorImpl<MachineOperand> &Cond, 2541 bool AllowModify) const { 2542 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2543 // Unconditional Branch 2544 TBB = I->getOperand(0).getMBB(); 2545 return false; 2546 } 2547 2548 MachineBasicBlock *CondBB = nullptr; 2549 2550 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2551 CondBB = I->getOperand(1).getMBB(); 2552 Cond.push_back(I->getOperand(0)); 2553 } else { 2554 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2555 if (Pred == INVALID_BR) 2556 return true; 2557 2558 CondBB = I->getOperand(0).getMBB(); 2559 Cond.push_back(MachineOperand::CreateImm(Pred)); 2560 Cond.push_back(I->getOperand(1)); // Save the branch register. 2561 } 2562 ++I; 2563 2564 if (I == MBB.end()) { 2565 // Conditional branch followed by fall-through. 2566 TBB = CondBB; 2567 return false; 2568 } 2569 2570 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2571 TBB = CondBB; 2572 FBB = I->getOperand(0).getMBB(); 2573 return false; 2574 } 2575 2576 return true; 2577 } 2578 2579 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2580 MachineBasicBlock *&FBB, 2581 SmallVectorImpl<MachineOperand> &Cond, 2582 bool AllowModify) const { 2583 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2584 auto E = MBB.end(); 2585 if (I == E) 2586 return false; 2587 2588 // Skip over the instructions that are artificially terminators for special 2589 // exec management. 
2590 while (I != E && !I->isBranch() && !I->isReturn()) { 2591 switch (I->getOpcode()) { 2592 case AMDGPU::S_MOV_B64_term: 2593 case AMDGPU::S_XOR_B64_term: 2594 case AMDGPU::S_OR_B64_term: 2595 case AMDGPU::S_ANDN2_B64_term: 2596 case AMDGPU::S_AND_B64_term: 2597 case AMDGPU::S_MOV_B32_term: 2598 case AMDGPU::S_XOR_B32_term: 2599 case AMDGPU::S_OR_B32_term: 2600 case AMDGPU::S_ANDN2_B32_term: 2601 case AMDGPU::S_AND_B32_term: 2602 break; 2603 case AMDGPU::SI_IF: 2604 case AMDGPU::SI_ELSE: 2605 case AMDGPU::SI_KILL_I1_TERMINATOR: 2606 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2607 // FIXME: It's messy that these need to be considered here at all. 2608 return true; 2609 default: 2610 llvm_unreachable("unexpected non-branch terminator inst"); 2611 } 2612 2613 ++I; 2614 } 2615 2616 if (I == E) 2617 return false; 2618 2619 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2620 } 2621 2622 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2623 int *BytesRemoved) const { 2624 unsigned Count = 0; 2625 unsigned RemovedSize = 0; 2626 for (MachineInstr &MI : llvm::make_early_inc_range(MBB.terminators())) { 2627 // Skip over artificial terminators when removing instructions. 2628 if (MI.isBranch() || MI.isReturn()) { 2629 RemovedSize += getInstSizeInBytes(MI); 2630 MI.eraseFromParent(); 2631 ++Count; 2632 } 2633 } 2634 2635 if (BytesRemoved) 2636 *BytesRemoved = RemovedSize; 2637 2638 return Count; 2639 } 2640 2641 // Copy the flags onto the implicit condition register operand. 2642 static void preserveCondRegFlags(MachineOperand &CondReg, 2643 const MachineOperand &OrigCond) { 2644 CondReg.setIsUndef(OrigCond.isUndef()); 2645 CondReg.setIsKill(OrigCond.isKill()); 2646 } 2647 2648 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2649 MachineBasicBlock *TBB, 2650 MachineBasicBlock *FBB, 2651 ArrayRef<MachineOperand> Cond, 2652 const DebugLoc &DL, 2653 int *BytesAdded) const { 2654 if (!FBB && Cond.empty()) { 2655 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2656 .addMBB(TBB); 2657 if (BytesAdded) 2658 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2659 return 1; 2660 } 2661 2662 if(Cond.size() == 1 && Cond[0].isReg()) { 2663 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2664 .add(Cond[0]) 2665 .addMBB(TBB); 2666 return 1; 2667 } 2668 2669 assert(TBB && Cond[0].isImm()); 2670 2671 unsigned Opcode 2672 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2673 2674 if (!FBB) { 2675 Cond[1].isUndef(); 2676 MachineInstr *CondBr = 2677 BuildMI(&MBB, DL, get(Opcode)) 2678 .addMBB(TBB); 2679 2680 // Copy the flags onto the implicit condition register operand. 2681 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2682 fixImplicitOperands(*CondBr); 2683 2684 if (BytesAdded) 2685 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2686 return 1; 2687 } 2688 2689 assert(TBB && FBB); 2690 2691 MachineInstr *CondBr = 2692 BuildMI(&MBB, DL, get(Opcode)) 2693 .addMBB(TBB); 2694 fixImplicitOperands(*CondBr); 2695 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2696 .addMBB(FBB); 2697 2698 MachineOperand &CondReg = CondBr->getOperand(1); 2699 CondReg.setIsUndef(Cond[1].isUndef()); 2700 CondReg.setIsKill(Cond[1].isKill()); 2701 2702 if (BytesAdded) 2703 *BytesAdded = ST.hasOffset3fBug() ? 
16 : 8; 2704 2705 return 2; 2706 } 2707 2708 bool SIInstrInfo::reverseBranchCondition( 2709 SmallVectorImpl<MachineOperand> &Cond) const { 2710 if (Cond.size() != 2) { 2711 return true; 2712 } 2713 2714 if (Cond[0].isImm()) { 2715 Cond[0].setImm(-Cond[0].getImm()); 2716 return false; 2717 } 2718 2719 return true; 2720 } 2721 2722 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2723 ArrayRef<MachineOperand> Cond, 2724 Register DstReg, Register TrueReg, 2725 Register FalseReg, int &CondCycles, 2726 int &TrueCycles, int &FalseCycles) const { 2727 switch (Cond[0].getImm()) { 2728 case VCCNZ: 2729 case VCCZ: { 2730 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2731 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2732 if (MRI.getRegClass(FalseReg) != RC) 2733 return false; 2734 2735 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2736 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2737 2738 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2739 return RI.hasVGPRs(RC) && NumInsts <= 6; 2740 } 2741 case SCC_TRUE: 2742 case SCC_FALSE: { 2743 // FIXME: We could insert for VGPRs if we could replace the original compare 2744 // with a vector one. 2745 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2746 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2747 if (MRI.getRegClass(FalseReg) != RC) 2748 return false; 2749 2750 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2751 2752 // Multiples of 8 can do s_cselect_b64 2753 if (NumInsts % 2 == 0) 2754 NumInsts /= 2; 2755 2756 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2757 return RI.isSGPRClass(RC); 2758 } 2759 default: 2760 return false; 2761 } 2762 } 2763 2764 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2765 MachineBasicBlock::iterator I, const DebugLoc &DL, 2766 Register DstReg, ArrayRef<MachineOperand> Cond, 2767 Register TrueReg, Register FalseReg) const { 2768 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2769 if (Pred == VCCZ || Pred == SCC_FALSE) { 2770 Pred = static_cast<BranchPredicate>(-Pred); 2771 std::swap(TrueReg, FalseReg); 2772 } 2773 2774 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2775 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2776 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2777 2778 if (DstSize == 32) { 2779 MachineInstr *Select; 2780 if (Pred == SCC_TRUE) { 2781 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2782 .addReg(TrueReg) 2783 .addReg(FalseReg); 2784 } else { 2785 // Instruction's operands are backwards from what is expected. 
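// v_cndmask_b32 dst, src0, src1, vcc selects src1 for lanes whose condition
// bit is set, so FalseReg goes in src0 and TrueReg in src1.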
2786 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2787 .addReg(FalseReg) 2788 .addReg(TrueReg); 2789 } 2790 2791 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2792 return; 2793 } 2794 2795 if (DstSize == 64 && Pred == SCC_TRUE) { 2796 MachineInstr *Select = 2797 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2798 .addReg(TrueReg) 2799 .addReg(FalseReg); 2800 2801 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2802 return; 2803 } 2804 2805 static const int16_t Sub0_15[] = { 2806 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2807 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2808 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2809 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2810 }; 2811 2812 static const int16_t Sub0_15_64[] = { 2813 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2814 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2815 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2816 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2817 }; 2818 2819 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2820 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2821 const int16_t *SubIndices = Sub0_15; 2822 int NElts = DstSize / 32; 2823 2824 // 64-bit select is only available for SALU. 2825 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2826 if (Pred == SCC_TRUE) { 2827 if (NElts % 2) { 2828 SelOp = AMDGPU::S_CSELECT_B32; 2829 EltRC = &AMDGPU::SGPR_32RegClass; 2830 } else { 2831 SelOp = AMDGPU::S_CSELECT_B64; 2832 EltRC = &AMDGPU::SGPR_64RegClass; 2833 SubIndices = Sub0_15_64; 2834 NElts /= 2; 2835 } 2836 } 2837 2838 MachineInstrBuilder MIB = BuildMI( 2839 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2840 2841 I = MIB->getIterator(); 2842 2843 SmallVector<Register, 8> Regs; 2844 for (int Idx = 0; Idx != NElts; ++Idx) { 2845 Register DstElt = MRI.createVirtualRegister(EltRC); 2846 Regs.push_back(DstElt); 2847 2848 unsigned SubIdx = SubIndices[Idx]; 2849 2850 MachineInstr *Select; 2851 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2852 Select = 2853 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2854 .addReg(FalseReg, 0, SubIdx) 2855 .addReg(TrueReg, 0, SubIdx); 2856 } else { 2857 Select = 2858 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2859 .addReg(TrueReg, 0, SubIdx) 2860 .addReg(FalseReg, 0, SubIdx); 2861 } 2862 2863 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2864 fixImplicitOperands(*Select); 2865 2866 MIB.addReg(DstElt) 2867 .addImm(SubIdx); 2868 } 2869 } 2870 2871 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) { 2872 switch (MI.getOpcode()) { 2873 case AMDGPU::V_MOV_B32_e32: 2874 case AMDGPU::V_MOV_B32_e64: 2875 case AMDGPU::V_MOV_B64_PSEUDO: 2876 case AMDGPU::V_MOV_B64_e32: 2877 case AMDGPU::V_MOV_B64_e64: 2878 case AMDGPU::S_MOV_B32: 2879 case AMDGPU::S_MOV_B64: 2880 case AMDGPU::COPY: 2881 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2882 case AMDGPU::V_ACCVGPR_READ_B32_e64: 2883 case AMDGPU::V_ACCVGPR_MOV_B32: 2884 return true; 2885 default: 2886 return false; 2887 } 2888 } 2889 2890 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2891 unsigned Kind) const { 2892 switch(Kind) { 2893 case PseudoSourceValue::Stack: 2894 case PseudoSourceValue::FixedStack: 2895 return AMDGPUAS::PRIVATE_ADDRESS; 2896 case PseudoSourceValue::ConstantPool: 2897 case PseudoSourceValue::GOT: 2898 case PseudoSourceValue::JumpTable: 2899 case PseudoSourceValue::GlobalValueCallEntry: 2900 case PseudoSourceValue::ExternalSymbolCallEntry: 2901 case PseudoSourceValue::TargetCustom: 2902 return AMDGPUAS::CONSTANT_ADDRESS; 2903 
} 2904 return AMDGPUAS::FLAT_ADDRESS; 2905 } 2906 2907 static constexpr unsigned ModifierOpNames[] = { 2908 AMDGPU::OpName::src0_modifiers, AMDGPU::OpName::src1_modifiers, 2909 AMDGPU::OpName::src2_modifiers, AMDGPU::OpName::clamp, 2910 AMDGPU::OpName::omod}; 2911 2912 void SIInstrInfo::removeModOperands(MachineInstr &MI) const { 2913 unsigned Opc = MI.getOpcode(); 2914 for (unsigned Name : reverse(ModifierOpNames)) 2915 MI.removeOperand(AMDGPU::getNamedOperandIdx(Opc, Name)); 2916 } 2917 2918 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2919 Register Reg, MachineRegisterInfo *MRI) const { 2920 if (!MRI->hasOneNonDBGUse(Reg)) 2921 return false; 2922 2923 switch (DefMI.getOpcode()) { 2924 default: 2925 return false; 2926 case AMDGPU::S_MOV_B64: 2927 // TODO: We could fold 64-bit immediates, but this get complicated 2928 // when there are sub-registers. 2929 return false; 2930 2931 case AMDGPU::V_MOV_B32_e32: 2932 case AMDGPU::S_MOV_B32: 2933 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2934 break; 2935 } 2936 2937 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2938 assert(ImmOp); 2939 // FIXME: We could handle FrameIndex values here. 2940 if (!ImmOp->isImm()) 2941 return false; 2942 2943 unsigned Opc = UseMI.getOpcode(); 2944 if (Opc == AMDGPU::COPY) { 2945 Register DstReg = UseMI.getOperand(0).getReg(); 2946 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2947 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2948 unsigned NewOpc = isVGPRCopy ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2949 APInt Imm(32, ImmOp->getImm()); 2950 2951 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2952 Imm = Imm.ashr(16); 2953 2954 if (RI.isAGPR(*MRI, DstReg)) { 2955 if (!isInlineConstant(Imm)) 2956 return false; 2957 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 2958 } 2959 2960 if (Is16Bit) { 2961 if (isVGPRCopy) 2962 return false; // Do not clobber vgpr_hi16 2963 2964 if (DstReg.isVirtual() && UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2965 return false; 2966 2967 UseMI.getOperand(0).setSubReg(0); 2968 if (DstReg.isPhysical()) { 2969 DstReg = RI.get32BitRegister(DstReg); 2970 UseMI.getOperand(0).setReg(DstReg); 2971 } 2972 assert(UseMI.getOperand(1).getReg().isVirtual()); 2973 } 2974 2975 UseMI.setDesc(get(NewOpc)); 2976 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2977 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2978 return true; 2979 } 2980 2981 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2982 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 || 2983 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2984 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) { 2985 // Don't fold if we are using source or output modifiers. The new VOP2 2986 // instructions don't have them. 2987 if (hasAnyModifiersSet(UseMI)) 2988 return false; 2989 2990 // If this is a free constant, there's no reason to do this. 2991 // TODO: We could fold this here instead of letting SIFoldOperands do it 2992 // later. 2993 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2994 2995 // Any src operand can be used for the legality check. 
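// Inline constants (e.g. 0, 1.0, -4.0) are encoded for free, so only a real
// 32-bit literal is worth folding into a madmk/madak-style instruction.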
if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2997 return false; 2998 2999 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 3000 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64; 3001 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 3002 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64; 3003 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 3004 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 3005 3006 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 3007 // We should only expect these to be on src0 due to canonicalization. 3008 if (Src0->isReg() && Src0->getReg() == Reg) { 3009 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 3010 return false; 3011 3012 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 3013 return false; 3014 3015 unsigned NewOpc = 3016 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 3017 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 3018 if (pseudoToMCOpcode(NewOpc) == -1) 3019 return false; 3020 3021 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 3022 3023 const int64_t Imm = ImmOp->getImm(); 3024 3025 // FIXME: This would be a lot easier if we could return a new instruction 3026 // instead of having to modify in place. 3027 3028 Register Src1Reg = Src1->getReg(); 3029 unsigned Src1SubReg = Src1->getSubReg(); 3030 Src0->setReg(Src1Reg); 3031 Src0->setSubReg(Src1SubReg); 3032 Src0->setIsKill(Src1->isKill()); 3033 3034 if (Opc == AMDGPU::V_MAC_F32_e64 || 3035 Opc == AMDGPU::V_MAC_F16_e64 || 3036 Opc == AMDGPU::V_FMAC_F32_e64 || 3037 Opc == AMDGPU::V_FMAC_F16_e64) 3038 UseMI.untieRegOperand( 3039 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 3040 3041 Src1->ChangeToImmediate(Imm); 3042 3043 removeModOperands(UseMI); 3044 UseMI.setDesc(get(NewOpc)); 3045 3046 bool DeleteDef = MRI->use_nodbg_empty(Reg); 3047 if (DeleteDef) 3048 DefMI.eraseFromParent(); 3049 3050 return true; 3051 } 3052 3053 // Added part is the constant: Use v_madak_{f16, f32}. 3054 if (Src2->isReg() && Src2->getReg() == Reg) { 3055 // Not allowed to use constant bus for another operand. 3056 // We can however allow an inline immediate as src0. 3057 bool Src0Inlined = false; 3058 if (Src0->isReg()) { 3059 // Try to inline constant if possible. 3060 // If the Def moves an immediate and the use is single, 3061 // we are saving a VGPR here.
3062 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 3063 if (Def && Def->isMoveImmediate() && 3064 isInlineConstant(Def->getOperand(1)) && 3065 MRI->hasOneUse(Src0->getReg())) { 3066 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 3067 Src0Inlined = true; 3068 } else if ((Src0->getReg().isPhysical() && 3069 (ST.getConstantBusLimit(Opc) <= 1 && 3070 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 3071 (Src0->getReg().isVirtual() && 3072 (ST.getConstantBusLimit(Opc) <= 1 && 3073 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 3074 return false; 3075 // VGPR is okay as Src0 - fallthrough 3076 } 3077 3078 if (Src1->isReg() && !Src0Inlined ) { 3079 // We have one slot for inlinable constant so far - try to fill it 3080 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 3081 if (Def && Def->isMoveImmediate() && 3082 isInlineConstant(Def->getOperand(1)) && 3083 MRI->hasOneUse(Src1->getReg()) && 3084 commuteInstruction(UseMI)) { 3085 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 3086 } else if ((Src1->getReg().isPhysical() && 3087 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 3088 (Src1->getReg().isVirtual() && 3089 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 3090 return false; 3091 // VGPR is okay as Src1 - fallthrough 3092 } 3093 3094 unsigned NewOpc = 3095 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 3096 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 3097 if (pseudoToMCOpcode(NewOpc) == -1) 3098 return false; 3099 3100 const int64_t Imm = ImmOp->getImm(); 3101 3102 // FIXME: This would be a lot easier if we could return a new instruction 3103 // instead of having to modify in place. 3104 3105 if (Opc == AMDGPU::V_MAC_F32_e64 || 3106 Opc == AMDGPU::V_MAC_F16_e64 || 3107 Opc == AMDGPU::V_FMAC_F32_e64 || 3108 Opc == AMDGPU::V_FMAC_F16_e64) 3109 UseMI.untieRegOperand( 3110 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 3111 3112 // ChangingToImmediate adds Src2 back to the instruction. 3113 Src2->ChangeToImmediate(Imm); 3114 3115 // These come before src2. 3116 removeModOperands(UseMI); 3117 UseMI.setDesc(get(NewOpc)); 3118 // It might happen that UseMI was commuted 3119 // and we now have SGPR as SRC1. If so 2 inlined 3120 // constant and SGPR are illegal. 3121 legalizeOperands(UseMI); 3122 3123 bool DeleteDef = MRI->use_nodbg_empty(Reg); 3124 if (DeleteDef) 3125 DefMI.eraseFromParent(); 3126 3127 return true; 3128 } 3129 } 3130 3131 return false; 3132 } 3133 3134 static bool 3135 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 3136 ArrayRef<const MachineOperand *> BaseOps2) { 3137 if (BaseOps1.size() != BaseOps2.size()) 3138 return false; 3139 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 3140 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 3141 return false; 3142 } 3143 return true; 3144 } 3145 3146 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 3147 int WidthB, int OffsetB) { 3148 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 3149 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 3150 int LowWidth = (LowOffset == OffsetA) ? 
WidthA : WidthB; 3151 return LowOffset + LowWidth <= HighOffset; 3152 } 3153 3154 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 3155 const MachineInstr &MIb) const { 3156 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 3157 int64_t Offset0, Offset1; 3158 unsigned Dummy0, Dummy1; 3159 bool Offset0IsScalable, Offset1IsScalable; 3160 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 3161 Dummy0, &RI) || 3162 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 3163 Dummy1, &RI)) 3164 return false; 3165 3166 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 3167 return false; 3168 3169 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 3170 // FIXME: Handle ds_read2 / ds_write2. 3171 return false; 3172 } 3173 unsigned Width0 = MIa.memoperands().front()->getSize(); 3174 unsigned Width1 = MIb.memoperands().front()->getSize(); 3175 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 3176 } 3177 3178 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 3179 const MachineInstr &MIb) const { 3180 assert(MIa.mayLoadOrStore() && 3181 "MIa must load from or modify a memory location"); 3182 assert(MIb.mayLoadOrStore() && 3183 "MIb must load from or modify a memory location"); 3184 3185 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 3186 return false; 3187 3188 // XXX - Can we relax this between address spaces? 3189 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 3190 return false; 3191 3192 // TODO: Should we check the address space from the MachineMemOperand? That 3193 // would allow us to distinguish objects we know don't alias based on the 3194 // underlying address space, even if it was lowered to a different one, 3195 // e.g. private accesses lowered to use MUBUF instructions on a scratch 3196 // buffer.
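// The per-kind checks below compare offsets when both accesses are of the
// same kind (DS, buffer, scalar, FLAT); pairs of different kinds are only
// treated as disjoint when they cannot address the same memory.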
3197 if (isDS(MIa)) { 3198 if (isDS(MIb)) 3199 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3200 3201 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 3202 } 3203 3204 if (isMUBUF(MIa) || isMTBUF(MIa)) { 3205 if (isMUBUF(MIb) || isMTBUF(MIb)) 3206 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3207 3208 return !isFLAT(MIb) && !isSMRD(MIb); 3209 } 3210 3211 if (isSMRD(MIa)) { 3212 if (isSMRD(MIb)) 3213 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3214 3215 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 3216 } 3217 3218 if (isFLAT(MIa)) { 3219 if (isFLAT(MIb)) 3220 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3221 3222 return false; 3223 } 3224 3225 return false; 3226 } 3227 3228 static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, 3229 int64_t &Imm, MachineInstr **DefMI = nullptr) { 3230 if (Reg.isPhysical()) 3231 return false; 3232 auto *Def = MRI.getUniqueVRegDef(Reg); 3233 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) { 3234 Imm = Def->getOperand(1).getImm(); 3235 if (DefMI) 3236 *DefMI = Def; 3237 return true; 3238 } 3239 return false; 3240 } 3241 3242 static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm, 3243 MachineInstr **DefMI = nullptr) { 3244 if (!MO->isReg()) 3245 return false; 3246 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 3247 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3248 return getFoldableImm(MO->getReg(), MRI, Imm, DefMI); 3249 } 3250 3251 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 3252 MachineInstr &NewMI) { 3253 if (LV) { 3254 unsigned NumOps = MI.getNumOperands(); 3255 for (unsigned I = 1; I < NumOps; ++I) { 3256 MachineOperand &Op = MI.getOperand(I); 3257 if (Op.isReg() && Op.isKill()) 3258 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 3259 } 3260 } 3261 } 3262 3263 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI, 3264 LiveVariables *LV, 3265 LiveIntervals *LIS) const { 3266 MachineBasicBlock &MBB = *MI.getParent(); 3267 unsigned Opc = MI.getOpcode(); 3268 3269 // Handle MFMA. 3270 int NewMFMAOpc = AMDGPU::getMFMAEarlyClobberOp(Opc); 3271 if (NewMFMAOpc != -1) { 3272 MachineInstrBuilder MIB = 3273 BuildMI(MBB, MI, MI.getDebugLoc(), get(NewMFMAOpc)); 3274 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) 3275 MIB.add(MI.getOperand(I)); 3276 updateLiveVariables(LV, MI, *MIB); 3277 if (LIS) 3278 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3279 return MIB; 3280 } 3281 3282 // Handle MAC/FMAC. 
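// For example (illustrative): v_mac_f32 vD, vA, vB computes vD += vA * vB and
// can be rewritten as the three-address v_mad_f32 vD, vA, vB, vD, or as
// v_madmk_f32 / v_madak_f32 when one source is a foldable immediate
// (handled below).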
3283 bool IsF16 = Opc == AMDGPU::V_MAC_F16_e32 || Opc == AMDGPU::V_MAC_F16_e64 || 3284 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64; 3285 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 3286 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 || 3287 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64 || 3288 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 || 3289 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3290 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3291 bool IsLegacy = Opc == AMDGPU::V_MAC_LEGACY_F32_e32 || 3292 Opc == AMDGPU::V_MAC_LEGACY_F32_e64 || 3293 Opc == AMDGPU::V_FMAC_LEGACY_F32_e32 || 3294 Opc == AMDGPU::V_FMAC_LEGACY_F32_e64; 3295 bool Src0Literal = false; 3296 3297 switch (Opc) { 3298 default: 3299 return nullptr; 3300 case AMDGPU::V_MAC_F16_e64: 3301 case AMDGPU::V_FMAC_F16_e64: 3302 case AMDGPU::V_MAC_F32_e64: 3303 case AMDGPU::V_MAC_LEGACY_F32_e64: 3304 case AMDGPU::V_FMAC_F32_e64: 3305 case AMDGPU::V_FMAC_LEGACY_F32_e64: 3306 case AMDGPU::V_FMAC_F64_e64: 3307 break; 3308 case AMDGPU::V_MAC_F16_e32: 3309 case AMDGPU::V_FMAC_F16_e32: 3310 case AMDGPU::V_MAC_F32_e32: 3311 case AMDGPU::V_MAC_LEGACY_F32_e32: 3312 case AMDGPU::V_FMAC_F32_e32: 3313 case AMDGPU::V_FMAC_LEGACY_F32_e32: 3314 case AMDGPU::V_FMAC_F64_e32: { 3315 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3316 AMDGPU::OpName::src0); 3317 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 3318 if (!Src0->isReg() && !Src0->isImm()) 3319 return nullptr; 3320 3321 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 3322 Src0Literal = true; 3323 3324 break; 3325 } 3326 } 3327 3328 MachineInstrBuilder MIB; 3329 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3330 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 3331 const MachineOperand *Src0Mods = 3332 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 3333 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3334 const MachineOperand *Src1Mods = 3335 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 3336 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3337 const MachineOperand *Src2Mods = 3338 getNamedOperand(MI, AMDGPU::OpName::src2_modifiers); 3339 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3340 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 3341 3342 if (!Src0Mods && !Src1Mods && !Src2Mods && !Clamp && !Omod && !IsF64 && 3343 !IsLegacy && 3344 // If we have an SGPR input, we will violate the constant bus restriction. 3345 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 3346 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) { 3347 MachineInstr *DefMI; 3348 const auto killDef = [&DefMI, &MBB, this]() -> void { 3349 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 3350 // The only user is the instruction which will be killed. 3351 if (!MRI.hasOneNonDBGUse(DefMI->getOperand(0).getReg())) 3352 return; 3353 // We cannot just remove the DefMI here, calling pass will crash. 3354 DefMI->setDesc(get(AMDGPU::IMPLICIT_DEF)); 3355 for (unsigned I = DefMI->getNumOperands() - 1; I != 0; --I) 3356 DefMI->removeOperand(I); 3357 }; 3358 3359 int64_t Imm; 3360 if (!Src0Literal && getFoldableImm(Src2, Imm, &DefMI)) { 3361 unsigned NewOpc = 3362 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 3363 : (IsF16 ? 
AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 3364 if (pseudoToMCOpcode(NewOpc) != -1) { 3365 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3366 .add(*Dst) 3367 .add(*Src0) 3368 .add(*Src1) 3369 .addImm(Imm); 3370 updateLiveVariables(LV, MI, *MIB); 3371 if (LIS) 3372 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3373 killDef(); 3374 return MIB; 3375 } 3376 } 3377 unsigned NewOpc = IsFMA 3378 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 3379 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 3380 if (!Src0Literal && getFoldableImm(Src1, Imm, &DefMI)) { 3381 if (pseudoToMCOpcode(NewOpc) != -1) { 3382 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3383 .add(*Dst) 3384 .add(*Src0) 3385 .addImm(Imm) 3386 .add(*Src2); 3387 updateLiveVariables(LV, MI, *MIB); 3388 if (LIS) 3389 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3390 killDef(); 3391 return MIB; 3392 } 3393 } 3394 if (Src0Literal || getFoldableImm(Src0, Imm, &DefMI)) { 3395 if (Src0Literal) { 3396 Imm = Src0->getImm(); 3397 DefMI = nullptr; 3398 } 3399 if (pseudoToMCOpcode(NewOpc) != -1 && 3400 isOperandLegal( 3401 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 3402 Src1)) { 3403 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3404 .add(*Dst) 3405 .add(*Src1) 3406 .addImm(Imm) 3407 .add(*Src2); 3408 updateLiveVariables(LV, MI, *MIB); 3409 if (LIS) 3410 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3411 if (DefMI) 3412 killDef(); 3413 return MIB; 3414 } 3415 } 3416 } 3417 3418 // VOP2 mac/fmac with a literal operand cannot be converted to VOP3 mad/fma 3419 // because VOP3 does not allow a literal operand. 3420 // TODO: Remove this restriction for GFX10. 3421 if (Src0Literal) 3422 return nullptr; 3423 3424 unsigned NewOpc = IsFMA ? IsF16 ? AMDGPU::V_FMA_F16_gfx9_e64 3425 : IsF64 ? AMDGPU::V_FMA_F64_e64 3426 : IsLegacy 3427 ? AMDGPU::V_FMA_LEGACY_F32_e64 3428 : AMDGPU::V_FMA_F32_e64 3429 : IsF16 ? AMDGPU::V_MAD_F16_e64 3430 : IsLegacy ? AMDGPU::V_MAD_LEGACY_F32_e64 3431 : AMDGPU::V_MAD_F32_e64; 3432 if (pseudoToMCOpcode(NewOpc) == -1) 3433 return nullptr; 3434 3435 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3436 .add(*Dst) 3437 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3438 .add(*Src0) 3439 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3440 .add(*Src1) 3441 .addImm(Src2Mods ? Src2Mods->getImm() : 0) 3442 .add(*Src2) 3443 .addImm(Clamp ? Clamp->getImm() : 0) 3444 .addImm(Omod ? Omod->getImm() : 0); 3445 updateLiveVariables(LV, MI, *MIB); 3446 if (LIS) 3447 LIS->ReplaceMachineInstrInMaps(MI, *MIB); 3448 return MIB; 3449 } 3450 3451 // It's not generally safe to move VALU instructions across these since it will 3452 // start using the register as a base index rather than directly. 3453 // XXX - Why isn't hasSideEffects sufficient for these? 3454 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3455 switch (MI.getOpcode()) { 3456 case AMDGPU::S_SET_GPR_IDX_ON: 3457 case AMDGPU::S_SET_GPR_IDX_MODE: 3458 case AMDGPU::S_SET_GPR_IDX_OFF: 3459 return true; 3460 default: 3461 return false; 3462 } 3463 } 3464 3465 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3466 const MachineBasicBlock *MBB, 3467 const MachineFunction &MF) const { 3468 // Skipping the check for SP writes in the base implementation. The reason it 3469 // was added was apparently due to compile time concerns. 3470 // 3471 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3472 // but is probably avoidable. 3473 3474 // Copied from base implementation. 
3475 // Terminators and labels can't be scheduled around. 3476 if (MI.isTerminator() || MI.isPosition()) 3477 return true; 3478 3479 // INLINEASM_BR can jump to another block 3480 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3481 return true; 3482 3483 if (MI.getOpcode() == AMDGPU::SCHED_BARRIER && MI.getOperand(0).getImm() == 0) 3484 return true; 3485 3486 // Target-independent instructions do not have an implicit-use of EXEC, even 3487 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3488 // boundaries prevents incorrect movements of such instructions. 3489 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3490 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3491 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3492 changesVGPRIndexingMode(MI); 3493 } 3494 3495 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3496 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3497 Opcode == AMDGPU::DS_GWS_INIT || 3498 Opcode == AMDGPU::DS_GWS_SEMA_V || 3499 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3500 Opcode == AMDGPU::DS_GWS_SEMA_P || 3501 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3502 Opcode == AMDGPU::DS_GWS_BARRIER; 3503 } 3504 3505 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3506 // Skip the full operand and register alias search modifiesRegister 3507 // does. There's only a handful of instructions that touch this, it's only an 3508 // implicit def, and doesn't alias any other registers. 3509 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3510 for (; ImpDef && *ImpDef; ++ImpDef) { 3511 if (*ImpDef == AMDGPU::MODE) 3512 return true; 3513 } 3514 } 3515 3516 return false; 3517 } 3518 3519 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3520 unsigned Opcode = MI.getOpcode(); 3521 3522 if (MI.mayStore() && isSMRD(MI)) 3523 return true; // scalar store or atomic 3524 3525 // This will terminate the function when other lanes may need to continue. 3526 if (MI.isReturn()) 3527 return true; 3528 3529 // These instructions cause shader I/O that may cause hardware lockups 3530 // when executed with an empty EXEC mask. 3531 // 3532 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3533 // EXEC = 0, but checking for that case here seems not worth it 3534 // given the typical code patterns. 3535 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3536 isEXP(Opcode) || 3537 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3538 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3539 return true; 3540 3541 if (MI.isCall() || MI.isInlineAsm()) 3542 return true; // conservative assumption 3543 3544 // A mode change is a scalar operation that influences vector instructions. 3545 if (modifiesModeRegister(MI)) 3546 return true; 3547 3548 // These are like SALU instructions in terms of effects, so it's questionable 3549 // whether we should return true for those. 3550 // 3551 // However, executing them with EXEC = 0 causes them to operate on undefined 3552 // data, which we avoid by returning true here. 3553 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || 3554 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32) 3555 return true; 3556 3557 return false; 3558 } 3559 3560 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3561 const MachineInstr &MI) const { 3562 if (MI.isMetaInstruction()) 3563 return false; 3564 3565 // This won't read exec if this is an SGPR->SGPR copy. 
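// A copy that writes a VGPR is lane-masked by exec, so it counts as an exec
// read; an SGPR->SGPR copy only reads exec if exec is an explicit source.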
3566 if (MI.isCopyLike()) { 3567 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3568 return true; 3569 3570 // Make sure this isn't copying exec as a normal operand 3571 return MI.readsRegister(AMDGPU::EXEC, &RI); 3572 } 3573 3574 // Make a conservative assumption about the callee. 3575 if (MI.isCall()) 3576 return true; 3577 3578 // Be conservative with any unhandled generic opcodes. 3579 if (!isTargetSpecificOpcode(MI.getOpcode())) 3580 return true; 3581 3582 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3583 } 3584 3585 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3586 switch (Imm.getBitWidth()) { 3587 case 1: // This likely will be a condition code mask. 3588 return true; 3589 3590 case 32: 3591 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3592 ST.hasInv2PiInlineImm()); 3593 case 64: 3594 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3595 ST.hasInv2PiInlineImm()); 3596 case 16: 3597 return ST.has16BitInsts() && 3598 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3599 ST.hasInv2PiInlineImm()); 3600 default: 3601 llvm_unreachable("invalid bitwidth"); 3602 } 3603 } 3604 3605 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3606 uint8_t OperandType) const { 3607 if (!MO.isImm() || 3608 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3609 OperandType > AMDGPU::OPERAND_SRC_LAST) 3610 return false; 3611 3612 // MachineOperand provides no way to tell the true operand size, since it only 3613 // records a 64-bit value. We need to know the size to determine if a 32-bit 3614 // floating point immediate bit pattern is legal for an integer immediate. It 3615 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3616 3617 int64_t Imm = MO.getImm(); 3618 switch (OperandType) { 3619 case AMDGPU::OPERAND_REG_IMM_INT32: 3620 case AMDGPU::OPERAND_REG_IMM_FP32: 3621 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 3622 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3623 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3624 case AMDGPU::OPERAND_REG_IMM_V2FP32: 3625 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 3626 case AMDGPU::OPERAND_REG_IMM_V2INT32: 3627 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: 3628 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3629 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3630 int32_t Trunc = static_cast<int32_t>(Imm); 3631 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3632 } 3633 case AMDGPU::OPERAND_REG_IMM_INT64: 3634 case AMDGPU::OPERAND_REG_IMM_FP64: 3635 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3636 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3637 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 3638 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3639 ST.hasInv2PiInlineImm()); 3640 case AMDGPU::OPERAND_REG_IMM_INT16: 3641 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3642 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3643 // We would expect inline immediates to not be concerned with an integer/fp 3644 // distinction. However, in the case of 16-bit integer operations, the 3645 // "floating point" values appear to not work. It seems read the low 16-bits 3646 // of 32-bit immediates, which happens to always work for the integer 3647 // values. 3648 // 3649 // See llvm bugzilla 46302. 3650 // 3651 // TODO: Theoretically we could use op-sel to use the high bits of the 3652 // 32-bit FP values. 
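// For example, 64 is an inline constant here, but the f16 bit pattern for
// 1.0 (0x3C00) is not: for these operands only the integer inline constants
// -16..64 are accepted.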
3653 return AMDGPU::isInlinableIntLiteral(Imm); 3654 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3655 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3656 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3657 // This suffers the same problem as the scalar 16-bit cases. 3658 return AMDGPU::isInlinableIntLiteralV216(Imm); 3659 case AMDGPU::OPERAND_REG_IMM_FP16: 3660 case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED: 3661 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3662 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3663 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3664 // A few special case instructions have 16-bit operands on subtargets 3665 // where 16-bit instructions are not legal. 3666 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 3667 // constants in these cases 3668 int16_t Trunc = static_cast<int16_t>(Imm); 3669 return ST.has16BitInsts() && 3670 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3671 } 3672 3673 return false; 3674 } 3675 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3676 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3677 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3678 uint32_t Trunc = static_cast<uint32_t>(Imm); 3679 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3680 } 3681 case AMDGPU::OPERAND_KIMM32: 3682 case AMDGPU::OPERAND_KIMM16: 3683 return false; 3684 default: 3685 llvm_unreachable("invalid bitwidth"); 3686 } 3687 } 3688 3689 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3690 const MCOperandInfo &OpInfo) const { 3691 switch (MO.getType()) { 3692 case MachineOperand::MO_Register: 3693 return false; 3694 case MachineOperand::MO_Immediate: 3695 return !isInlineConstant(MO, OpInfo); 3696 case MachineOperand::MO_FrameIndex: 3697 case MachineOperand::MO_MachineBasicBlock: 3698 case MachineOperand::MO_ExternalSymbol: 3699 case MachineOperand::MO_GlobalAddress: 3700 case MachineOperand::MO_MCSymbol: 3701 return true; 3702 default: 3703 llvm_unreachable("unexpected operand type"); 3704 } 3705 } 3706 3707 static bool compareMachineOp(const MachineOperand &Op0, 3708 const MachineOperand &Op1) { 3709 if (Op0.getType() != Op1.getType()) 3710 return false; 3711 3712 switch (Op0.getType()) { 3713 case MachineOperand::MO_Register: 3714 return Op0.getReg() == Op1.getReg(); 3715 case MachineOperand::MO_Immediate: 3716 return Op0.getImm() == Op1.getImm(); 3717 default: 3718 llvm_unreachable("Didn't expect to be comparing these operand types"); 3719 } 3720 } 3721 3722 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3723 const MachineOperand &MO) const { 3724 const MCInstrDesc &InstDesc = MI.getDesc(); 3725 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3726 3727 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3728 3729 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3730 return true; 3731 3732 if (OpInfo.RegClass < 0) 3733 return false; 3734 3735 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3736 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3737 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3738 AMDGPU::OpName::src2)) 3739 return false; 3740 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3741 } 3742 3743 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3744 return false; 3745 3746 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3747 return true; 3748 3749 return ST.hasVOP3Literal(); 3750 } 3751 3752 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3753 // GFX90A does not have V_MUL_LEGACY_F32_e32. 
3754 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts()) 3755 return false; 3756 3757 int Op32 = AMDGPU::getVOPe32(Opcode); 3758 if (Op32 == -1) 3759 return false; 3760 3761 return pseudoToMCOpcode(Op32) != -1; 3762 } 3763 3764 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3765 // The src0_modifier operand is present on all instructions 3766 // that have modifiers. 3767 3768 return AMDGPU::getNamedOperandIdx(Opcode, 3769 AMDGPU::OpName::src0_modifiers) != -1; 3770 } 3771 3772 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3773 unsigned OpName) const { 3774 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3775 return Mods && Mods->getImm(); 3776 } 3777 3778 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3779 return any_of(ModifierOpNames, 3780 [&](unsigned Name) { return hasModifiersSet(MI, Name); }); 3781 } 3782 3783 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3784 const MachineRegisterInfo &MRI) const { 3785 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3786 // Can't shrink instruction with three operands. 3787 if (Src2) { 3788 switch (MI.getOpcode()) { 3789 default: return false; 3790 3791 case AMDGPU::V_ADDC_U32_e64: 3792 case AMDGPU::V_SUBB_U32_e64: 3793 case AMDGPU::V_SUBBREV_U32_e64: { 3794 const MachineOperand *Src1 3795 = getNamedOperand(MI, AMDGPU::OpName::src1); 3796 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3797 return false; 3798 // Additional verification is needed for sdst/src2. 3799 return true; 3800 } 3801 case AMDGPU::V_MAC_F16_e64: 3802 case AMDGPU::V_MAC_F32_e64: 3803 case AMDGPU::V_MAC_LEGACY_F32_e64: 3804 case AMDGPU::V_FMAC_F16_e64: 3805 case AMDGPU::V_FMAC_F32_e64: 3806 case AMDGPU::V_FMAC_F64_e64: 3807 case AMDGPU::V_FMAC_LEGACY_F32_e64: 3808 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3809 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3810 return false; 3811 break; 3812 3813 case AMDGPU::V_CNDMASK_B32_e64: 3814 break; 3815 } 3816 } 3817 3818 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3819 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3820 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3821 return false; 3822 3823 // We don't need to check src0, all input types are legal, so just make sure 3824 // src0 isn't using any modifiers. 3825 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3826 return false; 3827 3828 // Can it be shrunk to a valid 32 bit opcode? 3829 if (!hasVALU32BitEncoding(MI.getOpcode())) 3830 return false; 3831 3832 // Check output modifiers 3833 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3834 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3835 } 3836 3837 // Set VCC operand with all flags from \p Orig, except for setting it as 3838 // implicit. 3839 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3840 const MachineOperand &Orig) { 3841 3842 for (MachineOperand &Use : MI.implicit_operands()) { 3843 if (Use.isUse() && 3844 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3845 Use.setIsUndef(Orig.isUndef()); 3846 Use.setIsKill(Orig.isKill()); 3847 return; 3848 } 3849 } 3850 } 3851 3852 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3853 unsigned Op32) const { 3854 MachineBasicBlock *MBB = MI.getParent(); 3855 MachineInstrBuilder Inst32 = 3856 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3857 .setMIFlags(MI.getFlags()); 3858 3859 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 
3860 // For VOPC instructions, this is replaced by an implicit def of vcc. 3861 if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst) != -1) { 3862 // dst 3863 Inst32.add(MI.getOperand(0)); 3864 } else if (AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::sdst) != -1) { 3865 // VOPCX instructions won't be writing to an explicit dst, so this should 3866 // not fail for these instructions. 3867 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3868 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3869 "Unexpected case"); 3870 } 3871 3872 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3873 3874 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3875 if (Src1) 3876 Inst32.add(*Src1); 3877 3878 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3879 3880 if (Src2) { 3881 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3882 if (Op32Src2Idx != -1) { 3883 Inst32.add(*Src2); 3884 } else { 3885 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3886 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3887 // of vcc was already added during the initial BuildMI, but we 3888 // 1) may need to change vcc to vcc_lo to preserve the original register 3889 // 2) have to preserve the original flags. 3890 fixImplicitOperands(*Inst32); 3891 copyFlagsToImplicitVCC(*Inst32, *Src2); 3892 } 3893 } 3894 3895 return Inst32; 3896 } 3897 3898 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3899 const MachineOperand &MO, 3900 const MCOperandInfo &OpInfo) const { 3901 // Literal constants use the constant bus. 3902 //if (isLiteralConstantLike(MO, OpInfo)) 3903 // return true; 3904 if (MO.isImm()) 3905 return !isInlineConstant(MO, OpInfo); 3906 3907 if (!MO.isReg()) 3908 return true; // Misc other operands like FrameIndex 3909 3910 if (!MO.isUse()) 3911 return false; 3912 3913 if (MO.getReg().isVirtual()) 3914 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3915 3916 // Null is free 3917 if (MO.getReg() == AMDGPU::SGPR_NULL) 3918 return false; 3919 3920 // SGPRs use the constant bus 3921 if (MO.isImplicit()) { 3922 return MO.getReg() == AMDGPU::M0 || 3923 MO.getReg() == AMDGPU::VCC || 3924 MO.getReg() == AMDGPU::VCC_LO; 3925 } else { 3926 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3927 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3928 } 3929 } 3930 3931 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3932 for (const MachineOperand &MO : MI.implicit_operands()) { 3933 // We only care about reads. 
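// e.g. the implicit VCC read of V_CNDMASK_B32_e32 or V_ADDC_U32_e32, which
// counts against the constant bus limit just like an explicit SGPR operand.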
3934 if (MO.isDef()) 3935 continue; 3936 3937 switch (MO.getReg()) { 3938 case AMDGPU::VCC: 3939 case AMDGPU::VCC_LO: 3940 case AMDGPU::VCC_HI: 3941 case AMDGPU::M0: 3942 case AMDGPU::FLAT_SCR: 3943 return MO.getReg(); 3944 3945 default: 3946 break; 3947 } 3948 } 3949 3950 return AMDGPU::NoRegister; 3951 } 3952 3953 static bool shouldReadExec(const MachineInstr &MI) { 3954 if (SIInstrInfo::isVALU(MI)) { 3955 switch (MI.getOpcode()) { 3956 case AMDGPU::V_READLANE_B32: 3957 case AMDGPU::V_WRITELANE_B32: 3958 return false; 3959 } 3960 3961 return true; 3962 } 3963 3964 if (MI.isPreISelOpcode() || 3965 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3966 SIInstrInfo::isSALU(MI) || 3967 SIInstrInfo::isSMRD(MI)) 3968 return false; 3969 3970 return true; 3971 } 3972 3973 static bool isSubRegOf(const SIRegisterInfo &TRI, 3974 const MachineOperand &SuperVec, 3975 const MachineOperand &SubReg) { 3976 if (SubReg.getReg().isPhysical()) 3977 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3978 3979 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3980 SubReg.getReg() == SuperVec.getReg(); 3981 } 3982 3983 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3984 StringRef &ErrInfo) const { 3985 uint16_t Opcode = MI.getOpcode(); 3986 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3987 return true; 3988 3989 const MachineFunction *MF = MI.getParent()->getParent(); 3990 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3991 3992 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3993 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3994 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3995 3996 // Make sure the number of operands is correct. 3997 const MCInstrDesc &Desc = get(Opcode); 3998 if (!Desc.isVariadic() && 3999 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 4000 ErrInfo = "Instruction has wrong number of operands."; 4001 return false; 4002 } 4003 4004 if (MI.isInlineAsm()) { 4005 // Verify register classes for inlineasm constraints. 4006 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 4007 I != E; ++I) { 4008 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 4009 if (!RC) 4010 continue; 4011 4012 const MachineOperand &Op = MI.getOperand(I); 4013 if (!Op.isReg()) 4014 continue; 4015 4016 Register Reg = Op.getReg(); 4017 if (!Reg.isVirtual() && !RC->contains(Reg)) { 4018 ErrInfo = "inlineasm operand has incorrect register class."; 4019 return false; 4020 } 4021 } 4022 4023 return true; 4024 } 4025 4026 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 4027 ErrInfo = "missing memory operand from MIMG instruction."; 4028 return false; 4029 } 4030 4031 // Make sure the register classes are correct. 4032 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 4033 const MachineOperand &MO = MI.getOperand(i); 4034 if (MO.isFPImm()) { 4035 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 4036 "all fp values to integers."; 4037 return false; 4038 } 4039 4040 int RegClass = Desc.OpInfo[i].RegClass; 4041 4042 switch (Desc.OpInfo[i].OperandType) { 4043 case MCOI::OPERAND_REGISTER: 4044 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 4045 ErrInfo = "Illegal immediate value for operand."; 4046 return false; 4047 } 4048 break; 4049 case AMDGPU::OPERAND_REG_IMM_INT32: 4050 case AMDGPU::OPERAND_REG_IMM_FP32: 4051 case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED: 4052 case AMDGPU::OPERAND_REG_IMM_V2FP32: 4053 break; 4054 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 4055 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 4056 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 4057 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 4058 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 4059 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 4060 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 4061 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 4062 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 4063 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 4064 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: { 4065 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 4066 ErrInfo = "Illegal immediate value for operand."; 4067 return false; 4068 } 4069 break; 4070 } 4071 case MCOI::OPERAND_IMMEDIATE: 4072 case AMDGPU::OPERAND_KIMM32: 4073 // Check if this operand is an immediate. 4074 // FrameIndex operands will be replaced by immediates, so they are 4075 // allowed. 4076 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 4077 ErrInfo = "Expected immediate, but got non-immediate"; 4078 return false; 4079 } 4080 LLVM_FALLTHROUGH; 4081 default: 4082 continue; 4083 } 4084 4085 if (!MO.isReg()) 4086 continue; 4087 Register Reg = MO.getReg(); 4088 if (!Reg) 4089 continue; 4090 4091 // FIXME: Ideally we would have separate instruction definitions with the 4092 // aligned register constraint. 4093 // FIXME: We do not verify inline asm operands, but custom inline asm 4094 // verification is broken anyway 4095 if (ST.needsAlignedVGPRs()) { 4096 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); 4097 if (RI.hasVectorRegisters(RC) && MO.getSubReg()) { 4098 const TargetRegisterClass *SubRC = 4099 RI.getSubRegClass(RC, MO.getSubReg()); 4100 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); 4101 if (RC) 4102 RC = SubRC; 4103 } 4104 4105 // Check that this is the aligned version of the class. 
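// e.g. on subtargets that require alignment, a 64-bit VGPR tuple must start
// at an even register: v[2:3] is allowed, v[3:4] is not.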
4106 if (!RC || !RI.isProperlyAlignedRC(*RC)) { 4107 ErrInfo = "Subtarget requires even aligned vector registers"; 4108 return false; 4109 } 4110 } 4111 4112 if (RegClass != -1) { 4113 if (Reg.isVirtual()) 4114 continue; 4115 4116 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 4117 if (!RC->contains(Reg)) { 4118 ErrInfo = "Operand has incorrect register class."; 4119 return false; 4120 } 4121 } 4122 } 4123 4124 // Verify SDWA 4125 if (isSDWA(MI)) { 4126 if (!ST.hasSDWA()) { 4127 ErrInfo = "SDWA is not supported on this target"; 4128 return false; 4129 } 4130 4131 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4132 4133 for (int OpIdx : {DstIdx, Src0Idx, Src1Idx, Src2Idx}) { 4134 if (OpIdx == -1) 4135 continue; 4136 const MachineOperand &MO = MI.getOperand(OpIdx); 4137 4138 if (!ST.hasSDWAScalar()) { 4139 // Only VGPRS on VI 4140 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 4141 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 4142 return false; 4143 } 4144 } else { 4145 // No immediates on GFX9 4146 if (!MO.isReg()) { 4147 ErrInfo = 4148 "Only reg allowed as operands in SDWA instructions on GFX9+"; 4149 return false; 4150 } 4151 } 4152 } 4153 4154 if (!ST.hasSDWAOmod()) { 4155 // No omod allowed on VI 4156 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 4157 if (OMod != nullptr && 4158 (!OMod->isImm() || OMod->getImm() != 0)) { 4159 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 4160 return false; 4161 } 4162 } 4163 4164 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 4165 if (isVOPC(BasicOpcode)) { 4166 if (!ST.hasSDWASdst() && DstIdx != -1) { 4167 // Only vcc allowed as dst on VI for VOPC 4168 const MachineOperand &Dst = MI.getOperand(DstIdx); 4169 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 4170 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 4171 return false; 4172 } 4173 } else if (!ST.hasSDWAOutModsVOPC()) { 4174 // No clamp allowed on GFX9 for VOPC 4175 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 4176 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 4177 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 4178 return false; 4179 } 4180 4181 // No omod allowed on GFX9 for VOPC 4182 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 4183 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 4184 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 4185 return false; 4186 } 4187 } 4188 } 4189 4190 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 4191 if (DstUnused && DstUnused->isImm() && 4192 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 4193 const MachineOperand &Dst = MI.getOperand(DstIdx); 4194 if (!Dst.isReg() || !Dst.isTied()) { 4195 ErrInfo = "Dst register should have tied register"; 4196 return false; 4197 } 4198 4199 const MachineOperand &TiedMO = 4200 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 4201 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 4202 ErrInfo = 4203 "Dst register should be tied to implicit use of preserved register"; 4204 return false; 4205 } else if (TiedMO.getReg().isPhysical() && 4206 Dst.getReg() != TiedMO.getReg()) { 4207 ErrInfo = "Dst register should use same physical register as preserved"; 4208 return false; 4209 } 4210 } 4211 } 4212 4213 // Verify MIMG 4214 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 4215 // Ensure that the return type used is large 
enough for all the options 4216 // being used TFE/LWE require an extra result register. 4217 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 4218 if (DMask) { 4219 uint64_t DMaskImm = DMask->getImm(); 4220 uint32_t RegCount = 4221 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 4222 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 4223 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 4224 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 4225 4226 // Adjust for packed 16 bit values 4227 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 4228 RegCount >>= 1; 4229 4230 // Adjust if using LWE or TFE 4231 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 4232 RegCount += 1; 4233 4234 const uint32_t DstIdx = 4235 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 4236 const MachineOperand &Dst = MI.getOperand(DstIdx); 4237 if (Dst.isReg()) { 4238 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 4239 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 4240 if (RegCount > DstSize) { 4241 ErrInfo = "MIMG instruction returns too many registers for dst " 4242 "register class"; 4243 return false; 4244 } 4245 } 4246 } 4247 } 4248 4249 // Verify VOP*. Ignore multiple sgpr operands on writelane. 4250 if (isVALU(MI) && Desc.getOpcode() != AMDGPU::V_WRITELANE_B32) { 4251 unsigned ConstantBusCount = 0; 4252 bool UsesLiteral = false; 4253 const MachineOperand *LiteralVal = nullptr; 4254 4255 int ImmIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm); 4256 if (ImmIdx != -1) { 4257 ++ConstantBusCount; 4258 UsesLiteral = true; 4259 LiteralVal = &MI.getOperand(ImmIdx); 4260 } 4261 4262 SmallVector<Register, 2> SGPRsUsed; 4263 Register SGPRUsed; 4264 4265 // Only look at the true operands. Only a real operand can use the constant 4266 // bus, and we don't want to check pseudo-operands like the source modifier 4267 // flags. 
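// Each distinct SGPR and at most one literal count towards the limit. For
// example, with a limit of 1 (pre-GFX10), a VOP3 add of two different SGPRs
// is invalid, while using the same SGPR for both sources is fine.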
4268 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 4269 if (OpIdx == -1) 4270 break; 4271 const MachineOperand &MO = MI.getOperand(OpIdx); 4272 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4273 if (MO.isReg()) { 4274 SGPRUsed = MO.getReg(); 4275 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 4276 return SGPRUsed != SGPR; 4277 })) { 4278 ++ConstantBusCount; 4279 SGPRsUsed.push_back(SGPRUsed); 4280 } 4281 } else { 4282 if (!UsesLiteral) { 4283 ++ConstantBusCount; 4284 UsesLiteral = true; 4285 LiteralVal = &MO; 4286 } else if (!MO.isIdenticalTo(*LiteralVal)) { 4287 assert(isVOP2(MI) || isVOP3(MI)); 4288 ErrInfo = "VOP2/VOP3 instruction uses more than one literal"; 4289 return false; 4290 } 4291 } 4292 } 4293 } 4294 4295 SGPRUsed = findImplicitSGPRRead(MI); 4296 if (SGPRUsed != AMDGPU::NoRegister) { 4297 // Implicit uses may safely overlap true operands 4298 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 4299 return !RI.regsOverlap(SGPRUsed, SGPR); 4300 })) { 4301 ++ConstantBusCount; 4302 SGPRsUsed.push_back(SGPRUsed); 4303 } 4304 } 4305 4306 // v_writelane_b32 is an exception from constant bus restriction: 4307 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 4308 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 4309 Opcode != AMDGPU::V_WRITELANE_B32) { 4310 ErrInfo = "VOP* instruction violates constant bus restriction"; 4311 return false; 4312 } 4313 4314 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) { 4315 ErrInfo = "VOP3 instruction uses literal"; 4316 return false; 4317 } 4318 } 4319 4320 // Special case for writelane - this can break the multiple constant bus rule, 4321 // but still can't use more than one SGPR register 4322 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 4323 unsigned SGPRCount = 0; 4324 Register SGPRUsed = AMDGPU::NoRegister; 4325 4326 for (int OpIdx : {Src0Idx, Src1Idx}) { 4327 if (OpIdx == -1) 4328 break; 4329 4330 const MachineOperand &MO = MI.getOperand(OpIdx); 4331 4332 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4333 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 4334 if (MO.getReg() != SGPRUsed) 4335 ++SGPRCount; 4336 SGPRUsed = MO.getReg(); 4337 } 4338 } 4339 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 4340 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 4341 return false; 4342 } 4343 } 4344 } 4345 4346 // Verify misc. restrictions on specific instructions. 
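// e.g. v_div_scale_{f32|f64} must repeat one source (src0 == src1 or
// src0 == src2) and may not use the ABS source modifier.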
4347 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 || 4348 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) { 4349 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4350 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4351 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 4352 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 4353 if (!compareMachineOp(Src0, Src1) && 4354 !compareMachineOp(Src0, Src2)) { 4355 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 4356 return false; 4357 } 4358 } 4359 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 4360 SISrcMods::ABS) || 4361 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 4362 SISrcMods::ABS) || 4363 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 4364 SISrcMods::ABS)) { 4365 ErrInfo = "ABS not allowed in VOP3B instructions"; 4366 return false; 4367 } 4368 } 4369 4370 if (isSOP2(MI) || isSOPC(MI)) { 4371 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4372 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4373 4374 if (!Src0.isReg() && !Src1.isReg() && 4375 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType) && 4376 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType) && 4377 !Src0.isIdenticalTo(Src1)) { 4378 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 4379 return false; 4380 } 4381 } 4382 4383 if (isSOPK(MI)) { 4384 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 4385 if (Desc.isBranch()) { 4386 if (!Op->isMBB()) { 4387 ErrInfo = "invalid branch target for SOPK instruction"; 4388 return false; 4389 } 4390 } else { 4391 uint64_t Imm = Op->getImm(); 4392 if (sopkIsZext(MI)) { 4393 if (!isUInt<16>(Imm)) { 4394 ErrInfo = "invalid immediate for SOPK instruction"; 4395 return false; 4396 } 4397 } else { 4398 if (!isInt<16>(Imm)) { 4399 ErrInfo = "invalid immediate for SOPK instruction"; 4400 return false; 4401 } 4402 } 4403 } 4404 } 4405 4406 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 4407 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 4408 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4409 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 4410 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4411 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 4412 4413 const unsigned StaticNumOps = Desc.getNumOperands() + 4414 Desc.getNumImplicitUses(); 4415 const unsigned NumImplicitOps = IsDst ? 2 : 1; 4416 4417 // Allow additional implicit operands. This allows a fixup done by the post 4418 // RA scheduler where the main implicit operand is killed and implicit-defs 4419 // are added for sub-registers that remain live after this instruction. 4420 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 4421 ErrInfo = "missing implicit register operands"; 4422 return false; 4423 } 4424 4425 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4426 if (IsDst) { 4427 if (!Dst->isUse()) { 4428 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 4429 return false; 4430 } 4431 4432 unsigned UseOpIdx; 4433 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 4434 UseOpIdx != StaticNumOps + 1) { 4435 ErrInfo = "movrel implicit operands should be tied"; 4436 return false; 4437 } 4438 } 4439 4440 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4441 const MachineOperand &ImpUse 4442 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 4443 if (!ImpUse.isReg() || !ImpUse.isUse() || 4444 !isSubRegOf(RI, ImpUse, IsDst ? 
*Dst : Src0)) { 4445 ErrInfo = "src0 should be subreg of implicit vector use"; 4446 return false; 4447 } 4448 } 4449 4450 // Make sure we aren't losing exec uses in the td files. This mostly requires 4451 // being careful when using let Uses to try to add other use registers. 4452 if (shouldReadExec(MI)) { 4453 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 4454 ErrInfo = "VALU instruction does not implicitly read exec mask"; 4455 return false; 4456 } 4457 } 4458 4459 if (isSMRD(MI)) { 4460 if (MI.mayStore()) { 4461 // The register offset form of scalar stores may only use m0 as the 4462 // soffset register. 4463 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soffset); 4464 if (Soff && Soff->getReg() != AMDGPU::M0) { 4465 ErrInfo = "scalar stores must use m0 as offset register"; 4466 return false; 4467 } 4468 } 4469 } 4470 4471 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 4472 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4473 if (Offset->getImm() != 0) { 4474 ErrInfo = "subtarget does not support offsets in flat instructions"; 4475 return false; 4476 } 4477 } 4478 4479 if (isMIMG(MI)) { 4480 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4481 if (DimOp) { 4482 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4483 AMDGPU::OpName::vaddr0); 4484 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4485 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4486 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4487 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4488 const AMDGPU::MIMGDimInfo *Dim = 4489 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4490 4491 if (!Dim) { 4492 ErrInfo = "dim is out of range"; 4493 return false; 4494 } 4495 4496 bool IsA16 = false; 4497 if (ST.hasR128A16()) { 4498 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4499 IsA16 = R128A16->getImm() != 0; 4500 } else if (ST.hasGFX10A16()) { 4501 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4502 IsA16 = A16->getImm() != 0; 4503 } 4504 4505 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4506 4507 unsigned AddrWords = 4508 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16()); 4509 4510 unsigned VAddrWords; 4511 if (IsNSA) { 4512 VAddrWords = SRsrcIdx - VAddr0Idx; 4513 } else { 4514 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4515 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4516 if (AddrWords > 8) 4517 AddrWords = 16; 4518 } 4519 4520 if (VAddrWords != AddrWords) { 4521 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4522 << " but got " << VAddrWords << "\n"); 4523 ErrInfo = "bad vaddr size"; 4524 return false; 4525 } 4526 } 4527 } 4528 4529 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4530 if (DppCt) { 4531 using namespace AMDGPU::DPP; 4532 4533 unsigned DC = DppCt->getImm(); 4534 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4535 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4536 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4537 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4538 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4539 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4540 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4541 ErrInfo = "Invalid dpp_ctrl value"; 4542 return false; 4543 } 4544 if (DC >= 
DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4545 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4546 ErrInfo = "Invalid dpp_ctrl value: " 4547 "wavefront shifts are not supported on GFX10+"; 4548 return false; 4549 } 4550 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4551 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4552 ErrInfo = "Invalid dpp_ctrl value: " 4553 "broadcasts are not supported on GFX10+"; 4554 return false; 4555 } 4556 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4557 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4558 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST && 4559 DC <= DppCtrl::ROW_NEWBCAST_LAST && 4560 !ST.hasGFX90AInsts()) { 4561 ErrInfo = "Invalid dpp_ctrl value: " 4562 "row_newbroadcast/row_share is not supported before " 4563 "GFX90A/GFX10"; 4564 return false; 4565 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) { 4566 ErrInfo = "Invalid dpp_ctrl value: " 4567 "row_share and row_xmask are not supported before GFX10"; 4568 return false; 4569 } 4570 } 4571 4572 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4573 4574 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO && 4575 ((DstIdx >= 0 && 4576 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || 4577 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) || 4578 ((Src0Idx >= 0 && 4579 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || 4580 Desc.OpInfo[Src0Idx].RegClass == 4581 AMDGPU::VReg_64_Align2RegClassID)))) && 4582 !AMDGPU::isLegal64BitDPPControl(DC)) { 4583 ErrInfo = "Invalid dpp_ctrl value: " 4584 "64 bit dpp only support row_newbcast"; 4585 return false; 4586 } 4587 } 4588 4589 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) { 4590 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4591 uint16_t DataNameIdx = isDS(Opcode) ? 
AMDGPU::OpName::data0 4592 : AMDGPU::OpName::vdata; 4593 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx); 4594 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1); 4595 if (Data && !Data->isReg()) 4596 Data = nullptr; 4597 4598 if (ST.hasGFX90AInsts()) { 4599 if (Dst && Data && 4600 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { 4601 ErrInfo = "Invalid register class: " 4602 "vdata and vdst should be both VGPR or AGPR"; 4603 return false; 4604 } 4605 if (Data && Data2 && 4606 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { 4607 ErrInfo = "Invalid register class: " 4608 "both data operands should be VGPR or AGPR"; 4609 return false; 4610 } 4611 } else { 4612 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || 4613 (Data && RI.isAGPR(MRI, Data->getReg())) || 4614 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { 4615 ErrInfo = "Invalid register class: " 4616 "agpr loads and stores not supported on this GPU"; 4617 return false; 4618 } 4619 } 4620 } 4621 4622 if (ST.needsAlignedVGPRs()) { 4623 const auto isAlignedReg = [&MI, &MRI, this](unsigned OpName) -> bool { 4624 const MachineOperand *Op = getNamedOperand(MI, OpName); 4625 if (!Op) 4626 return true; 4627 Register Reg = Op->getReg(); 4628 if (Reg.isPhysical()) 4629 return !(RI.getHWRegIndex(Reg) & 1); 4630 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 4631 return RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) && 4632 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1); 4633 }; 4634 4635 if (MI.getOpcode() == AMDGPU::DS_GWS_INIT || 4636 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR || 4637 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER) { 4638 4639 if (!isAlignedReg(AMDGPU::OpName::data0)) { 4640 ErrInfo = "Subtarget requires even aligned vector registers " 4641 "for DS_GWS instructions"; 4642 return false; 4643 } 4644 } 4645 4646 if (isMIMG(MI)) { 4647 if (!isAlignedReg(AMDGPU::OpName::vaddr)) { 4648 ErrInfo = "Subtarget requires even aligned vector registers " 4649 "for vaddr operand of image instructions"; 4650 return false; 4651 } 4652 } 4653 } 4654 4655 if (MI.getOpcode() == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && 4656 !ST.hasGFX90AInsts()) { 4657 const MachineOperand *Src = getNamedOperand(MI, AMDGPU::OpName::src0); 4658 if (Src->isReg() && RI.isSGPRReg(MRI, Src->getReg())) { 4659 ErrInfo = "Invalid register class: " 4660 "v_accvgpr_write with an SGPR is not supported on this GPU"; 4661 return false; 4662 } 4663 } 4664 4665 if (Desc.getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS) { 4666 const MachineOperand &SrcOp = MI.getOperand(1); 4667 if (!SrcOp.isReg() || SrcOp.getReg().isVirtual()) { 4668 ErrInfo = "pseudo expects only physical SGPRs"; 4669 return false; 4670 } 4671 } 4672 4673 return true; 4674 } 4675 4676 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4677 switch (MI.getOpcode()) { 4678 default: return AMDGPU::INSTRUCTION_LIST_END; 4679 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4680 case AMDGPU::COPY: return AMDGPU::COPY; 4681 case AMDGPU::PHI: return AMDGPU::PHI; 4682 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4683 case AMDGPU::WQM: return AMDGPU::WQM; 4684 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4685 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM; 4686 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM; 4687 case AMDGPU::S_MOV_B32: { 4688 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4689 return MI.getOperand(1).isReg() || 4690 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 
4691 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4692 } 4693 case AMDGPU::S_ADD_I32: 4694 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4695 case AMDGPU::S_ADDC_U32: 4696 return AMDGPU::V_ADDC_U32_e32; 4697 case AMDGPU::S_SUB_I32: 4698 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4699 // FIXME: These are not consistently handled, and selected when the carry is 4700 // used. 4701 case AMDGPU::S_ADD_U32: 4702 return AMDGPU::V_ADD_CO_U32_e32; 4703 case AMDGPU::S_SUB_U32: 4704 return AMDGPU::V_SUB_CO_U32_e32; 4705 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4706 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64; 4707 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64; 4708 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64; 4709 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4710 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4711 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4712 case AMDGPU::S_XNOR_B32: 4713 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4714 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4715 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4716 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4717 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4718 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4719 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64; 4720 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4721 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64; 4722 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4723 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64; 4724 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64; 4725 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64; 4726 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64; 4727 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64; 4728 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4729 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4730 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4731 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4732 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64; 4733 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64; 4734 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64; 4735 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64; 4736 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64; 4737 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64; 4738 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64; 4739 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64; 4740 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64; 4741 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64; 4742 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64; 4743 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64; 4744 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64; 4745 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64; 4746 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4747 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4748 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4749 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4750 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4751 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4752 } 4753 llvm_unreachable( 4754 "Unexpected 
scalar opcode without corresponding vector one!"); 4755 } 4756 4757 static const TargetRegisterClass * 4758 adjustAllocatableRegClass(const GCNSubtarget &ST, const SIRegisterInfo &RI, 4759 const MachineRegisterInfo &MRI, 4760 const MCInstrDesc &TID, unsigned RCID, 4761 bool IsAllocatable) { 4762 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4763 (((TID.mayLoad() || TID.mayStore()) && 4764 !(TID.TSFlags & SIInstrFlags::VGPRSpill)) || 4765 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) { 4766 switch (RCID) { 4767 case AMDGPU::AV_32RegClassID: 4768 RCID = AMDGPU::VGPR_32RegClassID; 4769 break; 4770 case AMDGPU::AV_64RegClassID: 4771 RCID = AMDGPU::VReg_64RegClassID; 4772 break; 4773 case AMDGPU::AV_96RegClassID: 4774 RCID = AMDGPU::VReg_96RegClassID; 4775 break; 4776 case AMDGPU::AV_128RegClassID: 4777 RCID = AMDGPU::VReg_128RegClassID; 4778 break; 4779 case AMDGPU::AV_160RegClassID: 4780 RCID = AMDGPU::VReg_160RegClassID; 4781 break; 4782 case AMDGPU::AV_512RegClassID: 4783 RCID = AMDGPU::VReg_512RegClassID; 4784 break; 4785 default: 4786 break; 4787 } 4788 } 4789 4790 return RI.getProperlyAlignedRC(RI.getRegClass(RCID)); 4791 } 4792 4793 const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID, 4794 unsigned OpNum, const TargetRegisterInfo *TRI, 4795 const MachineFunction &MF) 4796 const { 4797 if (OpNum >= TID.getNumOperands()) 4798 return nullptr; 4799 auto RegClass = TID.OpInfo[OpNum].RegClass; 4800 bool IsAllocatable = false; 4801 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) { 4802 // vdst and vdata should be both VGPR or AGPR, same for the DS instructions 4803 // with two data operands. Request register class constrained to VGPR only 4804 // of both operands present as Machine Copy Propagation can not check this 4805 // constraint and possibly other passes too. 4806 // 4807 // The check is limited to FLAT and DS because atomics in non-flat encoding 4808 // have their vdst and vdata tied to be the same register. 4809 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4810 AMDGPU::OpName::vdst); 4811 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4812 (TID.TSFlags & SIInstrFlags::DS) ? 
AMDGPU::OpName::data0 4813 : AMDGPU::OpName::vdata); 4814 if (DataIdx != -1) { 4815 IsAllocatable = VDstIdx != -1 || 4816 AMDGPU::getNamedOperandIdx(TID.Opcode, 4817 AMDGPU::OpName::data1) != -1; 4818 } 4819 } 4820 return adjustAllocatableRegClass(ST, RI, MF.getRegInfo(), TID, RegClass, 4821 IsAllocatable); 4822 } 4823 4824 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4825 unsigned OpNo) const { 4826 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4827 const MCInstrDesc &Desc = get(MI.getOpcode()); 4828 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4829 Desc.OpInfo[OpNo].RegClass == -1) { 4830 Register Reg = MI.getOperand(OpNo).getReg(); 4831 4832 if (Reg.isVirtual()) 4833 return MRI.getRegClass(Reg); 4834 return RI.getPhysRegClass(Reg); 4835 } 4836 4837 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4838 return adjustAllocatableRegClass(ST, RI, MRI, Desc, RCID, true); 4839 } 4840 4841 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4842 MachineBasicBlock::iterator I = MI; 4843 MachineBasicBlock *MBB = MI.getParent(); 4844 MachineOperand &MO = MI.getOperand(OpIdx); 4845 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4846 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4847 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4848 unsigned Size = RI.getRegSizeInBits(*RC); 4849 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4850 if (MO.isReg()) 4851 Opcode = AMDGPU::COPY; 4852 else if (RI.isSGPRClass(RC)) 4853 Opcode = (Size == 64) ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4854 4855 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4856 const TargetRegisterClass *VRC64 = RI.getVGPR64Class(); 4857 if (RI.getCommonSubClass(VRC64, VRC)) 4858 VRC = VRC64; 4859 else 4860 VRC = &AMDGPU::VGPR_32RegClass; 4861 4862 Register Reg = MRI.createVirtualRegister(VRC); 4863 DebugLoc DL = MBB->findDebugLoc(I); 4864 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4865 MO.ChangeToRegister(Reg, false); 4866 } 4867 4868 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4869 MachineRegisterInfo &MRI, 4870 MachineOperand &SuperReg, 4871 const TargetRegisterClass *SuperRC, 4872 unsigned SubIdx, 4873 const TargetRegisterClass *SubRC) 4874 const { 4875 MachineBasicBlock *MBB = MI->getParent(); 4876 DebugLoc DL = MI->getDebugLoc(); 4877 Register SubReg = MRI.createVirtualRegister(SubRC); 4878 4879 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4880 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4881 .addReg(SuperReg.getReg(), 0, SubIdx); 4882 return SubReg; 4883 } 4884 4885 // Just in case the super register is itself a sub-register, copy it to a new 4886 // value so we don't need to worry about merging its subreg index with the 4887 // SubIdx passed to this function. The register coalescer should be able to 4888 // eliminate this extra copy. 
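// e.g. extracting sub0 from a use of %X.sub2_sub3 first copies %X.sub2_sub3
// into a fresh 64-bit register and then takes sub0 of that copy.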
4889 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4890 4891 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4892 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4893 4894 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4895 .addReg(NewSuperReg, 0, SubIdx); 4896 4897 return SubReg; 4898 } 4899 4900 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4901 MachineBasicBlock::iterator MII, 4902 MachineRegisterInfo &MRI, 4903 MachineOperand &Op, 4904 const TargetRegisterClass *SuperRC, 4905 unsigned SubIdx, 4906 const TargetRegisterClass *SubRC) const { 4907 if (Op.isImm()) { 4908 if (SubIdx == AMDGPU::sub0) 4909 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4910 if (SubIdx == AMDGPU::sub1) 4911 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4912 4913 llvm_unreachable("Unhandled register index for immediate"); 4914 } 4915 4916 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4917 SubIdx, SubRC); 4918 return MachineOperand::CreateReg(SubReg, false); 4919 } 4920 4921 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4922 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4923 assert(Inst.getNumExplicitOperands() == 3); 4924 MachineOperand Op1 = Inst.getOperand(1); 4925 Inst.removeOperand(1); 4926 Inst.addOperand(Op1); 4927 } 4928 4929 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4930 const MCOperandInfo &OpInfo, 4931 const MachineOperand &MO) const { 4932 if (!MO.isReg()) 4933 return false; 4934 4935 Register Reg = MO.getReg(); 4936 4937 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4938 if (Reg.isPhysical()) 4939 return DRC->contains(Reg); 4940 4941 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4942 4943 if (MO.getSubReg()) { 4944 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4945 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4946 if (!SuperRC) 4947 return false; 4948 4949 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4950 if (!DRC) 4951 return false; 4952 } 4953 return RC->hasSuperClassEq(DRC); 4954 } 4955 4956 bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4957 const MCOperandInfo &OpInfo, 4958 const MachineOperand &MO) const { 4959 if (MO.isReg()) 4960 return isLegalRegOperand(MRI, OpInfo, MO); 4961 4962 // Handle non-register types that are treated like immediates. 4963 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4964 return true; 4965 } 4966 4967 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4968 const MachineOperand *MO) const { 4969 const MachineFunction &MF = *MI.getParent()->getParent(); 4970 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4971 const MCInstrDesc &InstDesc = MI.getDesc(); 4972 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4973 const TargetRegisterClass *DefinedRC = 4974 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4975 if (!MO) 4976 MO = &MI.getOperand(OpIdx); 4977 4978 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4979 int LiteralLimit = !isVOP3(MI) || ST.hasVOP3Literal() ? 
1 : 0; 4980 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4981 if (isLiteralConstantLike(*MO, OpInfo) && !LiteralLimit--) 4982 return false; 4983 4984 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4985 if (MO->isReg()) 4986 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4987 4988 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4989 if (i == OpIdx) 4990 continue; 4991 const MachineOperand &Op = MI.getOperand(i); 4992 if (Op.isReg()) { 4993 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4994 if (!SGPRsUsed.count(SGPR) && 4995 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4996 if (--ConstantBusLimit <= 0) 4997 return false; 4998 SGPRsUsed.insert(SGPR); 4999 } 5000 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32 || 5001 (AMDGPU::isSISrcOperand(InstDesc, i) && 5002 isLiteralConstantLike(Op, InstDesc.OpInfo[i]))) { 5003 if (!LiteralLimit--) 5004 return false; 5005 if (--ConstantBusLimit <= 0) 5006 return false; 5007 } 5008 } 5009 } 5010 5011 if (MO->isReg()) { 5012 if (!DefinedRC) { 5013 // This operand allows any register. 5014 return true; 5015 } 5016 if (!isLegalRegOperand(MRI, OpInfo, *MO)) 5017 return false; 5018 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); 5019 if (IsAGPR && !ST.hasMAIInsts()) 5020 return false; 5021 unsigned Opc = MI.getOpcode(); 5022 if (IsAGPR && 5023 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 5024 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc))) 5025 return false; 5026 // Atomics should have both vdst and vdata either vgpr or agpr. 5027 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 5028 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc, 5029 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata); 5030 if ((int)OpIdx == VDstIdx && DataIdx != -1 && 5031 MI.getOperand(DataIdx).isReg() && 5032 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) 5033 return false; 5034 if ((int)OpIdx == DataIdx) { 5035 if (VDstIdx != -1 && 5036 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) 5037 return false; 5038 // DS instructions with 2 src operands also must have tied RC. 5039 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, 5040 AMDGPU::OpName::data1); 5041 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() && 5042 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) 5043 return false; 5044 } 5045 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && !ST.hasGFX90AInsts() && 5046 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) && 5047 RI.isSGPRReg(MRI, MO->getReg())) 5048 return false; 5049 return true; 5050 } 5051 5052 // Handle non-register types that are treated like immediates. 5053 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 5054 5055 if (!DefinedRC) { 5056 // This operand expects an immediate. 5057 return true; 5058 } 5059 5060 return isImmOperandLegal(MI, OpIdx, *MO); 5061 } 5062 5063 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 5064 MachineInstr &MI) const { 5065 unsigned Opc = MI.getOpcode(); 5066 const MCInstrDesc &InstrDesc = get(Opc); 5067 5068 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 5069 MachineOperand &Src0 = MI.getOperand(Src0Idx); 5070 5071 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 5072 MachineOperand &Src1 = MI.getOperand(Src1Idx); 5073 5074 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 5075 // we need to only have one constant bus use before GFX10. 
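// For example, V_ADDC_U32_e32 implicitly reads VCC; before GFX10 that read
// already occupies the single constant bus slot, so an SGPR or literal in
// src0 has to be moved into a VGPR first.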
5076 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 5077 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 5078 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 5079 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 5080 legalizeOpWithMove(MI, Src0Idx); 5081 5082 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 5083 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 5084 // src0/src1 with V_READFIRSTLANE. 5085 if (Opc == AMDGPU::V_WRITELANE_B32) { 5086 const DebugLoc &DL = MI.getDebugLoc(); 5087 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 5088 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5089 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5090 .add(Src0); 5091 Src0.ChangeToRegister(Reg, false); 5092 } 5093 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 5094 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5095 const DebugLoc &DL = MI.getDebugLoc(); 5096 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5097 .add(Src1); 5098 Src1.ChangeToRegister(Reg, false); 5099 } 5100 return; 5101 } 5102 5103 // No VOP2 instructions support AGPRs. 5104 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 5105 legalizeOpWithMove(MI, Src0Idx); 5106 5107 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 5108 legalizeOpWithMove(MI, Src1Idx); 5109 5110 // VOP2 src0 instructions support all operand types, so we don't need to check 5111 // their legality. If src1 is already legal, we don't need to do anything. 5112 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 5113 return; 5114 5115 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 5116 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 5117 // select is uniform. 5118 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 5119 RI.isVGPR(MRI, Src1.getReg())) { 5120 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5121 const DebugLoc &DL = MI.getDebugLoc(); 5122 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5123 .add(Src1); 5124 Src1.ChangeToRegister(Reg, false); 5125 return; 5126 } 5127 5128 // We do not use commuteInstruction here because it is too aggressive and will 5129 // commute if it is possible. We only want to commute here if it improves 5130 // legality. This can be called a fairly large number of times so don't waste 5131 // compile time pointlessly swapping and checking legality again. 5132 if (HasImplicitSGPR || !MI.isCommutable()) { 5133 legalizeOpWithMove(MI, Src1Idx); 5134 return; 5135 } 5136 5137 // If src0 can be used as src1, commuting will make the operands legal. 5138 // Otherwise we have to give up and insert a move. 5139 // 5140 // TODO: Other immediate-like operand kinds could be commuted if there was a 5141 // MachineOperand::ChangeTo* for them. 
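// The hand-rolled swap below mirrors commuteInstruction's operand exchange,
// but only after the swapped form has been proven legal: src0's register
// state is saved, src1's value moves into src0, and src1 receives the saved
// src0.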
5142 if ((!Src1.isImm() && !Src1.isReg()) || 5143 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 5144 legalizeOpWithMove(MI, Src1Idx); 5145 return; 5146 } 5147 5148 int CommutedOpc = commuteOpcode(MI); 5149 if (CommutedOpc == -1) { 5150 legalizeOpWithMove(MI, Src1Idx); 5151 return; 5152 } 5153 5154 MI.setDesc(get(CommutedOpc)); 5155 5156 Register Src0Reg = Src0.getReg(); 5157 unsigned Src0SubReg = Src0.getSubReg(); 5158 bool Src0Kill = Src0.isKill(); 5159 5160 if (Src1.isImm()) 5161 Src0.ChangeToImmediate(Src1.getImm()); 5162 else if (Src1.isReg()) { 5163 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 5164 Src0.setSubReg(Src1.getSubReg()); 5165 } else 5166 llvm_unreachable("Should only have register or immediate operands"); 5167 5168 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 5169 Src1.setSubReg(Src0SubReg); 5170 fixImplicitOperands(MI); 5171 } 5172 5173 // Legalize VOP3 operands. All operand types are supported for any operand 5174 // but only one literal constant and only starting from GFX10. 5175 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 5176 MachineInstr &MI) const { 5177 unsigned Opc = MI.getOpcode(); 5178 5179 int VOP3Idx[3] = { 5180 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 5181 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 5182 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 5183 }; 5184 5185 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 || 5186 Opc == AMDGPU::V_PERMLANEX16_B32_e64) { 5187 // src1 and src2 must be scalar 5188 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 5189 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 5190 const DebugLoc &DL = MI.getDebugLoc(); 5191 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 5192 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5193 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5194 .add(Src1); 5195 Src1.ChangeToRegister(Reg, false); 5196 } 5197 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 5198 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 5199 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 5200 .add(Src2); 5201 Src2.ChangeToRegister(Reg, false); 5202 } 5203 } 5204 5205 // Find the one SGPR operand we are allowed to use. 5206 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 5207 int LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 5208 SmallDenseSet<unsigned> SGPRsUsed; 5209 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 5210 if (SGPRReg != AMDGPU::NoRegister) { 5211 SGPRsUsed.insert(SGPRReg); 5212 --ConstantBusLimit; 5213 } 5214 5215 for (int Idx : VOP3Idx) { 5216 if (Idx == -1) 5217 break; 5218 MachineOperand &MO = MI.getOperand(Idx); 5219 5220 if (!MO.isReg()) { 5221 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 5222 continue; 5223 5224 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 5225 --LiteralLimit; 5226 --ConstantBusLimit; 5227 continue; 5228 } 5229 5230 --LiteralLimit; 5231 --ConstantBusLimit; 5232 legalizeOpWithMove(MI, Idx); 5233 continue; 5234 } 5235 5236 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) && 5237 !isOperandLegal(MI, Idx, &MO)) { 5238 legalizeOpWithMove(MI, Idx); 5239 continue; 5240 } 5241 5242 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg()))) 5243 continue; // VGPRs are legal 5244 5245 // We can use one SGPR in each VOP3 instruction prior to GFX10 5246 // and two starting from GFX10. 
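// Reading the same SGPR in several operands only counts once against the
// constant bus, so registers we have already accounted for are skipped.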
5247 if (SGPRsUsed.count(MO.getReg()))
5248 continue;
5249 if (ConstantBusLimit > 0) {
5250 SGPRsUsed.insert(MO.getReg());
5251 --ConstantBusLimit;
5252 continue;
5253 }
5254
5255 // If we make it this far, then the operand is not legal and we must
5256 // legalize it.
5257 legalizeOpWithMove(MI, Idx);
5258 }
5259 }
5260
5261 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI,
5262 MachineRegisterInfo &MRI) const {
5263 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg);
5264 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC);
5265 Register DstReg = MRI.createVirtualRegister(SRC);
5266 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32;
5267
5268 if (RI.hasAGPRs(VRC)) {
5269 VRC = RI.getEquivalentVGPRClass(VRC);
5270 Register NewSrcReg = MRI.createVirtualRegister(VRC);
5271 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5272 get(TargetOpcode::COPY), NewSrcReg)
5273 .addReg(SrcReg);
5274 SrcReg = NewSrcReg;
5275 }
5276
5277 if (SubRegs == 1) {
5278 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5279 get(AMDGPU::V_READFIRSTLANE_B32), DstReg)
5280 .addReg(SrcReg);
5281 return DstReg;
5282 }
5283
5284 SmallVector<unsigned, 8> SRegs;
5285 for (unsigned i = 0; i < SubRegs; ++i) {
5286 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5287 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5288 get(AMDGPU::V_READFIRSTLANE_B32), SGPR)
5289 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i));
5290 SRegs.push_back(SGPR);
5291 }
5292
5293 MachineInstrBuilder MIB =
5294 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(),
5295 get(AMDGPU::REG_SEQUENCE), DstReg);
5296 for (unsigned i = 0; i < SubRegs; ++i) {
5297 MIB.addReg(SRegs[i]);
5298 MIB.addImm(RI.getSubRegFromChannel(i));
5299 }
5300 return DstReg;
5301 }
5302
5303 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI,
5304 MachineInstr &MI) const {
5305
5306 // If the pointer is stored in VGPRs, then we need to move it to
5307 // SGPRs using v_readfirstlane. This is safe because we only select
5308 // loads with uniform pointers to SMRD instructions, so we know the
5309 // pointer value is uniform.
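// Roughly: a wide sbase held in VGPRs is rebuilt in SGPRs by
// readlaneVGPRToSGPR() above, one V_READFIRSTLANE_B32 per 32-bit piece plus
// a REG_SEQUENCE to stitch the pieces back together.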
5310 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase); 5311 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) { 5312 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI); 5313 SBase->setReg(SGPR); 5314 } 5315 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soffset); 5316 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) { 5317 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI); 5318 SOff->setReg(SGPR); 5319 } 5320 } 5321 5322 bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const { 5323 unsigned Opc = Inst.getOpcode(); 5324 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr); 5325 if (OldSAddrIdx < 0) 5326 return false; 5327 5328 assert(isSegmentSpecificFLAT(Inst)); 5329 5330 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc); 5331 if (NewOpc < 0) 5332 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc); 5333 if (NewOpc < 0) 5334 return false; 5335 5336 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo(); 5337 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx); 5338 if (RI.isSGPRReg(MRI, SAddr.getReg())) 5339 return false; 5340 5341 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr); 5342 if (NewVAddrIdx < 0) 5343 return false; 5344 5345 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr); 5346 5347 // Check vaddr, it shall be zero or absent. 5348 MachineInstr *VAddrDef = nullptr; 5349 if (OldVAddrIdx >= 0) { 5350 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx); 5351 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg()); 5352 if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 || 5353 !VAddrDef->getOperand(1).isImm() || 5354 VAddrDef->getOperand(1).getImm() != 0) 5355 return false; 5356 } 5357 5358 const MCInstrDesc &NewDesc = get(NewOpc); 5359 Inst.setDesc(NewDesc); 5360 5361 // Callers expect iterator to be valid after this call, so modify the 5362 // instruction in place. 5363 if (OldVAddrIdx == NewVAddrIdx) { 5364 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx); 5365 // Clear use list from the old vaddr holding a zero register. 5366 MRI.removeRegOperandFromUseList(&NewVAddr); 5367 MRI.moveOperands(&NewVAddr, &SAddr, 1); 5368 Inst.removeOperand(OldSAddrIdx); 5369 // Update the use list with the pointer we have just moved from vaddr to 5370 // saddr position. Otherwise new vaddr will be missing from the use list. 5371 MRI.removeRegOperandFromUseList(&NewVAddr); 5372 MRI.addRegOperandToUseList(&NewVAddr); 5373 } else { 5374 assert(OldSAddrIdx == NewVAddrIdx); 5375 5376 if (OldVAddrIdx >= 0) { 5377 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc, 5378 AMDGPU::OpName::vdst_in); 5379 5380 // removeOperand doesn't try to fixup tied operand indexes at it goes, so 5381 // it asserts. Untie the operands for now and retie them afterwards. 5382 if (NewVDstIn != -1) { 5383 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in); 5384 Inst.untieRegOperand(OldVDstIn); 5385 } 5386 5387 Inst.removeOperand(OldVAddrIdx); 5388 5389 if (NewVDstIn != -1) { 5390 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst); 5391 Inst.tieOperands(NewVDst, NewVDstIn); 5392 } 5393 } 5394 } 5395 5396 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg())) 5397 VAddrDef->eraseFromParent(); 5398 5399 return true; 5400 } 5401 5402 // FIXME: Remove this when SelectionDAG is obsoleted. 
5403 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 5404 MachineInstr &MI) const { 5405 if (!isSegmentSpecificFLAT(MI)) 5406 return; 5407 5408 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 5409 // thinks they are uniform, so a readfirstlane should be valid. 5410 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 5411 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 5412 return; 5413 5414 if (moveFlatAddrToVGPR(MI)) 5415 return; 5416 5417 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 5418 SAddr->setReg(ToSGPR); 5419 } 5420 5421 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 5422 MachineBasicBlock::iterator I, 5423 const TargetRegisterClass *DstRC, 5424 MachineOperand &Op, 5425 MachineRegisterInfo &MRI, 5426 const DebugLoc &DL) const { 5427 Register OpReg = Op.getReg(); 5428 unsigned OpSubReg = Op.getSubReg(); 5429 5430 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 5431 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 5432 5433 // Check if operand is already the correct register class. 5434 if (DstRC == OpRC) 5435 return; 5436 5437 Register DstReg = MRI.createVirtualRegister(DstRC); 5438 auto Copy = BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 5439 5440 Op.setReg(DstReg); 5441 Op.setSubReg(0); 5442 5443 MachineInstr *Def = MRI.getVRegDef(OpReg); 5444 if (!Def) 5445 return; 5446 5447 // Try to eliminate the copy if it is copying an immediate value. 5448 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 5449 FoldImmediate(*Copy, *Def, OpReg, &MRI); 5450 5451 bool ImpDef = Def->isImplicitDef(); 5452 while (!ImpDef && Def && Def->isCopy()) { 5453 if (Def->getOperand(1).getReg().isPhysical()) 5454 break; 5455 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 5456 ImpDef = Def && Def->isImplicitDef(); 5457 } 5458 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 5459 !ImpDef) 5460 Copy.addReg(AMDGPU::EXEC, RegState::Implicit); 5461 } 5462 5463 // Emit the actual waterfall loop, executing the wrapped instruction for each 5464 // unique value of \p Rsrc across all lanes. In the best case we execute 1 5465 // iteration, in the worst case we execute 64 (once per lane). 5466 static void 5467 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 5468 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 5469 MachineBasicBlock &BodyBB, const DebugLoc &DL, 5470 MachineOperand &Rsrc) { 5471 MachineFunction &MF = *OrigBB.getParent(); 5472 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5473 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5474 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5475 unsigned SaveExecOpc = 5476 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 5477 unsigned XorTermOpc = 5478 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 5479 unsigned AndOpc = 5480 ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5481 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5482 5483 MachineBasicBlock::iterator I = LoopBB.begin(); 5484 5485 SmallVector<Register, 8> ReadlanePieces; 5486 Register CondReg = AMDGPU::NoRegister; 5487 5488 Register VRsrc = Rsrc.getReg(); 5489 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 5490 5491 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 5492 unsigned NumSubRegs = RegSize / 32; 5493 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 5494 5495 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 5496 5497 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5498 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5499 5500 // Read the next variant <- also loop target. 5501 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 5502 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 5503 5504 // Read the next variant <- also loop target. 5505 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 5506 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 5507 5508 ReadlanePieces.push_back(CurRegLo); 5509 ReadlanePieces.push_back(CurRegHi); 5510 5511 // Comparison is to be done as 64-bit. 5512 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 5513 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 5514 .addReg(CurRegLo) 5515 .addImm(AMDGPU::sub0) 5516 .addReg(CurRegHi) 5517 .addImm(AMDGPU::sub1); 5518 5519 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 5520 auto Cmp = 5521 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 5522 .addReg(CurReg); 5523 if (NumSubRegs <= 2) 5524 Cmp.addReg(VRsrc); 5525 else 5526 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 5527 5528 // Combine the comparison results with AND. 5529 if (CondReg == AMDGPU::NoRegister) // First. 5530 CondReg = NewCondReg; 5531 else { // If not the first, we create an AND. 5532 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 5533 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 5534 .addReg(CondReg) 5535 .addReg(NewCondReg); 5536 CondReg = AndReg; 5537 } 5538 } // End for loop. 5539 5540 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 5541 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 5542 5543 // Build scalar Rsrc. 5544 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 5545 unsigned Channel = 0; 5546 for (Register Piece : ReadlanePieces) { 5547 Merge.addReg(Piece) 5548 .addImm(TRI->getSubRegFromChannel(Channel++)); 5549 } 5550 5551 // Update Rsrc operand to use the SGPR Rsrc. 5552 Rsrc.setReg(SRsrc); 5553 Rsrc.setIsKill(true); 5554 5555 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5556 MRI.setSimpleHint(SaveExec, CondReg); 5557 5558 // Update EXEC to matching lanes, saving original to SaveExec. 5559 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 5560 .addReg(CondReg, RegState::Kill); 5561 5562 // The original instruction is here; we insert the terminators after it. 5563 I = BodyBB.end(); 5564 5565 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 
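// Concretely: S_AND_SAVEEXEC above left SaveExec = OrigExec and
// EXEC = OrigExec & Cond, so EXEC ^ SaveExec = OrigExec & ~Cond, i.e. only
// the lanes that still need another trip around the loop stay enabled.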
5566 BuildMI(BodyBB, I, DL, TII.get(XorTermOpc), Exec) 5567 .addReg(Exec) 5568 .addReg(SaveExec); 5569 5570 BuildMI(BodyBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB); 5571 } 5572 5573 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 5574 // with SGPRs by iterating over all unique values across all lanes. 5575 // Returns the loop basic block that now contains \p MI. 5576 static MachineBasicBlock * 5577 loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 5578 MachineOperand &Rsrc, MachineDominatorTree *MDT, 5579 MachineBasicBlock::iterator Begin = nullptr, 5580 MachineBasicBlock::iterator End = nullptr) { 5581 MachineBasicBlock &MBB = *MI.getParent(); 5582 MachineFunction &MF = *MBB.getParent(); 5583 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5584 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5585 MachineRegisterInfo &MRI = MF.getRegInfo(); 5586 if (!Begin.isValid()) 5587 Begin = &MI; 5588 if (!End.isValid()) { 5589 End = &MI; 5590 ++End; 5591 } 5592 const DebugLoc &DL = MI.getDebugLoc(); 5593 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5594 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 5595 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5596 5597 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5598 5599 // Save the EXEC mask 5600 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 5601 5602 // Killed uses in the instruction we are waterfalling around will be 5603 // incorrect due to the added control-flow. 5604 MachineBasicBlock::iterator AfterMI = MI; 5605 ++AfterMI; 5606 for (auto I = Begin; I != AfterMI; I++) { 5607 for (auto &MO : I->uses()) { 5608 if (MO.isReg() && MO.isUse()) { 5609 MRI.clearKillFlags(MO.getReg()); 5610 } 5611 } 5612 } 5613 5614 // To insert the loop we need to split the block. Move everything after this 5615 // point to a new block, and insert a new empty block between the two. 5616 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 5617 MachineBasicBlock *BodyBB = MF.CreateMachineBasicBlock(); 5618 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 5619 MachineFunction::iterator MBBI(MBB); 5620 ++MBBI; 5621 5622 MF.insert(MBBI, LoopBB); 5623 MF.insert(MBBI, BodyBB); 5624 MF.insert(MBBI, RemainderBB); 5625 5626 LoopBB->addSuccessor(BodyBB); 5627 BodyBB->addSuccessor(LoopBB); 5628 BodyBB->addSuccessor(RemainderBB); 5629 5630 // Move Begin to MI to the BodyBB, and the remainder of the block to 5631 // RemainderBB. 5632 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 5633 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 5634 BodyBB->splice(BodyBB->begin(), &MBB, Begin, MBB.end()); 5635 5636 MBB.addSuccessor(LoopBB); 5637 5638 // Update dominators. We know that MBB immediately dominates LoopBB, that 5639 // LoopBB immediately dominates BodyBB, and BodyBB immediately dominates 5640 // RemainderBB. RemainderBB immediately dominates all of the successors 5641 // transferred to it from MBB that MBB used to properly dominate. 
5642 if (MDT) {
5643 MDT->addNewBlock(LoopBB, &MBB);
5644 MDT->addNewBlock(BodyBB, LoopBB);
5645 MDT->addNewBlock(RemainderBB, BodyBB);
5646 for (auto &Succ : RemainderBB->successors()) {
5647 if (MDT->properlyDominates(&MBB, Succ)) {
5648 MDT->changeImmediateDominator(Succ, RemainderBB);
5649 }
5650 }
5651 }
5652
5653 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, *BodyBB, DL, Rsrc);
5654
5655 // Restore the EXEC mask
5656 MachineBasicBlock::iterator First = RemainderBB->begin();
5657 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec);
5658 return BodyBB;
5659 }
5660
5661 // Extract pointer from Rsrc and return a zero-value Rsrc replacement.
5662 static std::tuple<unsigned, unsigned>
5663 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) {
5664 MachineBasicBlock &MBB = *MI.getParent();
5665 MachineFunction &MF = *MBB.getParent();
5666 MachineRegisterInfo &MRI = MF.getRegInfo();
5667
5668 // Extract the ptr from the resource descriptor.
5669 unsigned RsrcPtr =
5670 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass,
5671 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass);
5672
5673 // Create an empty resource descriptor
5674 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
5675 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5676 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);
5677 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
5678 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat();
5679
5680 // Zero64 = 0
5681 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64)
5682 .addImm(0);
5683
5684 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0}
5685 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo)
5686 .addImm(RsrcDataFormat & 0xFFFFFFFF);
5687
5688 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32}
5689 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi)
5690 .addImm(RsrcDataFormat >> 32);
5691
5692 // NewSRsrc = {Zero64, SRsrcFormat}
5693 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc)
5694 .addReg(Zero64)
5695 .addImm(AMDGPU::sub0_sub1)
5696 .addReg(SRsrcFormatLo)
5697 .addImm(AMDGPU::sub2)
5698 .addReg(SRsrcFormatHi)
5699 .addImm(AMDGPU::sub3);
5700
5701 return std::make_tuple(RsrcPtr, NewSRsrc);
5702 }
5703
5704 MachineBasicBlock *
5705 SIInstrInfo::legalizeOperands(MachineInstr &MI,
5706 MachineDominatorTree *MDT) const {
5707 MachineFunction &MF = *MI.getParent()->getParent();
5708 MachineRegisterInfo &MRI = MF.getRegInfo();
5709 MachineBasicBlock *CreatedBB = nullptr;
5710
5711 // Legalize VOP2
5712 if (isVOP2(MI) || isVOPC(MI)) {
5713 legalizeOperandsVOP2(MRI, MI);
5714 return CreatedBB;
5715 }
5716
5717 // Legalize VOP3
5718 if (isVOP3(MI)) {
5719 legalizeOperandsVOP3(MRI, MI);
5720 return CreatedBB;
5721 }
5722
5723 // Legalize SMRD
5724 if (isSMRD(MI)) {
5725 legalizeOperandsSMRD(MRI, MI);
5726 return CreatedBB;
5727 }
5728
5729 // Legalize FLAT
5730 if (isFLAT(MI)) {
5731 legalizeOperandsFLAT(MRI, MI);
5732 return CreatedBB;
5733 }
5734
5735 // Legalize REG_SEQUENCE and PHI
5736 // The register class of the operands must be the same type as the register
5737 // class of the output.
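// For example, a PHI merging an SGPR value from one predecessor with a VGPR
// value from another has to have every incoming value (and the result)
// rewritten to a VGPR class.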
5738 if (MI.getOpcode() == AMDGPU::PHI) {
5739 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5740 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5741 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5742 continue;
5743 const TargetRegisterClass *OpRC =
5744 MRI.getRegClass(MI.getOperand(i).getReg());
5745 if (RI.hasVectorRegisters(OpRC)) {
5746 VRC = OpRC;
5747 } else {
5748 SRC = OpRC;
5749 }
5750 }
5751
5752 // If any of the operands are VGPR registers, then they all must be VGPRs,
5753 // otherwise we will create illegal VGPR->SGPR copies when legalizing
5754 // them.
5755 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5756 if (!VRC) {
5757 assert(SRC);
5758 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5759 VRC = &AMDGPU::VReg_1RegClass;
5760 } else
5761 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5762 ? RI.getEquivalentAGPRClass(SRC)
5763 : RI.getEquivalentVGPRClass(SRC);
5764 } else {
5765 VRC = RI.isAGPRClass(getOpRegClass(MI, 0))
5766 ? RI.getEquivalentAGPRClass(VRC)
5767 : RI.getEquivalentVGPRClass(VRC);
5768 }
5769 RC = VRC;
5770 } else {
5771 RC = SRC;
5772 }
5773
5774 // Update all the operands so they have the same type.
5775 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5776 MachineOperand &Op = MI.getOperand(I);
5777 if (!Op.isReg() || !Op.getReg().isVirtual())
5778 continue;
5779
5780 // MI is a PHI instruction.
5781 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5782 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5783
5784 // Avoid creating no-op copies with the same src and dst reg class. These
5785 // confuse some of the machine passes.
5786 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5787 }
5788 }
5789
5790 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5791 // VGPR dest type and SGPR sources, insert copies so all operands are
5792 // VGPRs. This seems to help operand folding / the register coalescer.
5793 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5794 MachineBasicBlock *MBB = MI.getParent();
5795 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5796 if (RI.hasVGPRs(DstRC)) {
5797 // Update all the operands so they are VGPR register classes. These may
5798 // not be the same register class because REG_SEQUENCE supports mixing
5799 // subregister index types e.g.
sub0_sub1 + sub2 + sub3 5800 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5801 MachineOperand &Op = MI.getOperand(I); 5802 if (!Op.isReg() || !Op.getReg().isVirtual()) 5803 continue; 5804 5805 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5806 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5807 if (VRC == OpRC) 5808 continue; 5809 5810 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5811 Op.setIsKill(); 5812 } 5813 } 5814 5815 return CreatedBB; 5816 } 5817 5818 // Legalize INSERT_SUBREG 5819 // src0 must have the same register class as dst 5820 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5821 Register Dst = MI.getOperand(0).getReg(); 5822 Register Src0 = MI.getOperand(1).getReg(); 5823 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5824 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5825 if (DstRC != Src0RC) { 5826 MachineBasicBlock *MBB = MI.getParent(); 5827 MachineOperand &Op = MI.getOperand(1); 5828 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5829 } 5830 return CreatedBB; 5831 } 5832 5833 // Legalize SI_INIT_M0 5834 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5835 MachineOperand &Src = MI.getOperand(0); 5836 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5837 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5838 return CreatedBB; 5839 } 5840 5841 // Legalize MIMG and MUBUF/MTBUF for shaders. 5842 // 5843 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5844 // scratch memory access. In both cases, the legalization never involves 5845 // conversion to the addr64 form. 5846 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) && 5847 (isMUBUF(MI) || isMTBUF(MI)))) { 5848 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 5849 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) 5850 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT); 5851 5852 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 5853 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) 5854 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT); 5855 5856 return CreatedBB; 5857 } 5858 5859 // Legalize SI_CALL 5860 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 5861 MachineOperand *Dest = &MI.getOperand(0); 5862 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { 5863 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and 5864 // following copies, we also need to move copies from and to physical 5865 // registers into the loop block. 5866 unsigned FrameSetupOpcode = getCallFrameSetupOpcode(); 5867 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode(); 5868 5869 // Also move the copies to physical registers into the loop block 5870 MachineBasicBlock &MBB = *MI.getParent(); 5871 MachineBasicBlock::iterator Start(&MI); 5872 while (Start->getOpcode() != FrameSetupOpcode) 5873 --Start; 5874 MachineBasicBlock::iterator End(&MI); 5875 while (End->getOpcode() != FrameDestroyOpcode) 5876 ++End; 5877 // Also include following copies of the return value 5878 ++End; 5879 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() && 5880 MI.definesRegister(End->getOperand(1).getReg())) 5881 ++End; 5882 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End); 5883 } 5884 } 5885 5886 // Legalize MUBUF* instructions. 
5887 int RsrcIdx =
5888 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5889 if (RsrcIdx != -1) {
5890 // We have an MUBUF instruction
5891 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5892 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5893 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5894 RI.getRegClass(RsrcRC))) {
5895 // The operands are legal.
5896 // FIXME: We may need to legalize operands besides srsrc.
5897 return CreatedBB;
5898 }
5899
5900 // Legalize a VGPR Rsrc.
5901 //
5902 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5903 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5904 // a zero-value SRsrc.
5905 //
5906 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5907 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5908 // above.
5909 //
5910 // Otherwise we are on non-ADDR64 hardware, and/or we have
5911 // idxen/offen/bothen and we fall back to a waterfall loop.
5912
5913 MachineBasicBlock &MBB = *MI.getParent();
5914
5915 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5916 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5917 // This is already an ADDR64 instruction so we need to add the pointer
5918 // extracted from the resource descriptor to the current value of VAddr.
5919 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5920 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5921 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5922
5923 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5924 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5925 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5926
5927 unsigned RsrcPtr, NewSRsrc;
5928 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5929
5930 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5931 const DebugLoc &DL = MI.getDebugLoc();
5932 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5933 .addDef(CondReg0)
5934 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5935 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5936 .addImm(0);
5937
5938 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5939 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5940 .addDef(CondReg1, RegState::Dead)
5941 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5942 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5943 .addReg(CondReg0, RegState::Kill)
5944 .addImm(0);
5945
5946 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5947 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5948 .addReg(NewVAddrLo)
5949 .addImm(AMDGPU::sub0)
5950 .addReg(NewVAddrHi)
5951 .addImm(AMDGPU::sub1);
5952
5953 VAddr->setReg(NewVAddr);
5954 Rsrc->setReg(NewSRsrc);
5955 } else if (!VAddr && ST.hasAddr64()) {
5956 // This instruction is the _OFFSET variant, so we need to convert it to
5957 // ADDR64.
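// ADDR64 MUBUF forms only exist on SI/CI class hardware, hence the assert
// below; newer targets would have to use FLAT atomics instead (see the FIXME
// in the assert message).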
5958 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS && 5959 "FIXME: Need to emit flat atomics here"); 5960 5961 unsigned RsrcPtr, NewSRsrc; 5962 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 5963 5964 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5965 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 5966 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 5967 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 5968 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 5969 5970 // Atomics with return have an additional tied operand and are 5971 // missing some of the special bits. 5972 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 5973 MachineInstr *Addr64; 5974 5975 if (!VDataIn) { 5976 // Regular buffer load / store. 5977 MachineInstrBuilder MIB = 5978 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5979 .add(*VData) 5980 .addReg(NewVAddr) 5981 .addReg(NewSRsrc) 5982 .add(*SOffset) 5983 .add(*Offset); 5984 5985 if (const MachineOperand *CPol = 5986 getNamedOperand(MI, AMDGPU::OpName::cpol)) { 5987 MIB.addImm(CPol->getImm()); 5988 } 5989 5990 if (const MachineOperand *TFE = 5991 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 5992 MIB.addImm(TFE->getImm()); 5993 } 5994 5995 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz)); 5996 5997 MIB.cloneMemRefs(MI); 5998 Addr64 = MIB; 5999 } else { 6000 // Atomics with return. 6001 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 6002 .add(*VData) 6003 .add(*VDataIn) 6004 .addReg(NewVAddr) 6005 .addReg(NewSRsrc) 6006 .add(*SOffset) 6007 .add(*Offset) 6008 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol)) 6009 .cloneMemRefs(MI); 6010 } 6011 6012 MI.removeFromParent(); 6013 6014 // NewVaddr = {NewVaddrHi, NewVaddrLo} 6015 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 6016 NewVAddr) 6017 .addReg(RsrcPtr, 0, AMDGPU::sub0) 6018 .addImm(AMDGPU::sub0) 6019 .addReg(RsrcPtr, 0, AMDGPU::sub1) 6020 .addImm(AMDGPU::sub1); 6021 } else { 6022 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 6023 // to SGPRs. 6024 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 6025 return CreatedBB; 6026 } 6027 } 6028 return CreatedBB; 6029 } 6030 6031 MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst, 6032 MachineDominatorTree *MDT) const { 6033 SetVectorType Worklist; 6034 Worklist.insert(&TopInst); 6035 MachineBasicBlock *CreatedBB = nullptr; 6036 MachineBasicBlock *CreatedBBTmp = nullptr; 6037 6038 while (!Worklist.empty()) { 6039 MachineInstr &Inst = *Worklist.pop_back_val(); 6040 MachineBasicBlock *MBB = Inst.getParent(); 6041 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 6042 6043 unsigned Opcode = Inst.getOpcode(); 6044 unsigned NewOpcode = getVALUOp(Inst); 6045 6046 // Handle some special cases 6047 switch (Opcode) { 6048 default: 6049 break; 6050 case AMDGPU::S_ADD_U64_PSEUDO: 6051 case AMDGPU::S_SUB_U64_PSEUDO: 6052 splitScalar64BitAddSub(Worklist, Inst, MDT); 6053 Inst.eraseFromParent(); 6054 continue; 6055 case AMDGPU::S_ADD_I32: 6056 case AMDGPU::S_SUB_I32: { 6057 // FIXME: The u32 versions currently selected use the carry. 
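// moveScalarAddSub() below only succeeds on subtargets with carry-less adds
// (ST.hasAddNoCarry()); otherwise Changed stays false and we fall through to
// the generic lowering.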
6058 bool Changed; 6059 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT); 6060 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6061 CreatedBB = CreatedBBTmp; 6062 if (Changed) 6063 continue; 6064 6065 // Default handling 6066 break; 6067 } 6068 case AMDGPU::S_AND_B64: 6069 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 6070 Inst.eraseFromParent(); 6071 continue; 6072 6073 case AMDGPU::S_OR_B64: 6074 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 6075 Inst.eraseFromParent(); 6076 continue; 6077 6078 case AMDGPU::S_XOR_B64: 6079 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 6080 Inst.eraseFromParent(); 6081 continue; 6082 6083 case AMDGPU::S_NAND_B64: 6084 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 6085 Inst.eraseFromParent(); 6086 continue; 6087 6088 case AMDGPU::S_NOR_B64: 6089 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 6090 Inst.eraseFromParent(); 6091 continue; 6092 6093 case AMDGPU::S_XNOR_B64: 6094 if (ST.hasDLInsts()) 6095 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 6096 else 6097 splitScalar64BitXnor(Worklist, Inst, MDT); 6098 Inst.eraseFromParent(); 6099 continue; 6100 6101 case AMDGPU::S_ANDN2_B64: 6102 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 6103 Inst.eraseFromParent(); 6104 continue; 6105 6106 case AMDGPU::S_ORN2_B64: 6107 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 6108 Inst.eraseFromParent(); 6109 continue; 6110 6111 case AMDGPU::S_BREV_B64: 6112 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true); 6113 Inst.eraseFromParent(); 6114 continue; 6115 6116 case AMDGPU::S_NOT_B64: 6117 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 6118 Inst.eraseFromParent(); 6119 continue; 6120 6121 case AMDGPU::S_BCNT1_I32_B64: 6122 splitScalar64BitBCNT(Worklist, Inst); 6123 Inst.eraseFromParent(); 6124 continue; 6125 6126 case AMDGPU::S_BFE_I64: 6127 splitScalar64BitBFE(Worklist, Inst); 6128 Inst.eraseFromParent(); 6129 continue; 6130 6131 case AMDGPU::S_LSHL_B32: 6132 if (ST.hasOnlyRevVALUShifts()) { 6133 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 6134 swapOperands(Inst); 6135 } 6136 break; 6137 case AMDGPU::S_ASHR_I32: 6138 if (ST.hasOnlyRevVALUShifts()) { 6139 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 6140 swapOperands(Inst); 6141 } 6142 break; 6143 case AMDGPU::S_LSHR_B32: 6144 if (ST.hasOnlyRevVALUShifts()) { 6145 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 6146 swapOperands(Inst); 6147 } 6148 break; 6149 case AMDGPU::S_LSHL_B64: 6150 if (ST.hasOnlyRevVALUShifts()) { 6151 NewOpcode = AMDGPU::V_LSHLREV_B64_e64; 6152 swapOperands(Inst); 6153 } 6154 break; 6155 case AMDGPU::S_ASHR_I64: 6156 if (ST.hasOnlyRevVALUShifts()) { 6157 NewOpcode = AMDGPU::V_ASHRREV_I64_e64; 6158 swapOperands(Inst); 6159 } 6160 break; 6161 case AMDGPU::S_LSHR_B64: 6162 if (ST.hasOnlyRevVALUShifts()) { 6163 NewOpcode = AMDGPU::V_LSHRREV_B64_e64; 6164 swapOperands(Inst); 6165 } 6166 break; 6167 6168 case AMDGPU::S_ABS_I32: 6169 lowerScalarAbs(Worklist, Inst); 6170 Inst.eraseFromParent(); 6171 continue; 6172 6173 case AMDGPU::S_CBRANCH_SCC0: 6174 case AMDGPU::S_CBRANCH_SCC1: { 6175 // Clear unused bits of vcc 6176 Register CondReg = Inst.getOperand(1).getReg(); 6177 bool IsSCC = CondReg == AMDGPU::SCC; 6178 Register VCC = RI.getVCC(); 6179 Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 6180 unsigned Opc = ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 6181 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC) 6182 .addReg(EXEC) 6183 .addReg(IsSCC ? VCC : CondReg); 6184 Inst.removeOperand(1); 6185 } 6186 break; 6187 6188 case AMDGPU::S_BFE_U64: 6189 case AMDGPU::S_BFM_B64: 6190 llvm_unreachable("Moving this op to VALU not implemented"); 6191 6192 case AMDGPU::S_PACK_LL_B32_B16: 6193 case AMDGPU::S_PACK_LH_B32_B16: 6194 case AMDGPU::S_PACK_HH_B32_B16: 6195 movePackToVALU(Worklist, MRI, Inst); 6196 Inst.eraseFromParent(); 6197 continue; 6198 6199 case AMDGPU::S_XNOR_B32: 6200 lowerScalarXnor(Worklist, Inst); 6201 Inst.eraseFromParent(); 6202 continue; 6203 6204 case AMDGPU::S_NAND_B32: 6205 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 6206 Inst.eraseFromParent(); 6207 continue; 6208 6209 case AMDGPU::S_NOR_B32: 6210 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 6211 Inst.eraseFromParent(); 6212 continue; 6213 6214 case AMDGPU::S_ANDN2_B32: 6215 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 6216 Inst.eraseFromParent(); 6217 continue; 6218 6219 case AMDGPU::S_ORN2_B32: 6220 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 6221 Inst.eraseFromParent(); 6222 continue; 6223 6224 // TODO: remove as soon as everything is ready 6225 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 6226 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 6227 // can only be selected from the uniform SDNode. 6228 case AMDGPU::S_ADD_CO_PSEUDO: 6229 case AMDGPU::S_SUB_CO_PSEUDO: { 6230 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 6231 ? AMDGPU::V_ADDC_U32_e64 6232 : AMDGPU::V_SUBB_U32_e64; 6233 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6234 6235 Register CarryInReg = Inst.getOperand(4).getReg(); 6236 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 6237 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 6238 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 6239 .addReg(CarryInReg); 6240 } 6241 6242 Register CarryOutReg = Inst.getOperand(1).getReg(); 6243 6244 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 6245 MRI.getRegClass(Inst.getOperand(0).getReg()))); 6246 MachineInstr *CarryOp = 6247 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 6248 .addReg(CarryOutReg, RegState::Define) 6249 .add(Inst.getOperand(2)) 6250 .add(Inst.getOperand(3)) 6251 .addReg(CarryInReg) 6252 .addImm(0); 6253 CreatedBBTmp = legalizeOperands(*CarryOp); 6254 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6255 CreatedBB = CreatedBBTmp; 6256 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 6257 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 6258 Inst.eraseFromParent(); 6259 } 6260 continue; 6261 case AMDGPU::S_UADDO_PSEUDO: 6262 case AMDGPU::S_USUBO_PSEUDO: { 6263 const DebugLoc &DL = Inst.getDebugLoc(); 6264 MachineOperand &Dest0 = Inst.getOperand(0); 6265 MachineOperand &Dest1 = Inst.getOperand(1); 6266 MachineOperand &Src0 = Inst.getOperand(2); 6267 MachineOperand &Src1 = Inst.getOperand(3); 6268 6269 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 6270 ? 
AMDGPU::V_ADD_CO_U32_e64 6271 : AMDGPU::V_SUB_CO_U32_e64; 6272 const TargetRegisterClass *NewRC = 6273 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 6274 Register DestReg = MRI.createVirtualRegister(NewRC); 6275 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 6276 .addReg(Dest1.getReg(), RegState::Define) 6277 .add(Src0) 6278 .add(Src1) 6279 .addImm(0); // clamp bit 6280 6281 CreatedBBTmp = legalizeOperands(*NewInstr, MDT); 6282 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6283 CreatedBB = CreatedBBTmp; 6284 6285 MRI.replaceRegWith(Dest0.getReg(), DestReg); 6286 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 6287 Worklist); 6288 Inst.eraseFromParent(); 6289 } 6290 continue; 6291 6292 case AMDGPU::S_CSELECT_B32: 6293 case AMDGPU::S_CSELECT_B64: 6294 lowerSelect(Worklist, Inst, MDT); 6295 Inst.eraseFromParent(); 6296 continue; 6297 case AMDGPU::S_CMP_EQ_I32: 6298 case AMDGPU::S_CMP_LG_I32: 6299 case AMDGPU::S_CMP_GT_I32: 6300 case AMDGPU::S_CMP_GE_I32: 6301 case AMDGPU::S_CMP_LT_I32: 6302 case AMDGPU::S_CMP_LE_I32: 6303 case AMDGPU::S_CMP_EQ_U32: 6304 case AMDGPU::S_CMP_LG_U32: 6305 case AMDGPU::S_CMP_GT_U32: 6306 case AMDGPU::S_CMP_GE_U32: 6307 case AMDGPU::S_CMP_LT_U32: 6308 case AMDGPU::S_CMP_LE_U32: 6309 case AMDGPU::S_CMP_EQ_U64: 6310 case AMDGPU::S_CMP_LG_U64: { 6311 const MCInstrDesc &NewDesc = get(NewOpcode); 6312 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass()); 6313 MachineInstr *NewInstr = 6314 BuildMI(*MBB, Inst, Inst.getDebugLoc(), NewDesc, CondReg) 6315 .add(Inst.getOperand(0)) 6316 .add(Inst.getOperand(1)); 6317 legalizeOperands(*NewInstr, MDT); 6318 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC); 6319 MachineOperand SCCOp = Inst.getOperand(SCCIdx); 6320 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg); 6321 Inst.eraseFromParent(); 6322 } 6323 continue; 6324 } 6325 6326 6327 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 6328 // We cannot move this instruction to the VALU, so we should try to 6329 // legalize its operands instead. 6330 CreatedBBTmp = legalizeOperands(Inst, MDT); 6331 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6332 CreatedBB = CreatedBBTmp; 6333 continue; 6334 } 6335 6336 // Use the new VALU Opcode. 6337 const MCInstrDesc &NewDesc = get(NewOpcode); 6338 Inst.setDesc(NewDesc); 6339 6340 // Remove any references to SCC. Vector instructions can't read from it, and 6341 // We're just about to add the implicit use / defs of VCC, and we don't want 6342 // both. 6343 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 6344 MachineOperand &Op = Inst.getOperand(i); 6345 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 6346 // Only propagate through live-def of SCC. 6347 if (Op.isDef() && !Op.isDead()) 6348 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 6349 if (Op.isUse()) 6350 addSCCDefsToVALUWorklist(Op, Worklist); 6351 Inst.removeOperand(i); 6352 } 6353 } 6354 6355 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 6356 // We are converting these to a BFE, so we need to add the missing 6357 // operands for the size and offset. 6358 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 8 : 16; 6359 Inst.addOperand(MachineOperand::CreateImm(0)); 6360 Inst.addOperand(MachineOperand::CreateImm(Size)); 6361 6362 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 6363 // The VALU version adds the second operand to the result, so insert an 6364 // extra 0 operand. 
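// The VALU bcnt computes popcount(src0) + src1, so a zero second source
// reproduces the plain S_BCNT1_I32_B32 result.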
6365 Inst.addOperand(MachineOperand::CreateImm(0)); 6366 } 6367 6368 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 6369 fixImplicitOperands(Inst); 6370 6371 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 6372 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 6373 // If we need to move this to VGPRs, we need to unpack the second operand 6374 // back into the 2 separate ones for bit offset and width. 6375 assert(OffsetWidthOp.isImm() && 6376 "Scalar BFE is only implemented for constant width and offset"); 6377 uint32_t Imm = OffsetWidthOp.getImm(); 6378 6379 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6380 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6381 Inst.removeOperand(2); // Remove old immediate. 6382 Inst.addOperand(MachineOperand::CreateImm(Offset)); 6383 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 6384 } 6385 6386 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 6387 unsigned NewDstReg = AMDGPU::NoRegister; 6388 if (HasDst) { 6389 Register DstReg = Inst.getOperand(0).getReg(); 6390 if (DstReg.isPhysical()) 6391 continue; 6392 6393 // Update the destination register class. 6394 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 6395 if (!NewDstRC) 6396 continue; 6397 6398 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() && 6399 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 6400 // Instead of creating a copy where src and dst are the same register 6401 // class, we just replace all uses of dst with src. These kinds of 6402 // copies interfere with the heuristics MachineSink uses to decide 6403 // whether or not to split a critical edge. Since the pass assumes 6404 // that copies will end up as machine instructions and not be 6405 // eliminated. 6406 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 6407 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 6408 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 6409 Inst.getOperand(0).setReg(DstReg); 6410 6411 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 6412 // these are deleted later, but at -O0 it would leave a suspicious 6413 // looking illegal copy of an undef register. 6414 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 6415 Inst.removeOperand(I); 6416 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 6417 continue; 6418 } 6419 6420 NewDstReg = MRI.createVirtualRegister(NewDstRC); 6421 MRI.replaceRegWith(DstReg, NewDstReg); 6422 } 6423 6424 // Legalize the operands 6425 CreatedBBTmp = legalizeOperands(Inst, MDT); 6426 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6427 CreatedBB = CreatedBBTmp; 6428 6429 if (HasDst) 6430 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 6431 } 6432 return CreatedBB; 6433 } 6434 6435 // Add/sub require special handling to deal with carry outs. 6436 std::pair<bool, MachineBasicBlock *> 6437 SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 6438 MachineDominatorTree *MDT) const { 6439 if (ST.hasAddNoCarry()) { 6440 // Assume there is no user of scc since we don't select this in that case. 6441 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 6442 // is used. 
6443 6444 MachineBasicBlock &MBB = *Inst.getParent(); 6445 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6446 6447 Register OldDstReg = Inst.getOperand(0).getReg(); 6448 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6449 6450 unsigned Opc = Inst.getOpcode(); 6451 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 6452 6453 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 6454 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 6455 6456 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 6457 Inst.removeOperand(3); 6458 6459 Inst.setDesc(get(NewOpc)); 6460 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 6461 Inst.addImplicitDefUseOperands(*MBB.getParent()); 6462 MRI.replaceRegWith(OldDstReg, ResultReg); 6463 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT); 6464 6465 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6466 return std::make_pair(true, NewBB); 6467 } 6468 6469 return std::make_pair(false, nullptr); 6470 } 6471 6472 void SIInstrInfo::lowerSelect(SetVectorType &Worklist, MachineInstr &Inst, 6473 MachineDominatorTree *MDT) const { 6474 6475 MachineBasicBlock &MBB = *Inst.getParent(); 6476 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6477 MachineBasicBlock::iterator MII = Inst; 6478 DebugLoc DL = Inst.getDebugLoc(); 6479 6480 MachineOperand &Dest = Inst.getOperand(0); 6481 MachineOperand &Src0 = Inst.getOperand(1); 6482 MachineOperand &Src1 = Inst.getOperand(2); 6483 MachineOperand &Cond = Inst.getOperand(3); 6484 6485 Register SCCSource = Cond.getReg(); 6486 bool IsSCC = (SCCSource == AMDGPU::SCC); 6487 6488 // If this is a trivial select where the condition is effectively not SCC 6489 // (SCCSource is a source of copy to SCC), then the select is semantically 6490 // equivalent to copying SCCSource. Hence, there is no need to create 6491 // V_CNDMASK, we can just use that and bail out. 6492 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() && 6493 (Src1.getImm() == 0)) { 6494 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6495 return; 6496 } 6497 6498 const TargetRegisterClass *TC = 6499 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6500 6501 Register CopySCC = MRI.createVirtualRegister(TC); 6502 6503 if (IsSCC) { 6504 // Now look for the closest SCC def if it is a copy 6505 // replacing the SCCSource with the COPY source register 6506 bool CopyFound = false; 6507 for (MachineInstr &CandI : 6508 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 6509 Inst.getParent()->rend())) { 6510 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 6511 -1) { 6512 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 6513 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC) 6514 .addReg(CandI.getOperand(1).getReg()); 6515 CopyFound = true; 6516 } 6517 break; 6518 } 6519 } 6520 if (!CopyFound) { 6521 // SCC def is not a copy 6522 // Insert a trivial select instead of creating a copy, because a copy from 6523 // SCC would semantically mean just copying a single bit, but we may need 6524 // the result to be a vector condition mask that needs preserving. 6525 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64 6526 : AMDGPU::S_CSELECT_B32; 6527 auto NewSelect = 6528 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0); 6529 NewSelect->getOperand(3).setIsUndef(Cond.isUndef()); 6530 } 6531 } 6532 6533 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6534 6535 auto UpdatedInst = 6536 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg) 6537 .addImm(0) 6538 .add(Src1) // False 6539 .addImm(0) 6540 .add(Src0) // True 6541 .addReg(IsSCC ? CopySCC : SCCSource); 6542 6543 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6544 legalizeOperands(*UpdatedInst, MDT); 6545 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6546 } 6547 6548 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 6549 MachineInstr &Inst) const { 6550 MachineBasicBlock &MBB = *Inst.getParent(); 6551 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6552 MachineBasicBlock::iterator MII = Inst; 6553 DebugLoc DL = Inst.getDebugLoc(); 6554 6555 MachineOperand &Dest = Inst.getOperand(0); 6556 MachineOperand &Src = Inst.getOperand(1); 6557 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6558 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6559 6560 unsigned SubOp = ST.hasAddNoCarry() ? 6561 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32; 6562 6563 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 6564 .addImm(0) 6565 .addReg(Src.getReg()); 6566 6567 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 6568 .addReg(Src.getReg()) 6569 .addReg(TmpReg); 6570 6571 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6572 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6573 } 6574 6575 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 6576 MachineInstr &Inst) const { 6577 MachineBasicBlock &MBB = *Inst.getParent(); 6578 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6579 MachineBasicBlock::iterator MII = Inst; 6580 const DebugLoc &DL = Inst.getDebugLoc(); 6581 6582 MachineOperand &Dest = Inst.getOperand(0); 6583 MachineOperand &Src0 = Inst.getOperand(1); 6584 MachineOperand &Src1 = Inst.getOperand(2); 6585 6586 if (ST.hasDLInsts()) { 6587 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6588 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 6589 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 6590 6591 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 6592 .add(Src0) 6593 .add(Src1); 6594 6595 MRI.replaceRegWith(Dest.getReg(), NewDest); 6596 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6597 } else { 6598 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 6599 // invert either source and then perform the XOR. If either source is a 6600 // scalar register, then we can leave the inversion on the scalar unit to 6601 // achieve a better distribution of scalar and vector instructions. 6602 bool Src0IsSGPR = Src0.isReg() && 6603 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 6604 bool Src1IsSGPR = Src1.isReg() && 6605 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 6606 MachineInstr *Xor; 6607 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6608 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6609 6610 // Build a pair of scalar instructions and add them to the work list. 6611 // The next iteration over the work list will lower these to the vector 6612 // unit as necessary. 
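// For example (purely illustrative names): xnor of an SGPR %s and a VGPR %v
// is emitted as %t = S_NOT_B32 %s followed by S_XOR_B32 %t, %v; the XOR is
// then moved to the VALU by a later worklist iteration if required.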
6613 if (Src0IsSGPR) { 6614 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 6615 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6616 .addReg(Temp) 6617 .add(Src1); 6618 } else if (Src1IsSGPR) { 6619 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 6620 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6621 .add(Src0) 6622 .addReg(Temp); 6623 } else { 6624 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 6625 .add(Src0) 6626 .add(Src1); 6627 MachineInstr *Not = 6628 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 6629 Worklist.insert(Not); 6630 } 6631 6632 MRI.replaceRegWith(Dest.getReg(), NewDest); 6633 6634 Worklist.insert(Xor); 6635 6636 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6637 } 6638 } 6639 6640 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 6641 MachineInstr &Inst, 6642 unsigned Opcode) const { 6643 MachineBasicBlock &MBB = *Inst.getParent(); 6644 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6645 MachineBasicBlock::iterator MII = Inst; 6646 const DebugLoc &DL = Inst.getDebugLoc(); 6647 6648 MachineOperand &Dest = Inst.getOperand(0); 6649 MachineOperand &Src0 = Inst.getOperand(1); 6650 MachineOperand &Src1 = Inst.getOperand(2); 6651 6652 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6653 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6654 6655 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 6656 .add(Src0) 6657 .add(Src1); 6658 6659 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 6660 .addReg(Interm); 6661 6662 Worklist.insert(&Op); 6663 Worklist.insert(&Not); 6664 6665 MRI.replaceRegWith(Dest.getReg(), NewDest); 6666 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6667 } 6668 6669 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 6670 MachineInstr &Inst, 6671 unsigned Opcode) const { 6672 MachineBasicBlock &MBB = *Inst.getParent(); 6673 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6674 MachineBasicBlock::iterator MII = Inst; 6675 const DebugLoc &DL = Inst.getDebugLoc(); 6676 6677 MachineOperand &Dest = Inst.getOperand(0); 6678 MachineOperand &Src0 = Inst.getOperand(1); 6679 MachineOperand &Src1 = Inst.getOperand(2); 6680 6681 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6682 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6683 6684 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 6685 .add(Src1); 6686 6687 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 6688 .add(Src0) 6689 .addReg(Interm); 6690 6691 Worklist.insert(&Not); 6692 Worklist.insert(&Op); 6693 6694 MRI.replaceRegWith(Dest.getReg(), NewDest); 6695 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6696 } 6697 6698 void SIInstrInfo::splitScalar64BitUnaryOp( 6699 SetVectorType &Worklist, MachineInstr &Inst, 6700 unsigned Opcode, bool Swap) const { 6701 MachineBasicBlock &MBB = *Inst.getParent(); 6702 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6703 6704 MachineOperand &Dest = Inst.getOperand(0); 6705 MachineOperand &Src0 = Inst.getOperand(1); 6706 DebugLoc DL = Inst.getDebugLoc(); 6707 6708 MachineBasicBlock::iterator MII = Inst; 6709 6710 const MCInstrDesc &InstDesc = get(Opcode); 6711 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
6712 MRI.getRegClass(Src0.getReg()) : 6713 &AMDGPU::SGPR_32RegClass; 6714 6715 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6716 6717 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6718 AMDGPU::sub0, Src0SubRC); 6719 6720 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6721 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6722 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6723 6724 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6725 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 6726 6727 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6728 AMDGPU::sub1, Src0SubRC); 6729 6730 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6731 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 6732 6733 if (Swap) 6734 std::swap(DestSub0, DestSub1); 6735 6736 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6737 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6738 .addReg(DestSub0) 6739 .addImm(AMDGPU::sub0) 6740 .addReg(DestSub1) 6741 .addImm(AMDGPU::sub1); 6742 6743 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6744 6745 Worklist.insert(&LoHalf); 6746 Worklist.insert(&HiHalf); 6747 6748 // We don't need to legalizeOperands here because for a single operand, src0 6749 // will support any kind of input. 6750 6751 // Move all users of this moved value. 6752 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6753 } 6754 6755 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 6756 MachineInstr &Inst, 6757 MachineDominatorTree *MDT) const { 6758 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 6759 6760 MachineBasicBlock &MBB = *Inst.getParent(); 6761 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6762 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6763 6764 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6765 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6766 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6767 6768 Register CarryReg = MRI.createVirtualRegister(CarryRC); 6769 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 6770 6771 MachineOperand &Dest = Inst.getOperand(0); 6772 MachineOperand &Src0 = Inst.getOperand(1); 6773 MachineOperand &Src1 = Inst.getOperand(2); 6774 const DebugLoc &DL = Inst.getDebugLoc(); 6775 MachineBasicBlock::iterator MII = Inst; 6776 6777 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 6778 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 6779 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6780 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6781 6782 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6783 AMDGPU::sub0, Src0SubRC); 6784 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6785 AMDGPU::sub0, Src1SubRC); 6786 6787 6788 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6789 AMDGPU::sub1, Src0SubRC); 6790 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6791 AMDGPU::sub1, Src1SubRC); 6792 6793 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 6794 MachineInstr *LoHalf = 6795 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0) 6796 .addReg(CarryReg, RegState::Define) 6797 .add(SrcReg0Sub0) 6798 .add(SrcReg1Sub0) 6799 .addImm(0); // clamp bit 6800 6801 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 6802 MachineInstr *HiHalf = 6803 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1) 6804 .addReg(DeadCarryReg, RegState::Define | RegState::Dead) 6805 .add(SrcReg0Sub1) 6806 .add(SrcReg1Sub1) 6807 .addReg(CarryReg, RegState::Kill) 6808 .addImm(0); // clamp bit 6809 6810 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6811 .addReg(DestSub0) 6812 .addImm(AMDGPU::sub0) 6813 .addReg(DestSub1) 6814 .addImm(AMDGPU::sub1); 6815 6816 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6817 6818 // Try to legalize the operands in case we need to swap the order to keep it 6819 // valid. 6820 legalizeOperands(*LoHalf, MDT); 6821 legalizeOperands(*HiHalf, MDT); 6822 6823 // Move all users of this moved value. 6824 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6825 } 6826 6827 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist, 6828 MachineInstr &Inst, unsigned Opcode, 6829 MachineDominatorTree *MDT) const { 6830 MachineBasicBlock &MBB = *Inst.getParent(); 6831 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6832 6833 MachineOperand &Dest = Inst.getOperand(0); 6834 MachineOperand &Src0 = Inst.getOperand(1); 6835 MachineOperand &Src1 = Inst.getOperand(2); 6836 DebugLoc DL = Inst.getDebugLoc(); 6837 6838 MachineBasicBlock::iterator MII = Inst; 6839 6840 const MCInstrDesc &InstDesc = get(Opcode); 6841 const TargetRegisterClass *Src0RC = Src0.isReg() ? 6842 MRI.getRegClass(Src0.getReg()) : 6843 &AMDGPU::SGPR_32RegClass; 6844 6845 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6846 const TargetRegisterClass *Src1RC = Src1.isReg() ? 
6847 MRI.getRegClass(Src1.getReg()) : 6848 &AMDGPU::SGPR_32RegClass; 6849 6850 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6851 6852 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6853 AMDGPU::sub0, Src0SubRC); 6854 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6855 AMDGPU::sub0, Src1SubRC); 6856 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6857 AMDGPU::sub1, Src0SubRC); 6858 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6859 AMDGPU::sub1, Src1SubRC); 6860 6861 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6862 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6863 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6864 6865 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6866 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0) 6867 .add(SrcReg0Sub0) 6868 .add(SrcReg1Sub0); 6869 6870 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6871 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1) 6872 .add(SrcReg0Sub1) 6873 .add(SrcReg1Sub1); 6874 6875 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6876 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6877 .addReg(DestSub0) 6878 .addImm(AMDGPU::sub0) 6879 .addReg(DestSub1) 6880 .addImm(AMDGPU::sub1); 6881 6882 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6883 6884 Worklist.insert(&LoHalf); 6885 Worklist.insert(&HiHalf); 6886 6887 // Move all users of this moved value. 6888 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6889 } 6890 6891 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist, 6892 MachineInstr &Inst, 6893 MachineDominatorTree *MDT) const { 6894 MachineBasicBlock &MBB = *Inst.getParent(); 6895 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6896 6897 MachineOperand &Dest = Inst.getOperand(0); 6898 MachineOperand &Src0 = Inst.getOperand(1); 6899 MachineOperand &Src1 = Inst.getOperand(2); 6900 const DebugLoc &DL = Inst.getDebugLoc(); 6901 6902 MachineBasicBlock::iterator MII = Inst; 6903 6904 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6905 6906 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 6907 6908 MachineOperand* Op0; 6909 MachineOperand* Op1; 6910 6911 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) { 6912 Op0 = &Src0; 6913 Op1 = &Src1; 6914 } else { 6915 Op0 = &Src1; 6916 Op1 = &Src0; 6917 } 6918 6919 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm) 6920 .add(*Op0); 6921 6922 Register NewDest = MRI.createVirtualRegister(DestRC); 6923 6924 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest) 6925 .addReg(Interm) 6926 .add(*Op1); 6927 6928 MRI.replaceRegWith(Dest.getReg(), NewDest); 6929 6930 Worklist.insert(&Xor); 6931 } 6932 6933 void SIInstrInfo::splitScalar64BitBCNT( 6934 SetVectorType &Worklist, MachineInstr &Inst) const { 6935 MachineBasicBlock &MBB = *Inst.getParent(); 6936 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6937 6938 MachineBasicBlock::iterator MII = Inst; 6939 const DebugLoc &DL = Inst.getDebugLoc(); 6940 6941 MachineOperand &Dest = Inst.getOperand(0); 6942 MachineOperand &Src = Inst.getOperand(1); 6943 6944 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64); 6945 const TargetRegisterClass *SrcRC = Src.isReg() ? 
6946 MRI.getRegClass(Src.getReg()) : 6947 &AMDGPU::SGPR_32RegClass; 6948 6949 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6950 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6951 6952 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0); 6953 6954 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6955 AMDGPU::sub0, SrcSubRC); 6956 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC, 6957 AMDGPU::sub1, SrcSubRC); 6958 6959 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0); 6960 6961 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg); 6962 6963 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6964 6965 // We don't need to legalize operands here. src0 for either instruction can be 6966 // an SGPR, and the second input is unused or determined here. 6967 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6968 } 6969 6970 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist, 6971 MachineInstr &Inst) const { 6972 MachineBasicBlock &MBB = *Inst.getParent(); 6973 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6974 MachineBasicBlock::iterator MII = Inst; 6975 const DebugLoc &DL = Inst.getDebugLoc(); 6976 6977 MachineOperand &Dest = Inst.getOperand(0); 6978 uint32_t Imm = Inst.getOperand(2).getImm(); 6979 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6980 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6981 6982 (void) Offset; 6983 6984 // Only sext_inreg cases handled. 6985 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 && 6986 Offset == 0 && "Not implemented"); 6987 6988 if (BitWidth < 32) { 6989 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6990 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6991 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6992 6993 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo) 6994 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0) 6995 .addImm(0) 6996 .addImm(BitWidth); 6997 6998 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi) 6999 .addImm(31) 7000 .addReg(MidRegLo); 7001 7002 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 7003 .addReg(MidRegLo) 7004 .addImm(AMDGPU::sub0) 7005 .addReg(MidRegHi) 7006 .addImm(AMDGPU::sub1); 7007 7008 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7009 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7010 return; 7011 } 7012 7013 MachineOperand &Src = Inst.getOperand(1); 7014 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7015 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 7016 7017 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg) 7018 .addImm(31) 7019 .addReg(Src.getReg(), 0, AMDGPU::sub0); 7020 7021 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg) 7022 .addReg(Src.getReg(), 0, AMDGPU::sub0) 7023 .addImm(AMDGPU::sub0) 7024 .addReg(TmpReg) 7025 .addImm(AMDGPU::sub1); 7026 7027 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7028 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7029 } 7030 7031 void SIInstrInfo::addUsersToMoveToVALUWorklist( 7032 Register DstReg, 7033 MachineRegisterInfo &MRI, 7034 SetVectorType &Worklist) const { 7035 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg), 7036 E = MRI.use_end(); I != E;) { 7037 MachineInstr &UseMI = *I->getParent(); 7038 7039 unsigned OpNo = 
0; 7040 7041 switch (UseMI.getOpcode()) { 7042 case AMDGPU::COPY: 7043 case AMDGPU::WQM: 7044 case AMDGPU::SOFT_WQM: 7045 case AMDGPU::STRICT_WWM: 7046 case AMDGPU::STRICT_WQM: 7047 case AMDGPU::REG_SEQUENCE: 7048 case AMDGPU::PHI: 7049 case AMDGPU::INSERT_SUBREG: 7050 break; 7051 default: 7052 OpNo = I.getOperandNo(); 7053 break; 7054 } 7055 7056 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 7057 Worklist.insert(&UseMI); 7058 7059 do { 7060 ++I; 7061 } while (I != E && I->getParent() == &UseMI); 7062 } else { 7063 ++I; 7064 } 7065 } 7066 } 7067 7068 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 7069 MachineRegisterInfo &MRI, 7070 MachineInstr &Inst) const { 7071 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7072 MachineBasicBlock *MBB = Inst.getParent(); 7073 MachineOperand &Src0 = Inst.getOperand(1); 7074 MachineOperand &Src1 = Inst.getOperand(2); 7075 const DebugLoc &DL = Inst.getDebugLoc(); 7076 7077 switch (Inst.getOpcode()) { 7078 case AMDGPU::S_PACK_LL_B32_B16: { 7079 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7080 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7081 7082 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 7083 // 0. 7084 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7085 .addImm(0xffff); 7086 7087 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 7088 .addReg(ImmReg, RegState::Kill) 7089 .add(Src0); 7090 7091 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg) 7092 .add(Src1) 7093 .addImm(16) 7094 .addReg(TmpReg, RegState::Kill); 7095 break; 7096 } 7097 case AMDGPU::S_PACK_LH_B32_B16: { 7098 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7099 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7100 .addImm(0xffff); 7101 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg) 7102 .addReg(ImmReg, RegState::Kill) 7103 .add(Src0) 7104 .add(Src1); 7105 break; 7106 } 7107 case AMDGPU::S_PACK_HH_B32_B16: { 7108 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7109 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 7110 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 7111 .addImm(16) 7112 .add(Src0); 7113 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 7114 .addImm(0xffff0000); 7115 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg) 7116 .add(Src1) 7117 .addReg(ImmReg, RegState::Kill) 7118 .addReg(TmpReg, RegState::Kill); 7119 break; 7120 } 7121 default: 7122 llvm_unreachable("unhandled s_pack_* instruction"); 7123 } 7124 7125 MachineOperand &Dest = Inst.getOperand(0); 7126 MRI.replaceRegWith(Dest.getReg(), ResultReg); 7127 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 7128 } 7129 7130 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 7131 MachineInstr &SCCDefInst, 7132 SetVectorType &Worklist, 7133 Register NewCond) const { 7134 7135 // Ensure that def inst defines SCC, which is still live. 7136 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 7137 !Op.isDead() && Op.getParent() == &SCCDefInst); 7138 SmallVector<MachineInstr *, 4> CopyToDelete; 7139 // This assumes that all the users of SCC are in the same block 7140 // as the SCC def. 7141 for (MachineInstr &MI : // Skip the def inst itself. 7142 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 7143 SCCDefInst.getParent()->end())) { 7144 // Check if SCC is used first. 
7145 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI); 7146 if (SCCIdx != -1) { 7147 if (MI.isCopy()) { 7148 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 7149 Register DestReg = MI.getOperand(0).getReg(); 7150 7151 MRI.replaceRegWith(DestReg, NewCond); 7152 CopyToDelete.push_back(&MI); 7153 } else { 7154 7155 if (NewCond.isValid()) 7156 MI.getOperand(SCCIdx).setReg(NewCond); 7157 7158 Worklist.insert(&MI); 7159 } 7160 } 7161 // Exit if we find another SCC def. 7162 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1) 7163 break; 7164 } 7165 for (auto &Copy : CopyToDelete) 7166 Copy->eraseFromParent(); 7167 } 7168 7169 // Instructions that use SCC may be converted to VALU instructions. When that 7170 // happens, the SCC register is changed to VCC_LO. The instruction that defines 7171 // SCC must be changed to an instruction that defines VCC. This function makes 7172 // sure that the instruction that defines SCC is added to the moveToVALU 7173 // worklist. 7174 void SIInstrInfo::addSCCDefsToVALUWorklist(MachineOperand &Op, 7175 SetVectorType &Worklist) const { 7176 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isUse()); 7177 7178 MachineInstr *SCCUseInst = Op.getParent(); 7179 // Look for a preceding instruction that either defines VCC or SCC. If VCC 7180 // then there is nothing to do because the defining instruction has been 7181 // converted to a VALU already. If SCC then that instruction needs to be 7182 // converted to a VALU. 7183 for (MachineInstr &MI : 7184 make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)), 7185 SCCUseInst->getParent()->rend())) { 7186 if (MI.modifiesRegister(AMDGPU::VCC, &RI)) 7187 break; 7188 if (MI.definesRegister(AMDGPU::SCC, &RI)) { 7189 Worklist.insert(&MI); 7190 break; 7191 } 7192 } 7193 } 7194 7195 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass( 7196 const MachineInstr &Inst) const { 7197 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0); 7198 7199 switch (Inst.getOpcode()) { 7200 // For target instructions, getOpRegClass just returns the virtual register 7201 // class associated with the operand, so we need to find an equivalent VGPR 7202 // register class in order to move the instruction to the VALU. 7203 case AMDGPU::COPY: 7204 case AMDGPU::PHI: 7205 case AMDGPU::REG_SEQUENCE: 7206 case AMDGPU::INSERT_SUBREG: 7207 case AMDGPU::WQM: 7208 case AMDGPU::SOFT_WQM: 7209 case AMDGPU::STRICT_WWM: 7210 case AMDGPU::STRICT_WQM: { 7211 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1); 7212 if (RI.isAGPRClass(SrcRC)) { 7213 if (RI.isAGPRClass(NewDstRC)) 7214 return nullptr; 7215 7216 switch (Inst.getOpcode()) { 7217 case AMDGPU::PHI: 7218 case AMDGPU::REG_SEQUENCE: 7219 case AMDGPU::INSERT_SUBREG: 7220 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC); 7221 break; 7222 default: 7223 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 7224 } 7225 7226 if (!NewDstRC) 7227 return nullptr; 7228 } else { 7229 if (RI.isVGPRClass(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass) 7230 return nullptr; 7231 7232 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC); 7233 if (!NewDstRC) 7234 return nullptr; 7235 } 7236 7237 return NewDstRC; 7238 } 7239 default: 7240 return NewDstRC; 7241 } 7242 } 7243 7244 // Find the one SGPR operand we are allowed to use. 7245 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI, 7246 int OpIndices[3]) const { 7247 const MCInstrDesc &Desc = MI.getDesc(); 7248 7249 // Find the one SGPR operand we are allowed to use. 
7250 // 7251 // First we need to consider the instruction's operand requirements before 7252 // legalizing. Some operands are required to be SGPRs, such as implicit uses 7253 // of VCC, but we are still bound by the constant bus requirement to only use 7254 // one. 7255 // 7256 // If the operand's class is an SGPR, we can never move it. 7257 7258 Register SGPRReg = findImplicitSGPRRead(MI); 7259 if (SGPRReg != AMDGPU::NoRegister) 7260 return SGPRReg; 7261 7262 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 7263 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 7264 7265 for (unsigned i = 0; i < 3; ++i) { 7266 int Idx = OpIndices[i]; 7267 if (Idx == -1) 7268 break; 7269 7270 const MachineOperand &MO = MI.getOperand(Idx); 7271 if (!MO.isReg()) 7272 continue; 7273 7274 // Is this operand statically required to be an SGPR based on the operand 7275 // constraints? 7276 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 7277 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 7278 if (IsRequiredSGPR) 7279 return MO.getReg(); 7280 7281 // If this could be a VGPR or an SGPR, Check the dynamic register class. 7282 Register Reg = MO.getReg(); 7283 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 7284 if (RI.isSGPRClass(RegRC)) 7285 UsedSGPRs[i] = Reg; 7286 } 7287 7288 // We don't have a required SGPR operand, so we have a bit more freedom in 7289 // selecting operands to move. 7290 7291 // Try to select the most used SGPR. If an SGPR is equal to one of the 7292 // others, we choose that. 7293 // 7294 // e.g. 7295 // V_FMA_F32 v0, s0, s0, s0 -> No moves 7296 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 7297 7298 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 7299 // prefer those. 7300 7301 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 7302 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 7303 SGPRReg = UsedSGPRs[0]; 7304 } 7305 7306 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 7307 if (UsedSGPRs[1] == UsedSGPRs[2]) 7308 SGPRReg = UsedSGPRs[1]; 7309 } 7310 7311 return SGPRReg; 7312 } 7313 7314 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 7315 unsigned OperandName) const { 7316 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 7317 if (Idx == -1) 7318 return nullptr; 7319 7320 return &MI.getOperand(Idx); 7321 } 7322 7323 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 7324 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 7325 int64_t Format = ST.getGeneration() >= AMDGPUSubtarget::GFX11 ? 7326 AMDGPU::UfmtGFX11::UFMT_32_FLOAT : 7327 AMDGPU::UfmtGFX10::UFMT_32_FLOAT; 7328 return (Format << 44) | 7329 (1ULL << 56) | // RESOURCE_LEVEL = 1 7330 (3ULL << 60); // OOB_SELECT = 3 7331 } 7332 7333 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 7334 if (ST.isAmdHsaOS()) { 7335 // Set ATC = 1. GFX9 doesn't have this bit. 7336 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 7337 RsrcDataFormat |= (1ULL << 56); 7338 7339 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 7340 // BTW, it disables TC L2 and therefore decreases performance. 7341 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 7342 RsrcDataFormat |= (2ULL << 59); 7343 } 7344 7345 return RsrcDataFormat; 7346 } 7347 7348 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 7349 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 7350 AMDGPU::RSRC_TID_ENABLE | 7351 0xffffffff; // Size; 7352 7353 // GFX9 doesn't have ELEMENT_SIZE. 
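// For illustration: a maximum private element size of 4 bytes would encode as
// Log2_32(4) - 1 = 1 in the ELEMENT_SIZE field (example value only).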
7354 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 7355 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1; 7356 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 7357 } 7358 7359 // IndexStride = 64 / 32. 7360 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 7361 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 7362 7363 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 7364 // Clear them unless we want a huge stride. 7365 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 7366 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 7367 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 7368 7369 return Rsrc23; 7370 } 7371 7372 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 7373 unsigned Opc = MI.getOpcode(); 7374 7375 return isSMRD(Opc); 7376 } 7377 7378 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 7379 return get(Opc).mayLoad() && 7380 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 7381 } 7382 7383 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 7384 int &FrameIndex) const { 7385 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 7386 if (!Addr || !Addr->isFI()) 7387 return AMDGPU::NoRegister; 7388 7389 assert(!MI.memoperands_empty() && 7390 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 7391 7392 FrameIndex = Addr->getIndex(); 7393 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 7394 } 7395 7396 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 7397 int &FrameIndex) const { 7398 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 7399 assert(Addr && Addr->isFI()); 7400 FrameIndex = Addr->getIndex(); 7401 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 7402 } 7403 7404 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 7405 int &FrameIndex) const { 7406 if (!MI.mayLoad()) 7407 return AMDGPU::NoRegister; 7408 7409 if (isMUBUF(MI) || isVGPRSpill(MI)) 7410 return isStackAccess(MI, FrameIndex); 7411 7412 if (isSGPRSpill(MI)) 7413 return isSGPRStackAccess(MI, FrameIndex); 7414 7415 return AMDGPU::NoRegister; 7416 } 7417 7418 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 7419 int &FrameIndex) const { 7420 if (!MI.mayStore()) 7421 return AMDGPU::NoRegister; 7422 7423 if (isMUBUF(MI) || isVGPRSpill(MI)) 7424 return isStackAccess(MI, FrameIndex); 7425 7426 if (isSGPRSpill(MI)) 7427 return isSGPRStackAccess(MI, FrameIndex); 7428 7429 return AMDGPU::NoRegister; 7430 } 7431 7432 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 7433 unsigned Size = 0; 7434 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 7435 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 7436 while (++I != E && I->isInsideBundle()) { 7437 assert(!I->isBundle() && "No nested bundle!"); 7438 Size += getInstSizeInBytes(*I); 7439 } 7440 7441 return Size; 7442 } 7443 7444 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 7445 unsigned Opc = MI.getOpcode(); 7446 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 7447 unsigned DescSize = Desc.getSize(); 7448 7449 // If we have a definitive size, we can use it. Otherwise we need to inspect 7450 // the operands to know the size. 7451 if (isFixedSize(MI)) { 7452 unsigned Size = DescSize; 7453 7454 // If we hit the buggy offset, an extra nop will be inserted in MC so 7455 // estimate the worst case. 
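// That is, on affected subtargets a fixed-size branch is conservatively
// counted as DescSize plus one extra 4-byte nop.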
7456 if (MI.isBranch() && ST.hasOffset3fBug()) 7457 Size += 4; 7458 7459 return Size; 7460 } 7461 7462 // Instructions may have a 32-bit literal encoded after them. Check 7463 // operands that could ever be literals. 7464 if (isVALU(MI) || isSALU(MI)) { 7465 if (isDPP(MI)) 7466 return DescSize; 7467 bool HasLiteral = false; 7468 for (int I = 0, E = MI.getNumExplicitOperands(); I != E; ++I) { 7469 const MachineOperand &Op = MI.getOperand(I); 7470 const MCOperandInfo &OpInfo = Desc.OpInfo[I]; 7471 if (isLiteralConstantLike(Op, OpInfo)) { 7472 HasLiteral = true; 7473 break; 7474 } 7475 } 7476 return HasLiteral ? DescSize + 4 : DescSize; 7477 } 7478 7479 // Check whether we have extra NSA words. 7480 if (isMIMG(MI)) { 7481 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 7482 if (VAddr0Idx < 0) 7483 return 8; 7484 7485 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 7486 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4); 7487 } 7488 7489 switch (Opc) { 7490 case TargetOpcode::BUNDLE: 7491 return getInstBundleSize(MI); 7492 case TargetOpcode::INLINEASM: 7493 case TargetOpcode::INLINEASM_BR: { 7494 const MachineFunction *MF = MI.getParent()->getParent(); 7495 const char *AsmStr = MI.getOperand(0).getSymbolName(); 7496 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST); 7497 } 7498 default: 7499 if (MI.isMetaInstruction()) 7500 return 0; 7501 return DescSize; 7502 } 7503 } 7504 7505 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const { 7506 if (!isFLAT(MI)) 7507 return false; 7508 7509 if (MI.memoperands_empty()) 7510 return true; 7511 7512 for (const MachineMemOperand *MMO : MI.memoperands()) { 7513 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS) 7514 return true; 7515 } 7516 return false; 7517 } 7518 7519 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const { 7520 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO; 7521 } 7522 7523 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry, 7524 MachineBasicBlock *IfEnd) const { 7525 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator(); 7526 assert(TI != IfEntry->end()); 7527 7528 MachineInstr *Branch = &(*TI); 7529 MachineFunction *MF = IfEntry->getParent(); 7530 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo(); 7531 7532 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7533 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7534 MachineInstr *SIIF = 7535 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg) 7536 .add(Branch->getOperand(0)) 7537 .add(Branch->getOperand(1)); 7538 MachineInstr *SIEND = 7539 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF)) 7540 .addReg(DstReg); 7541 7542 IfEntry->erase(TI); 7543 IfEntry->insert(IfEntry->end(), SIIF); 7544 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND); 7545 } 7546 } 7547 7548 void SIInstrInfo::convertNonUniformLoopRegion( 7549 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const { 7550 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator(); 7551 // We expect 2 terminators, one conditional and one unconditional. 
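// The conditional branch found here is rewritten below into a PHI in the loop
// header that feeds SI_IF_BREAK, followed by SI_LOOP at the end of the latch.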
7552 assert(TI != LoopEnd->end()); 7553 7554 MachineInstr *Branch = &(*TI); 7555 MachineFunction *MF = LoopEnd->getParent(); 7556 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 7557 7558 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7559 7560 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7561 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 7562 MachineInstrBuilder HeaderPHIBuilder = 7563 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 7564 for (MachineBasicBlock *PMBB : LoopEntry->predecessors()) { 7565 if (PMBB == LoopEnd) { 7566 HeaderPHIBuilder.addReg(BackEdgeReg); 7567 } else { 7568 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 7569 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 7570 ZeroReg, 0); 7571 HeaderPHIBuilder.addReg(ZeroReg); 7572 } 7573 HeaderPHIBuilder.addMBB(PMBB); 7574 } 7575 MachineInstr *HeaderPhi = HeaderPHIBuilder; 7576 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 7577 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 7578 .addReg(DstReg) 7579 .add(Branch->getOperand(0)); 7580 MachineInstr *SILOOP = 7581 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 7582 .addReg(BackEdgeReg) 7583 .addMBB(LoopEntry); 7584 7585 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 7586 LoopEnd->erase(TI); 7587 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 7588 LoopEnd->insert(LoopEnd->end(), SILOOP); 7589 } 7590 } 7591 7592 ArrayRef<std::pair<int, const char *>> 7593 SIInstrInfo::getSerializableTargetIndices() const { 7594 static const std::pair<int, const char *> TargetIndices[] = { 7595 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 7596 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 7597 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 7598 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 7599 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 7600 return makeArrayRef(TargetIndices); 7601 } 7602 7603 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 7604 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 7605 ScheduleHazardRecognizer * 7606 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 7607 const ScheduleDAG *DAG) const { 7608 return new GCNHazardRecognizer(DAG->MF); 7609 } 7610 7611 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 7612 /// pass. 7613 ScheduleHazardRecognizer * 7614 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 7615 return new GCNHazardRecognizer(MF); 7616 } 7617 7618 // Called during: 7619 // - pre-RA scheduling and post-RA scheduling 7620 ScheduleHazardRecognizer * 7621 SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II, 7622 const ScheduleDAGMI *DAG) const { 7623 // Borrowed from Arm Target 7624 // We would like to restrict this hazard recognizer to only 7625 // post-RA scheduling; we can tell that we're post-RA because we don't 7626 // track VRegLiveness. 
7627 if (!DAG->hasVRegLiveness()) 7628 return new GCNHazardRecognizer(DAG->MF); 7629 return TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG); 7630 } 7631 7632 std::pair<unsigned, unsigned> 7633 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7634 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 7635 } 7636 7637 ArrayRef<std::pair<unsigned, const char *>> 7638 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7639 static const std::pair<unsigned, const char *> TargetFlags[] = { 7640 { MO_GOTPCREL, "amdgpu-gotprel" }, 7641 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 7642 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 7643 { MO_REL32_LO, "amdgpu-rel32-lo" }, 7644 { MO_REL32_HI, "amdgpu-rel32-hi" }, 7645 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 7646 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 7647 }; 7648 7649 return makeArrayRef(TargetFlags); 7650 } 7651 7652 ArrayRef<std::pair<MachineMemOperand::Flags, const char *>> 7653 SIInstrInfo::getSerializableMachineMemOperandTargetFlags() const { 7654 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] = 7655 { 7656 {MONoClobber, "amdgpu-noclobber"}, 7657 }; 7658 7659 return makeArrayRef(TargetFlags); 7660 } 7661 7662 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 7663 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 7664 MI.modifiesRegister(AMDGPU::EXEC, &RI); 7665 } 7666 7667 MachineInstrBuilder 7668 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7669 MachineBasicBlock::iterator I, 7670 const DebugLoc &DL, 7671 Register DestReg) const { 7672 if (ST.hasAddNoCarry()) 7673 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 7674 7675 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 7676 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 7677 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 7678 7679 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7680 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7681 } 7682 7683 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7684 MachineBasicBlock::iterator I, 7685 const DebugLoc &DL, 7686 Register DestReg, 7687 RegScavenger &RS) const { 7688 if (ST.hasAddNoCarry()) 7689 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 7690 7691 // If available, prefer to use vcc. 7692 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 7693 ? Register(RI.getVCC()) 7694 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 7695 7696 // TODO: Users need to deal with this. 
7697 if (!UnusedCarry.isValid()) 7698 return MachineInstrBuilder(); 7699 7700 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7701 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7702 } 7703 7704 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 7705 switch (Opcode) { 7706 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 7707 case AMDGPU::SI_KILL_I1_TERMINATOR: 7708 return true; 7709 default: 7710 return false; 7711 } 7712 } 7713 7714 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 7715 switch (Opcode) { 7716 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 7717 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 7718 case AMDGPU::SI_KILL_I1_PSEUDO: 7719 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 7720 default: 7721 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 7722 } 7723 } 7724 7725 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 7726 if (!ST.isWave32()) 7727 return; 7728 7729 for (auto &Op : MI.implicit_operands()) { 7730 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 7731 Op.setReg(AMDGPU::VCC_LO); 7732 } 7733 } 7734 7735 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 7736 if (!isSMRD(MI)) 7737 return false; 7738 7739 // Check that it is using a buffer resource. 7740 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 7741 if (Idx == -1) // e.g. s_memtime 7742 return false; 7743 7744 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 7745 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 7746 } 7747 7748 // Depending on the used address space and instructions, some immediate offsets 7749 // are allowed and some are not. 7750 // In general, flat instruction offsets can only be non-negative, global and 7751 // scratch instruction offsets can also be negative. 7752 // 7753 // There are several bugs related to these offsets: 7754 // On gfx10.1, flat instructions that go into the global address space cannot 7755 // use an offset. 7756 // 7757 // For scratch instructions, the address can be either an SGPR or a VGPR. 7758 // The following offsets can be used, depending on the architecture (x means 7759 // cannot be used): 7760 // +----------------------------+------+------+ 7761 // | Address-Mode | SGPR | VGPR | 7762 // +----------------------------+------+------+ 7763 // | gfx9 | | | 7764 // | negative, 4-aligned offset | x | ok | 7765 // | negative, unaligned offset | x | ok | 7766 // +----------------------------+------+------+ 7767 // | gfx10 | | | 7768 // | negative, 4-aligned offset | ok | ok | 7769 // | negative, unaligned offset | ok | x | 7770 // +----------------------------+------+------+ 7771 // | gfx10.3 | | | 7772 // | negative, 4-aligned offset | ok | ok | 7773 // | negative, unaligned offset | ok | ok | 7774 // +----------------------------+------+------+ 7775 // 7776 // This function ignores the addressing mode, so if an offset cannot be used in 7777 // one addressing mode, it is considered illegal. 7778 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 7779 uint64_t FlatVariant) const { 7780 // TODO: Should 0 be special cased? 
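// For illustration: with N immediate-offset bits, signed variants accept
// offsets in [-2^(N-1), 2^(N-1) - 1] and unsigned variants accept
// [0, 2^N - 1]; the exact N is queried from the subtarget below.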
7781 if (!ST.hasFlatInstOffsets()) 7782 return false; 7783 7784 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT && 7785 (AddrSpace == AMDGPUAS::FLAT_ADDRESS || 7786 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS)) 7787 return false; 7788 7789 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7790 if (ST.hasNegativeScratchOffsetBug() && 7791 FlatVariant == SIInstrFlags::FlatScratch) 7792 Signed = false; 7793 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7794 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 && 7795 (Offset % 4) != 0) { 7796 return false; 7797 } 7798 7799 unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7800 return Signed ? isIntN(N, Offset) : isUIntN(N, Offset); 7801 } 7802 7803 // See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not. 7804 std::pair<int64_t, int64_t> 7805 SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, 7806 uint64_t FlatVariant) const { 7807 int64_t RemainderOffset = COffsetVal; 7808 int64_t ImmField = 0; 7809 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7810 if (ST.hasNegativeScratchOffsetBug() && 7811 FlatVariant == SIInstrFlags::FlatScratch) 7812 Signed = false; 7813 7814 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7815 if (Signed) { 7816 // Use signed division by a power of two to truncate towards 0. 7817 int64_t D = 1LL << (NumBits - 1); 7818 RemainderOffset = (COffsetVal / D) * D; 7819 ImmField = COffsetVal - RemainderOffset; 7820 7821 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7822 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 && 7823 (ImmField % 4) != 0) { 7824 // Make ImmField a multiple of 4 7825 RemainderOffset += ImmField % 4; 7826 ImmField -= ImmField % 4; 7827 } 7828 } else if (COffsetVal >= 0) { 7829 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits); 7830 RemainderOffset = COffsetVal - ImmField; 7831 } 7832 7833 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant)); 7834 assert(RemainderOffset + ImmField == COffsetVal); 7835 return {ImmField, RemainderOffset}; 7836 } 7837 7838 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 7839 // and the columns of the getMCOpcodeGen table. 7840 enum SIEncodingFamily { 7841 SI = 0, 7842 VI = 1, 7843 SDWA = 2, 7844 SDWA9 = 3, 7845 GFX80 = 4, 7846 GFX9 = 5, 7847 GFX10 = 6, 7848 SDWA10 = 7, 7849 GFX90A = 8, 7850 GFX940 = 9, 7851 GFX11 = 10, 7852 }; 7853 7854 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 7855 switch (ST.getGeneration()) { 7856 default: 7857 break; 7858 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 7859 case AMDGPUSubtarget::SEA_ISLANDS: 7860 return SIEncodingFamily::SI; 7861 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 7862 case AMDGPUSubtarget::GFX9: 7863 return SIEncodingFamily::VI; 7864 case AMDGPUSubtarget::GFX10: 7865 return SIEncodingFamily::GFX10; 7866 case AMDGPUSubtarget::GFX11: 7867 return SIEncodingFamily::GFX11; 7868 } 7869 llvm_unreachable("Unknown subtarget generation!"); 7870 } 7871 7872 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 7873 switch(MCOp) { 7874 // These opcodes use indirect register addressing so 7875 // they need special handling by codegen (currently missing). 7876 // Therefore it is too risky to allow these opcodes 7877 // to be selected by dpp combiner or sdwa peepholer. 
7878 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 7879 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 7880 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 7881 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 7882 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 7883 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 7884 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 7885 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 7886 return true; 7887 default: 7888 return false; 7889 } 7890 } 7891 7892 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 7893 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 7894 7895 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 7896 ST.getGeneration() == AMDGPUSubtarget::GFX9) 7897 Gen = SIEncodingFamily::GFX9; 7898 7899 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 7900 // subtarget has UnpackedD16VMem feature. 7901 // TODO: remove this when we discard GFX80 encoding. 7902 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 7903 Gen = SIEncodingFamily::GFX80; 7904 7905 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 7906 switch (ST.getGeneration()) { 7907 default: 7908 Gen = SIEncodingFamily::SDWA; 7909 break; 7910 case AMDGPUSubtarget::GFX9: 7911 Gen = SIEncodingFamily::SDWA9; 7912 break; 7913 case AMDGPUSubtarget::GFX10: 7914 Gen = SIEncodingFamily::SDWA10; 7915 break; 7916 } 7917 } 7918 7919 if (isMAI(Opcode)) { 7920 int MFMAOp = AMDGPU::getMFMAEarlyClobberOp(Opcode); 7921 if (MFMAOp != -1) 7922 Opcode = MFMAOp; 7923 } 7924 7925 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 7926 7927 // -1 means that Opcode is already a native instruction. 7928 if (MCOp == -1) 7929 return Opcode; 7930 7931 if (ST.hasGFX90AInsts()) { 7932 uint16_t NMCOp = (uint16_t)-1; 7933 if (ST.hasGFX940Insts()) 7934 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX940); 7935 if (NMCOp == (uint16_t)-1) 7936 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A); 7937 if (NMCOp == (uint16_t)-1) 7938 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX9); 7939 if (NMCOp != (uint16_t)-1) 7940 MCOp = NMCOp; 7941 } 7942 7943 // (uint16_t)-1 means that Opcode is a pseudo instruction that has 7944 // no encoding in the given subtarget generation. 7945 if (MCOp == (uint16_t)-1) 7946 return -1; 7947 7948 if (isAsmOnlyOpcode(MCOp)) 7949 return -1; 7950 7951 return MCOp; 7952 } 7953 7954 static 7955 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) { 7956 assert(RegOpnd.isReg()); 7957 return RegOpnd.isUndef() ? 
TargetInstrInfo::RegSubRegPair() :
7958 getRegSubRegPair(RegOpnd);
7959 }
7960
7961 TargetInstrInfo::RegSubRegPair
7962 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7963 assert(MI.isRegSequence());
7964 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
7965 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7966 auto &RegOp = MI.getOperand(1 + 2 * I);
7967 return getRegOrUndef(RegOp);
7968 }
7969 return TargetInstrInfo::RegSubRegPair();
7970 }
7971
7972 // Try to find the definition of reg:subreg in subreg-manipulation pseudos.
7973 // Following a subreg of reg:subreg isn't supported.
7974 static bool followSubRegDef(MachineInstr &MI,
7975 TargetInstrInfo::RegSubRegPair &RSR) {
7976 if (!RSR.SubReg)
7977 return false;
7978 switch (MI.getOpcode()) {
7979 default: break;
7980 case AMDGPU::REG_SEQUENCE:
7981 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7982 return true;
7983 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg.
7984 case AMDGPU::INSERT_SUBREG:
7985 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7986 // Inserted the subreg we're looking for.
7987 RSR = getRegOrUndef(MI.getOperand(2));
7988 else { // The subreg is in the rest of the reg.
7989 auto R1 = getRegOrUndef(MI.getOperand(1));
7990 if (R1.SubReg) // Subreg of subreg isn't supported.
7991 return false;
7992 RSR.Reg = R1.Reg;
7993 }
7994 return true;
7995 }
7996 return false;
7997 }
7998
7999 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
8000 MachineRegisterInfo &MRI) {
8001 assert(MRI.isSSA());
8002 if (!P.Reg.isVirtual())
8003 return nullptr;
8004
8005 auto RSR = P;
8006 auto *DefInst = MRI.getVRegDef(RSR.Reg);
8007 while (auto *MI = DefInst) {
8008 DefInst = nullptr;
8009 switch (MI->getOpcode()) {
8010 case AMDGPU::COPY:
8011 case AMDGPU::V_MOV_B32_e32: {
8012 auto &Op1 = MI->getOperand(1);
8013 if (Op1.isReg() && Op1.getReg().isVirtual()) {
8014 if (Op1.isUndef())
8015 return nullptr;
8016 RSR = getRegSubRegPair(Op1);
8017 DefInst = MRI.getVRegDef(RSR.Reg);
8018 }
8019 break;
8020 }
8021 default:
8022 if (followSubRegDef(*MI, RSR)) {
8023 if (!RSR.Reg)
8024 return nullptr;
8025 DefInst = MRI.getVRegDef(RSR.Reg);
8026 }
8027 }
8028 if (!DefInst)
8029 return MI;
8030 }
8031 return nullptr;
8032 }
8033
8034 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
8035 Register VReg,
8036 const MachineInstr &DefMI,
8037 const MachineInstr &UseMI) {
8038 assert(MRI.isSSA() && "Must be run on SSA");
8039
8040 auto *TRI = MRI.getTargetRegisterInfo();
8041 auto *DefBB = DefMI.getParent();
8042
8043 // Don't bother searching between blocks, although it is possible this block
8044 // doesn't modify exec.
8045 if (UseMI.getParent() != DefBB)
8046 return true;
8047
8048 const int MaxInstScan = 20;
8049 int NumInst = 0;
8050
8051 // Stop scan at the use.
8052 auto E = UseMI.getIterator(); 8053 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 8054 if (I->isDebugInstr()) 8055 continue; 8056 8057 if (++NumInst > MaxInstScan) 8058 return true; 8059 8060 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 8061 return true; 8062 } 8063 8064 return false; 8065 } 8066 8067 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 8068 Register VReg, 8069 const MachineInstr &DefMI) { 8070 assert(MRI.isSSA() && "Must be run on SSA"); 8071 8072 auto *TRI = MRI.getTargetRegisterInfo(); 8073 auto *DefBB = DefMI.getParent(); 8074 8075 const int MaxUseScan = 10; 8076 int NumUse = 0; 8077 8078 for (auto &Use : MRI.use_nodbg_operands(VReg)) { 8079 auto &UseInst = *Use.getParent(); 8080 // Don't bother searching between blocks, although it is possible this block 8081 // doesn't modify exec. 8082 if (UseInst.getParent() != DefBB || UseInst.isPHI()) 8083 return true; 8084 8085 if (++NumUse > MaxUseScan) 8086 return true; 8087 } 8088 8089 if (NumUse == 0) 8090 return false; 8091 8092 const int MaxInstScan = 20; 8093 int NumInst = 0; 8094 8095 // Stop scan when we have seen all the uses. 8096 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 8097 assert(I != DefBB->end()); 8098 8099 if (I->isDebugInstr()) 8100 continue; 8101 8102 if (++NumInst > MaxInstScan) 8103 return true; 8104 8105 for (const MachineOperand &Op : I->operands()) { 8106 // We don't check reg masks here as they're used only on calls: 8107 // 1. EXEC is only considered const within one BB 8108 // 2. Call should be a terminator instruction if present in a BB 8109 8110 if (!Op.isReg()) 8111 continue; 8112 8113 Register Reg = Op.getReg(); 8114 if (Op.isUse()) { 8115 if (Reg == VReg && --NumUse == 0) 8116 return false; 8117 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC)) 8118 return true; 8119 } 8120 } 8121 } 8122 8123 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 8124 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 8125 const DebugLoc &DL, Register Src, Register Dst) const { 8126 auto Cur = MBB.begin(); 8127 if (Cur != MBB.end()) 8128 do { 8129 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 8130 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 8131 ++Cur; 8132 } while (Cur != MBB.end() && Cur != LastPHIIt); 8133 8134 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 8135 Dst); 8136 } 8137 8138 MachineInstr *SIInstrInfo::createPHISourceCopy( 8139 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 8140 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const { 8141 if (InsPt != MBB.end() && 8142 (InsPt->getOpcode() == AMDGPU::SI_IF || 8143 InsPt->getOpcode() == AMDGPU::SI_ELSE || 8144 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 8145 InsPt->definesRegister(Src)) { 8146 InsPt++; 8147 return BuildMI(MBB, InsPt, DL, 8148 get(ST.isWave32() ? AMDGPU::S_MOV_B32_term 8149 : AMDGPU::S_MOV_B64_term), 8150 Dst) 8151 .addReg(Src, 0, SrcSubReg) 8152 .addReg(AMDGPU::EXEC, RegState::Implicit); 8153 } 8154 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 8155 Dst); 8156 } 8157 8158 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 8159 8160 MachineInstr *SIInstrInfo::foldMemoryOperandImpl( 8161 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 8162 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, 8163 VirtRegMap *VRM) const { 8164 // This is a bit of a hack (copied from AArch64). 

bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); }

MachineInstr *SIInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // This is a bit of a hack (copied from AArch64). Consider this instruction:
  //
  //   %0:sreg_32 = COPY $m0
  //
  // We explicitly chose SReg_32 for the virtual register so such a copy might
  // be eliminated by RegisterCoalescer. However, that may not be possible, and
  // %0 may even spill. We can't spill $m0 normally (it would require copying
  // to a numbered SGPR anyway), and since it is in the SReg_32 register class,
  // TargetInstrInfo::foldMemoryOperand() is going to try.
  // A similar issue also exists with spilling and reloading $exec registers.
  //
  // To prevent that, constrain the %0 register class here.
  if (MI.isFullCopy()) {
    Register DstReg = MI.getOperand(0).getReg();
    Register SrcReg = MI.getOperand(1).getReg();
    if ((DstReg.isVirtual() || SrcReg.isVirtual()) &&
        (DstReg.isVirtual() != SrcReg.isVirtual())) {
      MachineRegisterInfo &MRI = MF.getRegInfo();
      Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg;
      const TargetRegisterClass *RC = MRI.getRegClass(VirtReg);
      if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
        return nullptr;
      } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) {
        MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass);
        return nullptr;
      }
    }
  }

  return nullptr;
}
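
// Illustrative scenario for the workaround above (hypothetical MIR): if
//   %0:sreg_32 = COPY $m0
// is later chosen for spilling, the generic folding path would try to fold
// the COPY because $m0 is in %0's register class, effectively asking for a
// spill/reload of $m0 itself. Once %0 is narrowed to SReg_32_XM0_XEXEC, $m0
// (and likewise the exec registers) is no longer in the class, the fold is
// rejected, and %0 is spilled as an ordinary SGPR value instead.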

unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                      const MachineInstr &MI,
                                      unsigned *PredCost) const {
  if (MI.isBundle()) {
    MachineBasicBlock::const_instr_iterator I(MI.getIterator());
    MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end());
    unsigned Lat = 0, Count = 0;
    for (++I; I != E && I->isBundledWithPred(); ++I) {
      ++Count;
      Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I));
    }
    return Lat + Count - 1;
  }

  return SchedModel.computeInstrLatency(&MI);
}

unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) {
  switch (MF.getFunction().getCallingConv()) {
  case CallingConv::AMDGPU_PS:
    return 1;
  case CallingConv::AMDGPU_VS:
    return 2;
  case CallingConv::AMDGPU_GS:
    return 3;
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_ES:
    report_fatal_error("ds_ordered_count unsupported for this calling conv");
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::C:
  case CallingConv::Fast:
  default:
    // Assume other calling conventions are various compute callable functions.
    return 0;
  }
}

bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg,
                                 Register &SrcReg2, int64_t &CmpMask,
                                 int64_t &CmpValue) const {
  if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg())
    return false;

  switch (MI.getOpcode()) {
  default:
    break;
  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMP_LT_U32:
  case AMDGPU::S_CMP_LT_I32:
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMP_LE_U32:
  case AMDGPU::S_CMP_LE_I32:
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMP_EQ_U64:
  case AMDGPU::S_CMP_LG_U64:
    SrcReg = MI.getOperand(0).getReg();
    if (MI.getOperand(1).isReg()) {
      if (MI.getOperand(1).getSubReg())
        return false;
      SrcReg2 = MI.getOperand(1).getReg();
      CmpValue = 0;
    } else if (MI.getOperand(1).isImm()) {
      SrcReg2 = Register();
      CmpValue = MI.getOperand(1).getImm();
    } else {
      return false;
    }
    CmpMask = ~0;
    return true;
  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
  case AMDGPU::S_CMPK_LT_U32:
  case AMDGPU::S_CMPK_LT_I32:
  case AMDGPU::S_CMPK_GT_U32:
  case AMDGPU::S_CMPK_GT_I32:
  case AMDGPU::S_CMPK_LE_U32:
  case AMDGPU::S_CMPK_LE_I32:
  case AMDGPU::S_CMPK_GE_U32:
  case AMDGPU::S_CMPK_GE_I32:
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = Register();
    CmpValue = MI.getOperand(1).getImm();
    CmpMask = ~0;
    return true;
  }

  return false;
}
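
// Illustrative example (hypothetical MIR): for
//   S_CMP_EQ_U32 %5, 42, implicit-def $scc
// analyzeCompare() reports SrcReg = %5, SrcReg2 = <none>, CmpValue = 42 and
// CmpMask = ~0; optimizeCompareInstr() below then tries to fold the compare
// into the instruction defining %5.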

bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg,
                                       Register SrcReg2, int64_t CmpMask,
                                       int64_t CmpValue,
                                       const MachineRegisterInfo *MRI) const {
  if (!SrcReg || SrcReg.isPhysical())
    return false;

  if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue))
    return false;

  const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI,
                               this](int64_t ExpectedValue, unsigned SrcSize,
                                     bool IsReversible, bool IsSigned) -> bool {
    // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n
    // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n
    // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n
    // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n
    //
    // Signed ge/gt are not used for the sign bit.
    //
    // If the result of the AND is unused except in the compare:
    // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n
    //
    // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
    // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n
    // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n
    // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
    // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n
    // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n

    MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg);
    if (!Def || Def->getParent() != CmpInstr.getParent())
      return false;

    if (Def->getOpcode() != AMDGPU::S_AND_B32 &&
        Def->getOpcode() != AMDGPU::S_AND_B64)
      return false;

    int64_t Mask;
    const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool {
      if (MO->isImm())
        Mask = MO->getImm();
      else if (!getFoldableImm(MO, Mask))
        return false;
      Mask &= maxUIntN(SrcSize);
      return isPowerOf2_64(Mask);
    };

    MachineOperand *SrcOp = &Def->getOperand(1);
    if (isMask(SrcOp))
      SrcOp = &Def->getOperand(2);
    else if (isMask(&Def->getOperand(2)))
      SrcOp = &Def->getOperand(1);
    else
      return false;

    unsigned BitNo = countTrailingZeros((uint64_t)Mask);
    if (IsSigned && BitNo == SrcSize - 1)
      return false;

    ExpectedValue <<= BitNo;

    bool IsReversedCC = false;
    if (CmpValue != ExpectedValue) {
      if (!IsReversible)
        return false;
      IsReversedCC = CmpValue == (ExpectedValue ^ Mask);
      if (!IsReversedCC)
        return false;
    }

    Register DefReg = Def->getOperand(0).getReg();
    if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg))
      return false;

    for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator();
         I != E; ++I) {
      if (I->modifiesRegister(AMDGPU::SCC, &RI) ||
          I->killsRegister(AMDGPU::SCC, &RI))
        return false;
    }

    MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC);
    SccDef->setIsDead(false);
    CmpInstr.eraseFromParent();

    if (!MRI->use_nodbg_empty(DefReg)) {
      assert(!IsReversedCC);
      return true;
    }

    // The result of the AND is otherwise unused; replace the AND with an
    // S_BITCMP.
    MachineBasicBlock *MBB = Def->getParent();

    unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32
                                                     : AMDGPU::S_BITCMP1_B32
                                      : IsReversedCC ? AMDGPU::S_BITCMP0_B64
                                                     : AMDGPU::S_BITCMP1_B64;

    BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc))
        .add(*SrcOp)
        .addImm(BitNo);
    Def->eraseFromParent();

    return true;
  };

  switch (CmpInstr.getOpcode()) {
  default:
    break;
  case AMDGPU::S_CMP_EQ_U32:
  case AMDGPU::S_CMP_EQ_I32:
  case AMDGPU::S_CMPK_EQ_U32:
  case AMDGPU::S_CMPK_EQ_I32:
    return optimizeCmpAnd(1, 32, true, false);
  case AMDGPU::S_CMP_GE_U32:
  case AMDGPU::S_CMPK_GE_U32:
    return optimizeCmpAnd(1, 32, false, false);
  case AMDGPU::S_CMP_GE_I32:
  case AMDGPU::S_CMPK_GE_I32:
    return optimizeCmpAnd(1, 32, false, true);
  case AMDGPU::S_CMP_EQ_U64:
    return optimizeCmpAnd(1, 64, true, false);
  case AMDGPU::S_CMP_LG_U32:
  case AMDGPU::S_CMP_LG_I32:
  case AMDGPU::S_CMPK_LG_U32:
  case AMDGPU::S_CMPK_LG_I32:
    return optimizeCmpAnd(0, 32, true, false);
  case AMDGPU::S_CMP_GT_U32:
  case AMDGPU::S_CMPK_GT_U32:
    return optimizeCmpAnd(0, 32, false, false);
  case AMDGPU::S_CMP_GT_I32:
  case AMDGPU::S_CMPK_GT_I32:
    return optimizeCmpAnd(0, 32, false, true);
  case AMDGPU::S_CMP_LG_U64:
    return optimizeCmpAnd(0, 64, true, false);
  }

  return false;
}
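
// Illustrative end-to-end example of the folds above (hypothetical MIR):
//   %3:sreg_32 = S_AND_B32 %2, 4, implicit-def dead $scc
//   S_CMP_LG_U32 %3, 0, implicit-def $scc
// If %3 has no other uses, the pair is rewritten to
//   S_BITCMP1_B32 %2, 2
// i.e. "test bit 2 of %2". If %3 is still used elsewhere, only the compare is
// erased and the S_AND_B32's SCC def is marked live instead.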

void SIInstrInfo::enforceOperandRCAlignment(MachineInstr &MI,
                                            unsigned OpName) const {
  if (!ST.needsAlignedVGPRs())
    return;

  int OpNo = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OpName);
  if (OpNo < 0)
    return;
  MachineOperand &Op = MI.getOperand(OpNo);
  if (getOpSize(MI, OpNo) > 4)
    return;

  // Add an implicit aligned super-reg to force alignment on the data operand.
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
  Register DataReg = Op.getReg();
  bool IsAGPR = RI.isAGPR(MRI, DataReg);
  Register Undef = MRI.createVirtualRegister(
      IsAGPR ? &AMDGPU::AGPR_32RegClass : &AMDGPU::VGPR_32RegClass);
  BuildMI(*BB, MI, DL, get(AMDGPU::IMPLICIT_DEF), Undef);
  Register NewVR =
      MRI.createVirtualRegister(IsAGPR ? &AMDGPU::AReg_64_Align2RegClass
                                       : &AMDGPU::VReg_64_Align2RegClass);
  BuildMI(*BB, MI, DL, get(AMDGPU::REG_SEQUENCE), NewVR)
      .addReg(DataReg, 0, Op.getSubReg())
      .addImm(AMDGPU::sub0)
      .addReg(Undef)
      .addImm(AMDGPU::sub1);
  Op.setReg(NewVR);
  Op.setSubReg(AMDGPU::sub0);
  MI.addOperand(MachineOperand::CreateReg(NewVR, false, true));
}
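
// Illustrative effect of enforceOperandRCAlignment (hypothetical operands):
// for a 32-bit VGPR data operand %data on a subtarget requiring aligned VGPR
// tuples, the pass builds
//   %undef:vgpr_32 = IMPLICIT_DEF
//   %new:vreg_64_align2 = REG_SEQUENCE %data, %subreg.sub0, %undef, %subreg.sub1
// and rewrites the operand to %new.sub0 (plus an implicit use of %new), so
// that after coalescing the data value ends up in an even-aligned VGPR.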