//===- SIInstrInfo.cpp - SI Instruction Information ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// SI Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "SIInstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "GCNHazardRecognizer.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-instr-info"

#define GET_INSTRINFO_CTOR_DTOR
#include "AMDGPUGenInstrInfo.inc"

namespace llvm {

class AAResults;

namespace AMDGPU {
#define GET_D16ImageDimIntrinsics_IMPL
#define GET_ImageDimIntrinsicTable_IMPL
#define GET_RsrcIntrinsics_IMPL
#include "AMDGPUGenSearchableTables.inc"
}
}

// Must be at least 4 to be able to branch over minimum unconditional branch
// code. This is only for making it possible to write reasonably small tests
// for long branches.
static cl::opt<unsigned>
BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16),
                 cl::desc("Restrict range of branch instructions (DEBUG)"));

static cl::opt<bool> Fix16BitCopies(
  "amdgpu-fix-16-bit-physreg-copies",
  cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"),
  cl::init(true),
  cl::ReallyHidden);

SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST)
  : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN),
    RI(ST), ST(ST) {
  SchedModel.init(&ST);
}

//===----------------------------------------------------------------------===//
// TargetInstrInfo callbacks
//===----------------------------------------------------------------------===//

static unsigned getNumOperandsNoGlue(SDNode *Node) {
  unsigned N = Node->getNumOperands();
  while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue)
    --N;
  return N;
}

/// Returns true if both nodes have the same value for the given
/// operand \p Op, or if both nodes do not have this operand.
static bool nodesHaveSameOperandValue(SDNode *N0, SDNode *N1, unsigned OpName) {
  unsigned Opc0 = N0->getMachineOpcode();
  unsigned Opc1 = N1->getMachineOpcode();

  int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName);
  int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName);

  if (Op0Idx == -1 && Op1Idx == -1)
    return true;

  if ((Op0Idx == -1 && Op1Idx != -1) ||
      (Op1Idx == -1 && Op0Idx != -1))
    return false;

  // getNamedOperandIdx returns the index for the MachineInstr's operands,
  // which includes the result as the first operand. We are indexing into the
  // MachineSDNode's operands, so we need to skip the result operand to get
  // the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AAResults *AA) const {
  if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI)) {
    // Normally a VALU use of exec would block rematerialization, but an
    // implicit exec read is OK here since all VALU instructions have one.
    // We really want all of the generic logic for this except for this.

    // Another potential implicit use is the mode register. The core logic of
    // the RA will not attempt rematerialization if mode is set anywhere
    // in the function; otherwise it is safe since mode is not changed.
    return !MI.hasImplicitDef() &&
           MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() &&
           !MI.mayRaiseFPException();
  }

  return false;
}

bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
  // Any implicit use of exec by VALU is not a real register read.
  return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
         isVALU(*MO.getParent());
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    const ConstantSDNode *Load0Offset =
        dyn_cast<ConstantSDNode>(Load0->getOperand(1));
    const ConstantSDNode *Load1Offset =
        dyn_cast<ConstantSDNode>(Load1->getOperand(1));

    if (!Load0Offset || !Load1Offset)
      return false;

    Offset0 = Load0Offset->getZExtValue();
    Offset1 = Load1Offset->getZExtValue();
    return true;
  }

  // MUBUF and MTBUF can access the same addresses.
  if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) {

    // MUBUF and MTBUF have vaddr at different indices.
    if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) ||
        !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc))
      return false;

    int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);

    if (OffIdx0 == -1 || OffIdx1 == -1)
      return false;

    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    OffIdx0 -= get(Opc0).NumDefs;
    OffIdx1 -= get(Opc1).NumDefs;

    SDValue Off0 = Load0->getOperand(OffIdx0);
    SDValue Off1 = Load1->getOperand(OffIdx1);

    // The offset might be a FrameIndexSDNode.
    if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1))
      return false;

    Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue();
    return true;
  }

  return false;
}

static bool isStride64(unsigned Opc) {
  switch (Opc) {
  case AMDGPU::DS_READ2ST64_B32:
  case AMDGPU::DS_READ2ST64_B64:
  case AMDGPU::DS_WRITE2ST64_B32:
  case AMDGPU::DS_WRITE2ST64_B64:
    return true;
  default:
    return false;
  }
}

bool SIInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  if (!LdSt.mayLoadOrStore())
    return false;

  unsigned Opc = LdSt.getOpcode();
  OffsetIsScalable = false;
  const MachineOperand *BaseOp, *OffsetOp;
  int DataOpIdx;

  if (isDS(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    if (OffsetOp) {
      // Normal, single offset LDS instruction.
      if (!BaseOp) {
        // DS_CONSUME/DS_APPEND use M0 for the base address.
        // TODO: find the implicit use operand for M0 and use that as BaseOp?
        return false;
      }
      BaseOps.push_back(BaseOp);
      Offset = OffsetOp->getImm();
      // Get appropriate operand, and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1)
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
      Width = getOpSize(LdSt, DataOpIdx);
    } else {
      // The 2 offset instructions use offset0 and offset1 instead. We can treat
      // these as a load with a single offset if the 2 offsets are consecutive.
      // We will use this for some partially aligned loads.
      const MachineOperand *Offset0Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset0);
      const MachineOperand *Offset1Op =
          getNamedOperand(LdSt, AMDGPU::OpName::offset1);

      unsigned Offset0 = Offset0Op->getImm();
      unsigned Offset1 = Offset1Op->getImm();
      if (Offset0 + 1 != Offset1)
        return false;

      // Each of these offsets is in element sized units, so we need to convert
      // to bytes of the individual reads.

      unsigned EltSize;
      if (LdSt.mayLoad())
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16;
      else {
        assert(LdSt.mayStore());
        int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8;
      }

      if (isStride64(Opc))
        EltSize *= 64;

      BaseOps.push_back(BaseOp);
      Offset = EltSize * Offset0;
      // Get appropriate operand(s), and compute width accordingly.
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
      if (DataOpIdx == -1) {
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0);
        Width = getOpSize(LdSt, DataOpIdx);
        DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1);
        Width += getOpSize(LdSt, DataOpIdx);
      } else {
        Width = getOpSize(LdSt, DataOpIdx);
      }
    }
    return true;
  }

  if (isMUBUF(LdSt) || isMTBUF(LdSt)) {
    const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc);
    if (!RSrc) // e.g. BUFFER_WBINVL1_VOL
      return false;
    BaseOps.push_back(RSrc);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp && !BaseOp->isFI())
      BaseOps.push_back(BaseOp);
    const MachineOperand *OffsetImm =
        getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetImm->getImm();
    const MachineOperand *SOffset =
        getNamedOperand(LdSt, AMDGPU::OpName::soffset);
    if (SOffset) {
      if (SOffset->isReg())
        BaseOps.push_back(SOffset);
      else
        Offset += SOffset->getImm();
    }
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isMIMG(LdSt)) {
    int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
    BaseOps.push_back(&LdSt.getOperand(SRsrcIdx));
    int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
    if (VAddr0Idx >= 0) {
      // GFX10 possible NSA encoding.
      for (int I = VAddr0Idx; I < SRsrcIdx; ++I)
        BaseOps.push_back(&LdSt.getOperand(I));
    } else {
      BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr));
    }
    Offset = 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isSMRD(LdSt)) {
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase);
    if (!BaseOp) // e.g. S_MEMTIME
      return false;
    BaseOps.push_back(BaseOp);
    OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset);
    Offset = OffsetOp ? OffsetOp->getImm() : 0;
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both or none.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // In order to avoid register pressure, on average, the number of DWORDS
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering too many
  // sub-word loads, and also avoids clustering wide loads. Below is a brief
  // summary of how the heuristic behaves for various `LoadSize`.
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have less than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to directly copy, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find a defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
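      // Walk the instructions between the def and the copy; if any of them
      // writes DefOp's register, fall back to copying through a temporary
      // VGPR instead of propagating the operand.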
      bool SafeToPropagate = true;
      for (auto I = Def; I != MI && SafeToPropagate; ++I)
        if (I->modifiesRegister(DefOp.getReg(), &RI))
          SafeToPropagate = false;

      if (!SafeToPropagate)
        break;

      DefOp.setIsKill(false);
    }

    MachineInstrBuilder Builder =
      BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .add(DefOp);
    if (ImpDefSuperReg)
      Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);

    if (ImpUseSuperReg) {
      Builder.addReg(ImpUseSuperReg,
                     getKillRegState(KillSrc) | RegState::Implicit);
    }

    return;
  }

  RS.enterBasicBlock(MBB);
  RS.forward(MI);

  // Ideally we want to have three registers for a long reg_sequence copy
  // to hide 2 waitstates between v_mov_b32 and accvgpr_write.
  unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass,
                                             *MBB.getParent());

  // Registers in the sequence are allocated contiguously so we can just
  // use register number to pick one of three round-robin temps.
  unsigned RegNo = DestReg % 3;
  Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
  if (!Tmp)
    report_fatal_error("Cannot scavenge VGPR to copy to AGPR");
  RS.setRegUsed(Tmp);

  if (!TII.getSubtarget().hasGFX90AInsts()) {
    // Only loop through if there are any free registers left, otherwise
    // scavenger may report a fatal error without emergency spill slot
    // or spill with the slot.
    while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) {
      Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0);
      if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs)
        break;
      Tmp = Tmp2;
      RS.setRegUsed(Tmp);
    }
  }

  // Insert copy to temporary VGPR.
  unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32;
  if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) {
    TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else {
    assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
  }

  MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp)
                                     .addReg(SrcReg, getKillRegState(KillSrc));
  if (ImpUseSuperReg) {
    UseBuilder.addReg(ImpUseSuperReg,
                      getKillRegState(KillSrc) | RegState::Implicit);
  }

  MachineInstrBuilder DefBuilder
    = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .addReg(Tmp, RegState::Kill);

  if (ImpDefSuperReg)
    DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit);
}

static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MI, const DebugLoc &DL,
                           MCRegister DestReg, MCRegister SrcReg, bool KillSrc,
                           const TargetRegisterClass *RC, bool Forward) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();
  ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4);
  MachineBasicBlock::iterator I = MI;
  MachineInstr *FirstMI = nullptr, *LastMI = nullptr;

  for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) {
    int16_t SubIdx = BaseIndices[Idx];
    Register Reg = RI.getSubReg(DestReg, SubIdx);
    unsigned Opcode = AMDGPU::S_MOV_B32;

    // Is SGPR aligned? If so try to combine with next.
    Register Src = RI.getSubReg(SrcReg, SubIdx);
    bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
               .addReg(RI.getSubReg(SrcReg, SubIdx))
               .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16 bit and 32 bit
  // registers until all patterns are fixed.
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert an empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
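        // Materialize the i1 copy by comparing the VGPR source against zero;
        // V_CMP_NE_U32 writes its result to VCC (VCC_LO in wave32) implicitly.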
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_64RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC) {
      if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC)
          .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
        assert(AMDGPU::VGPR_32RegClass.contains(SrcReg));
        BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32))
          .addImm(0)
          .addReg(SrcReg, getKillRegState(KillSrc));
      }

      return;
    }

    if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }

    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (DestReg == AMDGPU::SCC) {
    // Copying 64-bit or 32-bit sources to SCC barely makes sense,
    // but SelectionDAG emits such copies for i1 sources.
    if (AMDGPU::SReg_64RegClass.contains(SrcReg)) {
      // This copy can only be produced by patterns
      // with explicit SCC, which are known to be enabled
      // only for subtargets with S_CMP_LG_U64 present.
      assert(ST.hasScalarCompareEq64());
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    } else {
      assert(AMDGPU::SReg_32RegClass.contains(SrcReg));
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32))
          .addReg(SrcReg, getKillRegState(KillSrc))
          .addImm(0);
    }

    return;
  }

  if (RC == &AMDGPU::AGPR_32RegClass) {
    if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
      return;
    }

    // FIXME: Pass should maintain scavenger to avoid scan through the block on
    // every AGPR spill.
    RegScavenger RS;
    indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS);
    return;
  }

  const unsigned Size = RI.getRegSizeInBits(*RC);
  if (Size == 16) {
    assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
           AMDGPU::VGPR_HI16RegClass.contains(SrcReg) ||
           AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_LO16RegClass.contains(SrcReg));

    bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg);
    bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg);
    bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(DestReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(DestReg);
    bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::SReg_LO16RegClass.contains(SrcReg) ||
                  AMDGPU::AGPR_LO16RegClass.contains(SrcReg);
    MCRegister NewDestReg = RI.get32BitRegister(DestReg);
    MCRegister NewSrcReg = RI.get32BitRegister(SrcReg);

    if (IsSGPRDst) {
      if (!IsSGPRSrc) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
        return;
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    if (IsAGPRDst || IsAGPRSrc) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg with an AGPR!");
      }

      copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc);
      return;
    }

    if (IsSGPRSrc && !ST.hasSDWAScalar()) {
      if (!DstLow || !SrcLow) {
        reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc,
                          "Cannot use hi16 subreg on VI!");
      }

      BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg)
        .addReg(NewSrcReg, getKillRegState(KillSrc));
      return;
    }

    auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg)
                 .addImm(0) // src0_modifiers
                 .addReg(NewSrcReg)
                 .addImm(0) // clamp
                 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE)
                 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0
                                : AMDGPU::SDWA::SdwaSel::WORD_1)
                 .addReg(NewDestReg, RegState::Implicit | RegState::Undef);
    // First implicit operand is $exec.
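    // Tie the SDWA destination to the trailing implicit-undef use of the same
    // 32-bit register so the preserved half stays live across the copy.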
    MIB->tieOperands(0, MIB->getNumOperands() - 1);
    return;
  }

  const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg);
  if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) {
    if (ST.hasPackedFP32Ops()) {
      BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg)
        .addImm(SISrcMods::OP_SEL_1)
        .addReg(SrcReg)
        .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
        .addReg(SrcReg)
        .addImm(0) // op_sel_lo
        .addImm(0) // op_sel_hi
        .addImm(0) // neg_lo
        .addImm(0) // neg_hi
        .addImm(0) // clamp
        .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit);
      return;
    }
  }

  const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg);
  if (RI.isSGPRClass(RC)) {
    if (!RI.isSGPRClass(SrcRC)) {
      reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc);
      return;
    }
    expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.hasAGPRs(RC)) {
    Opcode = (RI.hasVGPRs(SrcRC)) ?
      AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::INSTRUCTION_LIST_END;
  } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(SrcRC)) {
    Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64;
  } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) &&
             (RI.isProperlyAlignedRC(*RC) &&
              (SrcRC == RC || RI.isSGPRClass(SrcRC)))) {
    // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov.
    if (ST.hasPackedFP32Ops()) {
      Opcode = AMDGPU::V_PK_MOV_B32;
      EltSize = 8;
    }
  }

  // For the cases where we need an intermediate instruction/temporary register
  // (destination is an AGPR), we need a scavenger.
  //
  // FIXME: The pass should maintain this for us so we don't have to re-scan
  // the whole block for every handled copy.
  std::unique_ptr<RegScavenger> RS;
  if (Opcode == AMDGPU::INSTRUCTION_LIST_END)
    RS.reset(new RegScavenger());

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize);

  // If there is an overlap, we can't kill the super-register on the last
  // instruction, since it will also kill the components made live by this def.
  const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg);

  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    unsigned SubIdx;
    if (Forward)
      SubIdx = SubIndices[Idx];
    else
      SubIdx = SubIndices[SubIndices.size() - Idx - 1];

    bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1;

    if (Opcode == AMDGPU::INSTRUCTION_LIST_END) {
      Register ImpDefSuper = Idx == 0 ? Register(DestReg) : Register();
      Register ImpUseSuper = SrcReg;
      indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx),
                         RI.getSubReg(SrcReg, SubIdx), UseKill, *RS,
                         ImpDefSuper, ImpUseSuper);
    } else if (Opcode == AMDGPU::V_PK_MOV_B32) {
      Register DstSubReg = RI.getSubReg(DestReg, SubIdx);
      Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx);
      MachineInstrBuilder MIB =
        BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg)
          .addImm(SISrcMods::OP_SEL_1)
          .addReg(SrcSubReg)
          .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1)
          .addReg(SrcSubReg)
          .addImm(0) // op_sel_lo
          .addImm(0) // op_sel_hi
          .addImm(0) // neg_lo
          .addImm(0) // neg_hi
          .addImm(0) // clamp
          .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
      if (Idx == 0)
        MIB.addReg(DestReg, RegState::Define | RegState::Implicit);
    } else {
      MachineInstrBuilder Builder =
        BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx))
          .addReg(RI.getSubReg(SrcReg, SubIdx));
      if (Idx == 0)
        Builder.addReg(DestReg, RegState::Define | RegState::Implicit);

      Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit);
    }
  }
}

int SIInstrInfo::commuteOpcode(unsigned Opcode) const {
  int NewOpc;

  // Try to map original to commuted opcode
  NewOpc = AMDGPU::getCommuteRev(Opcode);
  if (NewOpc != -1)
    // Check if the commuted (REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  // Try to map commuted to original opcode
  NewOpc = AMDGPU::getCommuteOrig(Opcode);
  if (NewOpc != -1)
    // Check if the original (non-REV) opcode exists on the target.
    return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1;

  return Opcode;
}

void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       const DebugLoc &DL, unsigned DestReg,
                                       int64_t Value) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg);
  if (RegClass == &AMDGPU::SReg_32RegClass ||
      RegClass == &AMDGPU::SGPR_32RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0RegClass ||
      RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::SReg_64RegClass ||
      RegClass == &AMDGPU::SGPR_64RegClass ||
      RegClass == &AMDGPU::SReg_64_XEXECRegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg)
      .addImm(Value);
    return;
  }

  if (RegClass == &AMDGPU::VGPR_32RegClass) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg)
      .addImm(Value);
    return;
  }
  if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) {
    BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg)
      .addImm(Value);
    return;
  }

  unsigned EltSize = 4;
  unsigned Opcode = AMDGPU::V_MOV_B32_e32;
  if (RI.isSGPRClass(RegClass)) {
    if (RI.getRegSizeInBits(*RegClass) > 32) {
      Opcode = AMDGPU::S_MOV_B64;
      EltSize = 8;
    } else {
      Opcode = AMDGPU::S_MOV_B32;
      EltSize = 4;
    }
  }

  ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize);
  for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) {
    int64_t IdxValue = Idx == 0 ? Value : 0;
    MachineInstrBuilder Builder = BuildMI(MBB, MI, DL,
      get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx]));
    Builder.addImm(IdxValue);
  }
}

const TargetRegisterClass *
SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const {
  return &AMDGPU::VGPR_32RegClass;
}

void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator I,
                                     const DebugLoc &DL, Register DstReg,
                                     ArrayRef<MachineOperand> Cond,
                                     Register TrueReg,
                                     Register FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *BoolXExecRC =
    RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
  assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass &&
         "Not a VGPR32 reg");

  if (Cond.size() == 1) {
    Register SReg = MRI.createVirtualRegister(BoolXExecRC);
    BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
      .add(Cond[0]);
    BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
      .addImm(0)
      .addReg(FalseReg)
      .addImm(0)
      .addReg(TrueReg)
      .addReg(SReg);
  } else if (Cond.size() == 2) {
    assert(Cond[0].isImm() && "Cond[0] is not an immediate");
    switch (Cond[0].getImm()) {
    case SIInstrInfo::SCC_TRUE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::SCC_FALSE: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCNZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::VCCZ: {
      MachineOperand RegOp = Cond[1];
      RegOp.setImplicit(false);
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg)
        .add(RegOp);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(TrueReg)
        .addImm(0)
        .addReg(FalseReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECNZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(1)
        .addImm(0);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      break;
    }
    case SIInstrInfo::EXECZ: {
      Register SReg = MRI.createVirtualRegister(BoolXExecRC);
      Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC());
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
                                            : AMDGPU::S_OR_SAVEEXEC_B64), SReg2)
        .addImm(0);
      BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32
                                            : AMDGPU::S_CSELECT_B64), SReg)
        .addImm(0)
        .addImm(1);
      BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
        .addImm(0)
        .addReg(FalseReg)
        .addImm(0)
        .addReg(TrueReg)
        .addReg(SReg);
      llvm_unreachable("Unhandled branch predicate EXECZ");
      break;
    }
    default:
      llvm_unreachable("invalid branch predicate");
    }
  } else {
    llvm_unreachable("Can only handle Cond size 1 or 2");
  }
}

Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

Register SIInstrInfo::insertNE(MachineBasicBlock *MBB,
                               MachineBasicBlock::iterator I,
                               const DebugLoc &DL,
                               Register SrcReg, int Value) const {
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  Register Reg = MRI.createVirtualRegister(RI.getBoolRC());
  BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg)
    .addImm(Value)
    .addReg(SrcReg);

  return Reg;
}

unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const {

  if (RI.hasAGPRs(DstRC))
    return AMDGPU::COPY;
  if (RI.getRegSizeInBits(*DstRC) == 32) {
    return RI.isSGPRClass(DstRC) ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) {
    return AMDGPU::S_MOV_B64;
  } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) {
    return AMDGPU::V_MOV_B64_PSEUDO;
  }
  return AMDGPU::COPY;
}

const MCInstrDesc &
SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize,
                                     bool IsIndirectSrc) const {
  if (IsIndirectSrc) {
    if (VecSize <= 32) // 4 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1);
    if (VecSize <= 64) // 8 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2);
    if (VecSize <= 96) // 12 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3);
    if (VecSize <= 128) // 16 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4);
    if (VecSize <= 160) // 20 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5);
    if (VecSize <= 256) // 32 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8);
    if (VecSize <= 512) // 64 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16);
    if (VecSize <= 1024) // 128 bytes
      return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32);

    llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos");
  }

  if (VecSize <= 32) // 4 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1);
  if (VecSize <= 64) // 8 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2);
  if (VecSize <= 96) // 12 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3);
  if (VecSize <= 128) // 16 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4);
  if (VecSize <= 160) // 20 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5);
  if (VecSize <= 256) // 32 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8);
  if (VecSize <= 512) // 64 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16);
  if (VecSize <= 1024) // 128 bytes
    return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32);

  llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos");
}

static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) {
  if (VecSize <= 32) // 4 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1;
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2;
  if (VecSize <= 96) // 12 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4;
  if (VecSize <= 160) // 20 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) {
  if (VecSize <= 64) // 8 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1;
  if (VecSize <= 128) // 16 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2;
  if (VecSize <= 256) // 32 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4;
  if (VecSize <= 512) // 64 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8;
  if (VecSize <= 1024) // 128 bytes
    return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16;

  llvm_unreachable("unsupported size for IndirectRegWrite pseudos");
}

const MCInstrDesc &
SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize,
                                             bool IsSGPR) const {
  if (IsSGPR) {
    switch (EltSize) {
    case 32:
      return get(getIndirectSGPRWriteMovRelPseudo32(VecSize));
    case 64:
      return get(getIndirectSGPRWriteMovRelPseudo64(VecSize));
    default:
      llvm_unreachable("invalid reg indexing elt size");
    }
  }

  assert(EltSize == 32 && "invalid reg indexing elt size");
  return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize));
}

static unsigned getSGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_S64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_S96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_S128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_S160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_S192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_S224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_S256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_S512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_V64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_V96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_V128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_V160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_V192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_V224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_V256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_V512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillSaveOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_SAVE;
  case 8:
    return AMDGPU::SI_SPILL_A64_SAVE;
  case 12:
    return AMDGPU::SI_SPILL_A96_SAVE;
  case 16:
    return AMDGPU::SI_SPILL_A128_SAVE;
  case 20:
    return AMDGPU::SI_SPILL_A160_SAVE;
  case 24:
    return AMDGPU::SI_SPILL_A192_SAVE;
  case 28:
    return AMDGPU::SI_SPILL_A224_SAVE;
  case 32:
    return AMDGPU::SI_SPILL_A256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
      .addReg(SrcReg, getKillRegState(isKill)) // data
      .addFrameIndex(FrameIndex)               // addr
      .addMemOperand(MMO)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillSaveOpcode(SpillSize)
                                    : getVGPRSpillSaveOpcode(SpillSize);
  MFI->setHasSpilledVGPRs();

  BuildMI(MBB, MI, DL, get(Opcode))
    .addReg(SrcReg, getKillRegState(isKill)) // data
    .addFrameIndex(FrameIndex)               // addr
    .addReg(MFI->getStackPtrOffsetReg())     // scratch_offset
    .addImm(0)                               // offset
    .addMemOperand(MMO);
}

static unsigned getSGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_S32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_S64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_S96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_S128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_S160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_S192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_S224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_S256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_S512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_S1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getVGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_V32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_V64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_V96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_V128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_V160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_V192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_V224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_V256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_V512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_V1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

static unsigned getAGPRSpillRestoreOpcode(unsigned Size) {
  switch (Size) {
  case 4:
    return AMDGPU::SI_SPILL_A32_RESTORE;
  case 8:
    return AMDGPU::SI_SPILL_A64_RESTORE;
  case 12:
    return AMDGPU::SI_SPILL_A96_RESTORE;
  case 16:
    return AMDGPU::SI_SPILL_A128_RESTORE;
  case 20:
    return AMDGPU::SI_SPILL_A160_RESTORE;
  case 24:
    return AMDGPU::SI_SPILL_A192_RESTORE;
  case 28:
    return AMDGPU::SI_SPILL_A224_RESTORE;
  case 32:
    return AMDGPU::SI_SPILL_A256_RESTORE;
  case 64:
    return AMDGPU::SI_SPILL_A512_RESTORE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_RESTORE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       Register DestReg, int FrameIndex,
                                       const TargetRegisterClass *RC,
                                       const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);
  unsigned SpillSize = TRI->getSpillSize(*RC);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);

  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into");
    assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI &&
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
      .addFrameIndex(FrameIndex) // addr
      .addMemOperand(MMO)
      .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
    .addFrameIndex(FrameIndex)           // vaddr
    .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
    .addImm(0)                           // offset
    .addMemOperand(MMO);
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertNoops(MBB, MI, 1);
}

void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              unsigned Quantity) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
  }
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    if (MI.isMetaInstruction())
      return 0;
    return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;

  // FIXME: Any other pseudo instruction?
  // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
  // hazard, even if one exists, won't really be visible. Should we handle it?
  case AMDGPU::SI_MASKED_UNREACHABLE:
  case AMDGPU::WAVE_BARRIER:
    return 0;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
1673 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1674 break; 1675 1676 case AMDGPU::S_XOR_B64_term: 1677 // This is only a terminator to get the correct spill code placement during 1678 // register allocation. 1679 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1680 break; 1681 1682 case AMDGPU::S_XOR_B32_term: 1683 // This is only a terminator to get the correct spill code placement during 1684 // register allocation. 1685 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1686 break; 1687 case AMDGPU::S_OR_B64_term: 1688 // This is only a terminator to get the correct spill code placement during 1689 // register allocation. 1690 MI.setDesc(get(AMDGPU::S_OR_B64)); 1691 break; 1692 case AMDGPU::S_OR_B32_term: 1693 // This is only a terminator to get the correct spill code placement during 1694 // register allocation. 1695 MI.setDesc(get(AMDGPU::S_OR_B32)); 1696 break; 1697 1698 case AMDGPU::S_ANDN2_B64_term: 1699 // This is only a terminator to get the correct spill code placement during 1700 // register allocation. 1701 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1702 break; 1703 1704 case AMDGPU::S_ANDN2_B32_term: 1705 // This is only a terminator to get the correct spill code placement during 1706 // register allocation. 1707 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1708 break; 1709 1710 case AMDGPU::S_AND_B64_term: 1711 // This is only a terminator to get the correct spill code placement during 1712 // register allocation. 1713 MI.setDesc(get(AMDGPU::S_AND_B64)); 1714 break; 1715 1716 case AMDGPU::S_AND_B32_term: 1717 // This is only a terminator to get the correct spill code placement during 1718 // register allocation. 1719 MI.setDesc(get(AMDGPU::S_AND_B32)); 1720 break; 1721 1722 case AMDGPU::V_MOV_B64_PSEUDO: { 1723 Register Dst = MI.getOperand(0).getReg(); 1724 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1725 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1726 1727 const MachineOperand &SrcOp = MI.getOperand(1); 1728 // FIXME: Will this work for 64-bit floating point immediates? 
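// The 64-bit immediate is split into 32-bit halves below; e.g. an immediate of
// 0x0000000100000002 becomes Lo = 2 (written to sub0) and Hi = 1 (written to
// sub1) via two V_MOV_B32_e32, unless both halves are the same inline constant
// and the subtarget has packed FP32 ops, in which case a single V_PK_MOV_B32
// broadcasts the value.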
1729 assert(!SrcOp.isFPImm()); 1730 if (SrcOp.isImm()) { 1731 APInt Imm(64, SrcOp.getImm()); 1732 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1733 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1734 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) { 1735 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1736 .addImm(SISrcMods::OP_SEL_1) 1737 .addImm(Lo.getSExtValue()) 1738 .addImm(SISrcMods::OP_SEL_1) 1739 .addImm(Lo.getSExtValue()) 1740 .addImm(0) // op_sel_lo 1741 .addImm(0) // op_sel_hi 1742 .addImm(0) // neg_lo 1743 .addImm(0) // neg_hi 1744 .addImm(0); // clamp 1745 } else { 1746 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1747 .addImm(Lo.getSExtValue()) 1748 .addReg(Dst, RegState::Implicit | RegState::Define); 1749 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1750 .addImm(Hi.getSExtValue()) 1751 .addReg(Dst, RegState::Implicit | RegState::Define); 1752 } 1753 } else { 1754 assert(SrcOp.isReg()); 1755 if (ST.hasPackedFP32Ops() && 1756 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { 1757 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1758 .addImm(SISrcMods::OP_SEL_1) // src0_mod 1759 .addReg(SrcOp.getReg()) 1760 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod 1761 .addReg(SrcOp.getReg()) 1762 .addImm(0) // op_sel_lo 1763 .addImm(0) // op_sel_hi 1764 .addImm(0) // neg_lo 1765 .addImm(0) // neg_hi 1766 .addImm(0); // clamp 1767 } else { 1768 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1769 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1770 .addReg(Dst, RegState::Implicit | RegState::Define); 1771 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1772 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1773 .addReg(Dst, RegState::Implicit | RegState::Define); 1774 } 1775 } 1776 MI.eraseFromParent(); 1777 break; 1778 } 1779 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1780 expandMovDPP64(MI); 1781 break; 1782 } 1783 case AMDGPU::S_MOV_B64_IMM_PSEUDO: { 1784 const MachineOperand &SrcOp = MI.getOperand(1); 1785 assert(!SrcOp.isFPImm()); 1786 APInt Imm(64, SrcOp.getImm()); 1787 if (Imm.isIntN(32) || isInlineConstant(Imm)) { 1788 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1789 break; 1790 } 1791 1792 Register Dst = MI.getOperand(0).getReg(); 1793 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1794 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1795 1796 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1797 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1798 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo) 1799 .addImm(Lo.getSExtValue()) 1800 .addReg(Dst, RegState::Implicit | RegState::Define); 1801 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi) 1802 .addImm(Hi.getSExtValue()) 1803 .addReg(Dst, RegState::Implicit | RegState::Define); 1804 MI.eraseFromParent(); 1805 break; 1806 } 1807 case AMDGPU::V_SET_INACTIVE_B32: { 1808 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1809 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1810 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1811 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1812 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1813 .add(MI.getOperand(2)); 1814 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1815 .addReg(Exec); 1816 MI.eraseFromParent(); 1817 break; 1818 } 1819 case AMDGPU::V_SET_INACTIVE_B64: { 1820 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1821 unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1822 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1823 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1824 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1825 MI.getOperand(0).getReg()) 1826 .add(MI.getOperand(2)); 1827 expandPostRAPseudo(*Copy); 1828 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1829 .addReg(Exec); 1830 MI.eraseFromParent(); 1831 break; 1832 } 1833 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1834 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1835 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1836 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1837 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1838 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1839 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1840 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1841 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1842 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1843 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1844 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1845 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1846 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1847 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1848 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1849 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1: 1850 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2: 1851 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4: 1852 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8: 1853 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: { 1854 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1855 1856 unsigned Opc; 1857 if (RI.hasVGPRs(EltRC)) { 1858 Opc = AMDGPU::V_MOVRELD_B32_e32; 1859 } else { 1860 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 1861 : AMDGPU::S_MOVRELD_B32; 1862 } 1863 1864 const MCInstrDesc &OpDesc = get(Opc); 1865 Register VecReg = MI.getOperand(0).getReg(); 1866 bool IsUndef = MI.getOperand(1).isUndef(); 1867 unsigned SubReg = MI.getOperand(3).getImm(); 1868 assert(VecReg == MI.getOperand(1).getReg()); 1869 1870 MachineInstrBuilder MIB = 1871 BuildMI(MBB, MI, DL, OpDesc) 1872 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1873 .add(MI.getOperand(2)) 1874 .addReg(VecReg, RegState::ImplicitDefine) 1875 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)); 1876 1877 const int ImpDefIdx = 1878 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1879 const int ImpUseIdx = ImpDefIdx + 1; 1880 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1881 MI.eraseFromParent(); 1882 break; 1883 } 1884 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1: 1885 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2: 1886 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3: 1887 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4: 1888 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5: 1889 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8: 1890 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16: 1891 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: { 1892 assert(ST.useVGPRIndexMode()); 1893 Register VecReg = MI.getOperand(0).getReg(); 1894 bool IsUndef = MI.getOperand(1).isUndef(); 1895 Register Idx = MI.getOperand(3).getReg(); 1896 Register SubReg = MI.getOperand(4).getImm(); 1897 1898 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 1899 .addReg(Idx) 1900 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE); 1901 SetOn->getOperand(3).setIsUndef(); 1902 1903 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect); 1904 MachineInstrBuilder MIB = 1905 BuildMI(MBB, MI, DL, OpDesc) 1906 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1907 .add(MI.getOperand(2)) 1908 .addReg(VecReg, RegState::ImplicitDefine) 1909 .addReg(VecReg, 1910 RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1911 1912 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1913 const int ImpUseIdx = ImpDefIdx + 1; 1914 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1915 1916 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 1917 1918 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 1919 1920 MI.eraseFromParent(); 1921 break; 1922 } 1923 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1: 1924 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2: 1925 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3: 1926 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4: 1927 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5: 1928 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8: 1929 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16: 1930 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: { 1931 assert(ST.useVGPRIndexMode()); 1932 Register Dst = MI.getOperand(0).getReg(); 1933 Register VecReg = MI.getOperand(1).getReg(); 1934 bool IsUndef = MI.getOperand(1).isUndef(); 1935 Register Idx = MI.getOperand(2).getReg(); 1936 Register SubReg = MI.getOperand(3).getImm(); 1937 1938 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 1939 .addReg(Idx) 1940 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE); 1941 SetOn->getOperand(3).setIsUndef(); 1942 1943 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32)) 1944 .addDef(Dst) 1945 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1946 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0))
1947 .addReg(AMDGPU::M0, RegState::Implicit);
1948
1949 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF));
1950
1951 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator()));
1952
1953 MI.eraseFromParent();
1954 break;
1955 }
1956 case AMDGPU::SI_PC_ADD_REL_OFFSET: {
1957 MachineFunction &MF = *MBB.getParent();
1958 Register Reg = MI.getOperand(0).getReg();
1959 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0);
1960 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1);
1961
1962 // Create a bundle so these instructions won't be re-ordered by the
1963 // post-RA scheduler.
1964 MIBundleBuilder Bundler(MBB, MI);
1965 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg));
1966
1967 // Add 32-bit offset from this instruction to the start of the
1968 // constant data.
1969 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo)
1970 .addReg(RegLo)
1971 .add(MI.getOperand(1)));
1972
1973 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi)
1974 .addReg(RegHi);
1975 MIB.add(MI.getOperand(2));
1976
1977 Bundler.append(MIB);
1978 finalizeBundle(MBB, Bundler.begin());
1979
1980 MI.eraseFromParent();
1981 break;
1982 }
1983 case AMDGPU::ENTER_STRICT_WWM: {
1984 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1985 // Whole Wave Mode is entered.
1986 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32
1987 : AMDGPU::S_OR_SAVEEXEC_B64));
1988 break;
1989 }
1990 case AMDGPU::ENTER_STRICT_WQM: {
1991 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
1992 // STRICT_WQM is entered.
1993 const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC;
1994 const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64;
1995 const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
1996 BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec);
1997 BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec);
1998
1999 MI.eraseFromParent();
2000 break;
2001 }
2002 case AMDGPU::EXIT_STRICT_WWM:
2003 case AMDGPU::EXIT_STRICT_WQM: {
2004 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when
2005 // WWM/STRICT_WQM is exited.
2006 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64));
2007 break;
2008 }
2009 }
2010 return true;
2011 }
2012
2013 std::pair<MachineInstr*, MachineInstr*>
2014 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const {
2015 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO);
2016
2017 MachineBasicBlock &MBB = *MI.getParent();
2018 DebugLoc DL = MBB.findDebugLoc(MI);
2019 MachineFunction *MF = MBB.getParent();
2020 MachineRegisterInfo &MRI = MF->getRegInfo();
2021 Register Dst = MI.getOperand(0).getReg();
2022 unsigned Part = 0;
2023 MachineInstr *Split[2];
2024
2025 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) {
2026 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp));
2027 if (Dst.isPhysical()) {
2028 MovDPP.addDef(RI.getSubReg(Dst, Sub));
2029 } else {
2030 assert(MRI.isSSA());
2031 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2032 MovDPP.addDef(Tmp);
2033 }
2034
2035 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
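// For each 32-bit half, an immediate operand is shifted right by Part * 32 so
// the relevant bits land in the low half, and a register operand is narrowed
// to the matching sub0/sub1 subregister.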
2036 const MachineOperand &SrcOp = MI.getOperand(I); 2037 assert(!SrcOp.isFPImm()); 2038 if (SrcOp.isImm()) { 2039 APInt Imm(64, SrcOp.getImm()); 2040 Imm.ashrInPlace(Part * 32); 2041 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 2042 } else { 2043 assert(SrcOp.isReg()); 2044 Register Src = SrcOp.getReg(); 2045 if (Src.isPhysical()) 2046 MovDPP.addReg(RI.getSubReg(Src, Sub)); 2047 else 2048 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub); 2049 } 2050 } 2051 2052 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 2053 MovDPP.addImm(MI.getOperand(I).getImm()); 2054 2055 Split[Part] = MovDPP; 2056 ++Part; 2057 } 2058 2059 if (Dst.isVirtual()) 2060 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 2061 .addReg(Split[0]->getOperand(0).getReg()) 2062 .addImm(AMDGPU::sub0) 2063 .addReg(Split[1]->getOperand(0).getReg()) 2064 .addImm(AMDGPU::sub1); 2065 2066 MI.eraseFromParent(); 2067 return std::make_pair(Split[0], Split[1]); 2068 } 2069 2070 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 2071 MachineOperand &Src0, 2072 unsigned Src0OpName, 2073 MachineOperand &Src1, 2074 unsigned Src1OpName) const { 2075 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 2076 if (!Src0Mods) 2077 return false; 2078 2079 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 2080 assert(Src1Mods && 2081 "All commutable instructions have both src0 and src1 modifiers"); 2082 2083 int Src0ModsVal = Src0Mods->getImm(); 2084 int Src1ModsVal = Src1Mods->getImm(); 2085 2086 Src1Mods->setImm(Src0ModsVal); 2087 Src0Mods->setImm(Src1ModsVal); 2088 return true; 2089 } 2090 2091 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 2092 MachineOperand &RegOp, 2093 MachineOperand &NonRegOp) { 2094 Register Reg = RegOp.getReg(); 2095 unsigned SubReg = RegOp.getSubReg(); 2096 bool IsKill = RegOp.isKill(); 2097 bool IsDead = RegOp.isDead(); 2098 bool IsUndef = RegOp.isUndef(); 2099 bool IsDebug = RegOp.isDebug(); 2100 2101 if (NonRegOp.isImm()) 2102 RegOp.ChangeToImmediate(NonRegOp.getImm()); 2103 else if (NonRegOp.isFI()) 2104 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 2105 else if (NonRegOp.isGlobal()) { 2106 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 2107 NonRegOp.getTargetFlags()); 2108 } else 2109 return nullptr; 2110 2111 // Make sure we don't reinterpret a subreg index in the target flags. 2112 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 2113 2114 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 2115 NonRegOp.setSubReg(SubReg); 2116 2117 return &MI; 2118 } 2119 2120 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 2121 unsigned Src0Idx, 2122 unsigned Src1Idx) const { 2123 assert(!NewMI && "this should never be used"); 2124 2125 unsigned Opc = MI.getOpcode(); 2126 int CommutedOpcode = commuteOpcode(Opc); 2127 if (CommutedOpcode == -1) 2128 return nullptr; 2129 2130 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 2131 static_cast<int>(Src0Idx) && 2132 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 2133 static_cast<int>(Src1Idx) && 2134 "inconsistency with findCommutedOpIndices"); 2135 2136 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2137 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2138 2139 MachineInstr *CommutedMI = nullptr; 2140 if (Src0.isReg() && Src1.isReg()) { 2141 if (isOperandLegal(MI, Src1Idx, &Src0)) { 2142 // Be sure to copy the source modifiers to the right place. 
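// (The modifier swap itself is done by swapSourceModifiers below once the
// commute has succeeded.)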
2143 CommutedMI 2144 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 2145 } 2146 2147 } else if (Src0.isReg() && !Src1.isReg()) { 2148 // src0 should always be able to support any operand type, so no need to 2149 // check operand legality. 2150 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 2151 } else if (!Src0.isReg() && Src1.isReg()) { 2152 if (isOperandLegal(MI, Src1Idx, &Src0)) 2153 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 2154 } else { 2155 // FIXME: Found two non registers to commute. This does happen. 2156 return nullptr; 2157 } 2158 2159 if (CommutedMI) { 2160 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 2161 Src1, AMDGPU::OpName::src1_modifiers); 2162 2163 CommutedMI->setDesc(get(CommutedOpcode)); 2164 } 2165 2166 return CommutedMI; 2167 } 2168 2169 // This needs to be implemented because the source modifiers may be inserted 2170 // between the true commutable operands, and the base 2171 // TargetInstrInfo::commuteInstruction uses it. 2172 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 2173 unsigned &SrcOpIdx0, 2174 unsigned &SrcOpIdx1) const { 2175 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 2176 } 2177 2178 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 2179 unsigned &SrcOpIdx1) const { 2180 if (!Desc.isCommutable()) 2181 return false; 2182 2183 unsigned Opc = Desc.getOpcode(); 2184 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2185 if (Src0Idx == -1) 2186 return false; 2187 2188 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2189 if (Src1Idx == -1) 2190 return false; 2191 2192 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 2193 } 2194 2195 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 2196 int64_t BrOffset) const { 2197 // BranchRelaxation should never have to check s_setpc_b64 because its dest 2198 // block is unanalyzable. 2199 assert(BranchOp != AMDGPU::S_SETPC_B64); 2200 2201 // Convert to dwords. 2202 BrOffset /= 4; 2203 2204 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 2205 // from the next instruction. 2206 BrOffset -= 1; 2207 2208 return isIntN(BranchOffsetBits, BrOffset); 2209 } 2210 2211 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 2212 const MachineInstr &MI) const { 2213 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 2214 // This would be a difficult analysis to perform, but can always be legal so 2215 // there's no need to analyze it. 2216 return nullptr; 2217 } 2218 2219 return MI.getOperand(0).getMBB(); 2220 } 2221 2222 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 2223 MachineBasicBlock &DestBB, 2224 const DebugLoc &DL, 2225 int64_t BrOffset, 2226 RegScavenger *RS) const { 2227 assert(RS && "RegScavenger required for long branching"); 2228 assert(MBB.empty() && 2229 "new block should be inserted for expanding unconditional branch"); 2230 assert(MBB.pred_size() == 1); 2231 2232 MachineFunction *MF = MBB.getParent(); 2233 MachineRegisterInfo &MRI = MF->getRegInfo(); 2234 2235 // FIXME: Virtual register workaround for RegScavenger not working with empty 2236 // blocks. 2237 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2238 2239 auto I = MBB.end(); 2240 2241 // We need to compute the offset relative to the instruction immediately after 2242 // s_getpc_b64. Insert pc arithmetic code before last terminator. 
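// The emitted sequence is, with an illustrative register pair s[N:N+1]:
//   s_getpc_b64 s[N:N+1]           ; PostGetPCLabel attached after this
//   s_add_u32   sN, sN, offset_lo
//   s_addc_u32  sN+1, sN+1, offset_hi
//   s_setpc_b64 s[N:N+1]
// offset_lo/offset_hi are temporary symbols resolved against the destination
// block at the end of this function.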
2243 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2244 2245 auto &MCCtx = MF->getContext(); 2246 MCSymbol *PostGetPCLabel = 2247 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true); 2248 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel); 2249 2250 MCSymbol *OffsetLo = 2251 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true); 2252 MCSymbol *OffsetHi = 2253 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true); 2254 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2255 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2256 .addReg(PCReg, 0, AMDGPU::sub0) 2257 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET); 2258 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2259 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2260 .addReg(PCReg, 0, AMDGPU::sub1) 2261 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET); 2262 2263 // Insert the indirect branch after the other terminator. 2264 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2265 .addReg(PCReg); 2266 2267 auto ComputeBlockSize = [](const TargetInstrInfo *TII, 2268 const MachineBasicBlock &MBB) { 2269 unsigned Size = 0; 2270 for (const MachineInstr &MI : MBB) 2271 Size += TII->getInstSizeInBytes(MI); 2272 return Size; 2273 }; 2274 2275 // FIXME: If spilling is necessary, this will fail because this scavenger has 2276 // no emergency stack slots. It is non-trivial to spill in this situation, 2277 // because the restore code needs to be specially placed after the 2278 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2279 // block. 2280 // 2281 // If a spill is needed for the pc register pair, we need to insert a spill 2282 // restore block right before the destination block, and insert a short branch 2283 // into the old destination block's fallthrough predecessor. 2284 // e.g.: 2285 // 2286 // s_cbranch_scc0 skip_long_branch: 2287 // 2288 // long_branch_bb: 2289 // spill s[8:9] 2290 // s_getpc_b64 s[8:9] 2291 // s_add_u32 s8, s8, restore_bb 2292 // s_addc_u32 s9, s9, 0 2293 // s_setpc_b64 s[8:9] 2294 // 2295 // skip_long_branch: 2296 // foo; 2297 // 2298 // ..... 2299 // 2300 // dest_bb_fallthrough_predecessor: 2301 // bar; 2302 // s_branch dest_bb 2303 // 2304 // restore_bb: 2305 // restore s[8:9] 2306 // fallthrough dest_bb 2307 /// 2308 // dest_bb: 2309 // buzz; 2310 2311 RS->enterBasicBlockEnd(MBB); 2312 Register Scav = RS->scavengeRegisterBackwards( 2313 AMDGPU::SReg_64RegClass, 2314 MachineBasicBlock::iterator(GetPC), false, 0); 2315 MRI.replaceRegWith(PCReg, Scav); 2316 MRI.clearVirtRegs(); 2317 RS->setRegUsed(Scav); 2318 2319 // Now, the distance could be defined. 2320 auto *Offset = MCBinaryExpr::createSub( 2321 MCSymbolRefExpr::create(DestBB.getSymbol(), MCCtx), 2322 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx); 2323 // Add offset assignments. 
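// offset_lo is the low 32 bits of (DestBB - PostGetPCLabel) and offset_hi is
// the arithmetically shifted high 32 bits, so a backwards (negative) offset
// still adds up correctly through the s_add_u32 / s_addc_u32 carry chain.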
2324 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx); 2325 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx)); 2326 auto *ShAmt = MCConstantExpr::create(32, MCCtx); 2327 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx)); 2328 return ComputeBlockSize(this, MBB); 2329 } 2330 2331 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2332 switch (Cond) { 2333 case SIInstrInfo::SCC_TRUE: 2334 return AMDGPU::S_CBRANCH_SCC1; 2335 case SIInstrInfo::SCC_FALSE: 2336 return AMDGPU::S_CBRANCH_SCC0; 2337 case SIInstrInfo::VCCNZ: 2338 return AMDGPU::S_CBRANCH_VCCNZ; 2339 case SIInstrInfo::VCCZ: 2340 return AMDGPU::S_CBRANCH_VCCZ; 2341 case SIInstrInfo::EXECNZ: 2342 return AMDGPU::S_CBRANCH_EXECNZ; 2343 case SIInstrInfo::EXECZ: 2344 return AMDGPU::S_CBRANCH_EXECZ; 2345 default: 2346 llvm_unreachable("invalid branch predicate"); 2347 } 2348 } 2349 2350 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2351 switch (Opcode) { 2352 case AMDGPU::S_CBRANCH_SCC0: 2353 return SCC_FALSE; 2354 case AMDGPU::S_CBRANCH_SCC1: 2355 return SCC_TRUE; 2356 case AMDGPU::S_CBRANCH_VCCNZ: 2357 return VCCNZ; 2358 case AMDGPU::S_CBRANCH_VCCZ: 2359 return VCCZ; 2360 case AMDGPU::S_CBRANCH_EXECNZ: 2361 return EXECNZ; 2362 case AMDGPU::S_CBRANCH_EXECZ: 2363 return EXECZ; 2364 default: 2365 return INVALID_BR; 2366 } 2367 } 2368 2369 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2370 MachineBasicBlock::iterator I, 2371 MachineBasicBlock *&TBB, 2372 MachineBasicBlock *&FBB, 2373 SmallVectorImpl<MachineOperand> &Cond, 2374 bool AllowModify) const { 2375 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2376 // Unconditional Branch 2377 TBB = I->getOperand(0).getMBB(); 2378 return false; 2379 } 2380 2381 MachineBasicBlock *CondBB = nullptr; 2382 2383 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2384 CondBB = I->getOperand(1).getMBB(); 2385 Cond.push_back(I->getOperand(0)); 2386 } else { 2387 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2388 if (Pred == INVALID_BR) 2389 return true; 2390 2391 CondBB = I->getOperand(0).getMBB(); 2392 Cond.push_back(MachineOperand::CreateImm(Pred)); 2393 Cond.push_back(I->getOperand(1)); // Save the branch register. 2394 } 2395 ++I; 2396 2397 if (I == MBB.end()) { 2398 // Conditional branch followed by fall-through. 2399 TBB = CondBB; 2400 return false; 2401 } 2402 2403 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2404 TBB = CondBB; 2405 FBB = I->getOperand(0).getMBB(); 2406 return false; 2407 } 2408 2409 return true; 2410 } 2411 2412 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2413 MachineBasicBlock *&FBB, 2414 SmallVectorImpl<MachineOperand> &Cond, 2415 bool AllowModify) const { 2416 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2417 auto E = MBB.end(); 2418 if (I == E) 2419 return false; 2420 2421 // Skip over the instructions that are artificially terminators for special 2422 // exec management. 
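// These are the S_MOV/S_XOR/S_OR/S_ANDN2/S_AND *_term pseudos that
// expandPostRAPseudo rewrites back to their ordinary opcodes.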
2423 while (I != E && !I->isBranch() && !I->isReturn()) { 2424 switch (I->getOpcode()) { 2425 case AMDGPU::S_MOV_B64_term: 2426 case AMDGPU::S_XOR_B64_term: 2427 case AMDGPU::S_OR_B64_term: 2428 case AMDGPU::S_ANDN2_B64_term: 2429 case AMDGPU::S_AND_B64_term: 2430 case AMDGPU::S_MOV_B32_term: 2431 case AMDGPU::S_XOR_B32_term: 2432 case AMDGPU::S_OR_B32_term: 2433 case AMDGPU::S_ANDN2_B32_term: 2434 case AMDGPU::S_AND_B32_term: 2435 break; 2436 case AMDGPU::SI_IF: 2437 case AMDGPU::SI_ELSE: 2438 case AMDGPU::SI_KILL_I1_TERMINATOR: 2439 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2440 // FIXME: It's messy that these need to be considered here at all. 2441 return true; 2442 default: 2443 llvm_unreachable("unexpected non-branch terminator inst"); 2444 } 2445 2446 ++I; 2447 } 2448 2449 if (I == E) 2450 return false; 2451 2452 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2453 } 2454 2455 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2456 int *BytesRemoved) const { 2457 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2458 2459 unsigned Count = 0; 2460 unsigned RemovedSize = 0; 2461 while (I != MBB.end()) { 2462 MachineBasicBlock::iterator Next = std::next(I); 2463 RemovedSize += getInstSizeInBytes(*I); 2464 I->eraseFromParent(); 2465 ++Count; 2466 I = Next; 2467 } 2468 2469 if (BytesRemoved) 2470 *BytesRemoved = RemovedSize; 2471 2472 return Count; 2473 } 2474 2475 // Copy the flags onto the implicit condition register operand. 2476 static void preserveCondRegFlags(MachineOperand &CondReg, 2477 const MachineOperand &OrigCond) { 2478 CondReg.setIsUndef(OrigCond.isUndef()); 2479 CondReg.setIsKill(OrigCond.isKill()); 2480 } 2481 2482 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2483 MachineBasicBlock *TBB, 2484 MachineBasicBlock *FBB, 2485 ArrayRef<MachineOperand> Cond, 2486 const DebugLoc &DL, 2487 int *BytesAdded) const { 2488 if (!FBB && Cond.empty()) { 2489 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2490 .addMBB(TBB); 2491 if (BytesAdded) 2492 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2493 return 1; 2494 } 2495 2496 if(Cond.size() == 1 && Cond[0].isReg()) { 2497 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2498 .add(Cond[0]) 2499 .addMBB(TBB); 2500 return 1; 2501 } 2502 2503 assert(TBB && Cond[0].isImm()); 2504 2505 unsigned Opcode 2506 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2507 2508 if (!FBB) { 2509 Cond[1].isUndef(); 2510 MachineInstr *CondBr = 2511 BuildMI(&MBB, DL, get(Opcode)) 2512 .addMBB(TBB); 2513 2514 // Copy the flags onto the implicit condition register operand. 2515 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2516 fixImplicitOperands(*CondBr); 2517 2518 if (BytesAdded) 2519 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2520 return 1; 2521 } 2522 2523 assert(TBB && FBB); 2524 2525 MachineInstr *CondBr = 2526 BuildMI(&MBB, DL, get(Opcode)) 2527 .addMBB(TBB); 2528 fixImplicitOperands(*CondBr); 2529 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2530 .addMBB(FBB); 2531 2532 MachineOperand &CondReg = CondBr->getOperand(1); 2533 CondReg.setIsUndef(Cond[1].isUndef()); 2534 CondReg.setIsKill(Cond[1].isKill()); 2535 2536 if (BytesAdded) 2537 *BytesAdded = ST.hasOffset3fBug() ? 
16 : 8; 2538 2539 return 2; 2540 } 2541 2542 bool SIInstrInfo::reverseBranchCondition( 2543 SmallVectorImpl<MachineOperand> &Cond) const { 2544 if (Cond.size() != 2) { 2545 return true; 2546 } 2547 2548 if (Cond[0].isImm()) { 2549 Cond[0].setImm(-Cond[0].getImm()); 2550 return false; 2551 } 2552 2553 return true; 2554 } 2555 2556 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2557 ArrayRef<MachineOperand> Cond, 2558 Register DstReg, Register TrueReg, 2559 Register FalseReg, int &CondCycles, 2560 int &TrueCycles, int &FalseCycles) const { 2561 switch (Cond[0].getImm()) { 2562 case VCCNZ: 2563 case VCCZ: { 2564 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2565 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2566 if (MRI.getRegClass(FalseReg) != RC) 2567 return false; 2568 2569 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2570 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2571 2572 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2573 return RI.hasVGPRs(RC) && NumInsts <= 6; 2574 } 2575 case SCC_TRUE: 2576 case SCC_FALSE: { 2577 // FIXME: We could insert for VGPRs if we could replace the original compare 2578 // with a vector one. 2579 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2580 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2581 if (MRI.getRegClass(FalseReg) != RC) 2582 return false; 2583 2584 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2585 2586 // Multiples of 8 can do s_cselect_b64 2587 if (NumInsts % 2 == 0) 2588 NumInsts /= 2; 2589 2590 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2591 return RI.isSGPRClass(RC); 2592 } 2593 default: 2594 return false; 2595 } 2596 } 2597 2598 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2599 MachineBasicBlock::iterator I, const DebugLoc &DL, 2600 Register DstReg, ArrayRef<MachineOperand> Cond, 2601 Register TrueReg, Register FalseReg) const { 2602 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2603 if (Pred == VCCZ || Pred == SCC_FALSE) { 2604 Pred = static_cast<BranchPredicate>(-Pred); 2605 std::swap(TrueReg, FalseReg); 2606 } 2607 2608 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2609 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2610 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2611 2612 if (DstSize == 32) { 2613 MachineInstr *Select; 2614 if (Pred == SCC_TRUE) { 2615 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2616 .addReg(TrueReg) 2617 .addReg(FalseReg); 2618 } else { 2619 // Instruction's operands are backwards from what is expected. 
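// V_CNDMASK_B32 selects src1 for lanes where the condition bit is set and
// src0 otherwise, so FalseReg is placed in src0 and TrueReg in src1.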
2620 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2621 .addReg(FalseReg) 2622 .addReg(TrueReg); 2623 } 2624 2625 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2626 return; 2627 } 2628 2629 if (DstSize == 64 && Pred == SCC_TRUE) { 2630 MachineInstr *Select = 2631 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2632 .addReg(TrueReg) 2633 .addReg(FalseReg); 2634 2635 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2636 return; 2637 } 2638 2639 static const int16_t Sub0_15[] = { 2640 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2641 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2642 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2643 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2644 }; 2645 2646 static const int16_t Sub0_15_64[] = { 2647 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2648 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2649 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2650 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2651 }; 2652 2653 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2654 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2655 const int16_t *SubIndices = Sub0_15; 2656 int NElts = DstSize / 32; 2657 2658 // 64-bit select is only available for SALU. 2659 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2660 if (Pred == SCC_TRUE) { 2661 if (NElts % 2) { 2662 SelOp = AMDGPU::S_CSELECT_B32; 2663 EltRC = &AMDGPU::SGPR_32RegClass; 2664 } else { 2665 SelOp = AMDGPU::S_CSELECT_B64; 2666 EltRC = &AMDGPU::SGPR_64RegClass; 2667 SubIndices = Sub0_15_64; 2668 NElts /= 2; 2669 } 2670 } 2671 2672 MachineInstrBuilder MIB = BuildMI( 2673 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2674 2675 I = MIB->getIterator(); 2676 2677 SmallVector<Register, 8> Regs; 2678 for (int Idx = 0; Idx != NElts; ++Idx) { 2679 Register DstElt = MRI.createVirtualRegister(EltRC); 2680 Regs.push_back(DstElt); 2681 2682 unsigned SubIdx = SubIndices[Idx]; 2683 2684 MachineInstr *Select; 2685 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2686 Select = 2687 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2688 .addReg(FalseReg, 0, SubIdx) 2689 .addReg(TrueReg, 0, SubIdx); 2690 } else { 2691 Select = 2692 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2693 .addReg(TrueReg, 0, SubIdx) 2694 .addReg(FalseReg, 0, SubIdx); 2695 } 2696 2697 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2698 fixImplicitOperands(*Select); 2699 2700 MIB.addReg(DstElt) 2701 .addImm(SubIdx); 2702 } 2703 } 2704 2705 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) { 2706 switch (MI.getOpcode()) { 2707 case AMDGPU::V_MOV_B32_e32: 2708 case AMDGPU::V_MOV_B32_e64: 2709 case AMDGPU::V_MOV_B64_PSEUDO: { 2710 // If there are additional implicit register operands, this may be used for 2711 // register indexing so the source register operand isn't simply copied. 
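// For example, a V_MOV_B32 expanded from the GPR-indexing pseudos above
// carries an extra implicit M0 use and must not be treated as a plain copy.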
2712 unsigned NumOps = MI.getDesc().getNumOperands() +
2713 MI.getDesc().getNumImplicitUses();
2714
2715 return MI.getNumOperands() == NumOps;
2716 }
2717 case AMDGPU::S_MOV_B32:
2718 case AMDGPU::S_MOV_B64:
2719 case AMDGPU::COPY:
2720 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2721 case AMDGPU::V_ACCVGPR_READ_B32_e64:
2722 case AMDGPU::V_ACCVGPR_MOV_B32:
2723 return true;
2724 default:
2725 return false;
2726 }
2727 }
2728
2729 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind(
2730 unsigned Kind) const {
2731 switch(Kind) {
2732 case PseudoSourceValue::Stack:
2733 case PseudoSourceValue::FixedStack:
2734 return AMDGPUAS::PRIVATE_ADDRESS;
2735 case PseudoSourceValue::ConstantPool:
2736 case PseudoSourceValue::GOT:
2737 case PseudoSourceValue::JumpTable:
2738 case PseudoSourceValue::GlobalValueCallEntry:
2739 case PseudoSourceValue::ExternalSymbolCallEntry:
2740 case PseudoSourceValue::TargetCustom:
2741 return AMDGPUAS::CONSTANT_ADDRESS;
2742 }
2743 return AMDGPUAS::FLAT_ADDRESS;
2744 }
2745
2746 static void removeModOperands(MachineInstr &MI) {
2747 unsigned Opc = MI.getOpcode();
2748 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2749 AMDGPU::OpName::src0_modifiers);
2750 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2751 AMDGPU::OpName::src1_modifiers);
2752 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc,
2753 AMDGPU::OpName::src2_modifiers);
2754
2755 MI.RemoveOperand(Src2ModIdx);
2756 MI.RemoveOperand(Src1ModIdx);
2757 MI.RemoveOperand(Src0ModIdx);
2758 }
2759
2760 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
2761 Register Reg, MachineRegisterInfo *MRI) const {
2762 if (!MRI->hasOneNonDBGUse(Reg))
2763 return false;
2764
2765 switch (DefMI.getOpcode()) {
2766 default:
2767 return false;
2768 case AMDGPU::S_MOV_B64:
2769 // TODO: We could fold 64-bit immediates, but this gets complicated
2770 // when there are sub-registers.
2771 return false;
2772
2773 case AMDGPU::V_MOV_B32_e32:
2774 case AMDGPU::S_MOV_B32:
2775 case AMDGPU::V_ACCVGPR_WRITE_B32_e64:
2776 break;
2777 }
2778
2779 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0);
2780 assert(ImmOp);
2781 // FIXME: We could handle FrameIndex values here.
2782 if (!ImmOp->isImm())
2783 return false;
2784
2785 unsigned Opc = UseMI.getOpcode();
2786 if (Opc == AMDGPU::COPY) {
2787 Register DstReg = UseMI.getOperand(0).getReg();
2788 bool Is16Bit = getOpSize(UseMI, 0) == 2;
2789 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg);
2790 unsigned NewOpc = isVGPRCopy ?
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2791 APInt Imm(32, ImmOp->getImm()); 2792 2793 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2794 Imm = Imm.ashr(16); 2795 2796 if (RI.isAGPR(*MRI, DstReg)) { 2797 if (!isInlineConstant(Imm)) 2798 return false; 2799 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 2800 } 2801 2802 if (Is16Bit) { 2803 if (isVGPRCopy) 2804 return false; // Do not clobber vgpr_hi16 2805 2806 if (DstReg.isVirtual() && 2807 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2808 return false; 2809 2810 UseMI.getOperand(0).setSubReg(0); 2811 if (DstReg.isPhysical()) { 2812 DstReg = RI.get32BitRegister(DstReg); 2813 UseMI.getOperand(0).setReg(DstReg); 2814 } 2815 assert(UseMI.getOperand(1).getReg().isVirtual()); 2816 } 2817 2818 UseMI.setDesc(get(NewOpc)); 2819 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2820 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2821 return true; 2822 } 2823 2824 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2825 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 || 2826 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2827 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) { 2828 // Don't fold if we are using source or output modifiers. The new VOP2 2829 // instructions don't have them. 2830 if (hasAnyModifiersSet(UseMI)) 2831 return false; 2832 2833 // If this is a free constant, there's no reason to do this. 2834 // TODO: We could fold this here instead of letting SIFoldOperands do it 2835 // later. 2836 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2837 2838 // Any src operand can be used for the legality check. 2839 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2840 return false; 2841 2842 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2843 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64; 2844 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2845 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64; 2846 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2847 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2848 2849 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2850 // We should only expect these to be on src0 due to canonicalizations. 2851 if (Src0->isReg() && Src0->getReg() == Reg) { 2852 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2853 return false; 2854 2855 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2856 return false; 2857 2858 unsigned NewOpc = 2859 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2860 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2861 if (pseudoToMCOpcode(NewOpc) == -1) 2862 return false; 2863 2864 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2865 2866 const int64_t Imm = ImmOp->getImm(); 2867 2868 // FIXME: This would be a lot easier if we could return a new instruction 2869 // instead of having to modify in place. 2870 2871 // Remove these first since they are at the end. 
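// Removing omod and clamp from the back keeps the earlier source operand
// indices valid while src0/src1 are rewritten below.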
2872 UseMI.RemoveOperand( 2873 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2874 UseMI.RemoveOperand( 2875 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2876 2877 Register Src1Reg = Src1->getReg(); 2878 unsigned Src1SubReg = Src1->getSubReg(); 2879 Src0->setReg(Src1Reg); 2880 Src0->setSubReg(Src1SubReg); 2881 Src0->setIsKill(Src1->isKill()); 2882 2883 if (Opc == AMDGPU::V_MAC_F32_e64 || 2884 Opc == AMDGPU::V_MAC_F16_e64 || 2885 Opc == AMDGPU::V_FMAC_F32_e64 || 2886 Opc == AMDGPU::V_FMAC_F16_e64) 2887 UseMI.untieRegOperand( 2888 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2889 2890 Src1->ChangeToImmediate(Imm); 2891 2892 removeModOperands(UseMI); 2893 UseMI.setDesc(get(NewOpc)); 2894 2895 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2896 if (DeleteDef) 2897 DefMI.eraseFromParent(); 2898 2899 return true; 2900 } 2901 2902 // Added part is the constant: Use v_madak_{f16, f32}. 2903 if (Src2->isReg() && Src2->getReg() == Reg) { 2904 // Not allowed to use constant bus for another operand. 2905 // We can however allow an inline immediate as src0. 2906 bool Src0Inlined = false; 2907 if (Src0->isReg()) { 2908 // Try to inline constant if possible. 2909 // If the Def moves immediate and the use is single 2910 // We are saving VGPR here. 2911 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2912 if (Def && Def->isMoveImmediate() && 2913 isInlineConstant(Def->getOperand(1)) && 2914 MRI->hasOneUse(Src0->getReg())) { 2915 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2916 Src0Inlined = true; 2917 } else if ((Src0->getReg().isPhysical() && 2918 (ST.getConstantBusLimit(Opc) <= 1 && 2919 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2920 (Src0->getReg().isVirtual() && 2921 (ST.getConstantBusLimit(Opc) <= 1 && 2922 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2923 return false; 2924 // VGPR is okay as Src0 - fallthrough 2925 } 2926 2927 if (Src1->isReg() && !Src0Inlined ) { 2928 // We have one slot for inlinable constant so far - try to fill it 2929 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2930 if (Def && Def->isMoveImmediate() && 2931 isInlineConstant(Def->getOperand(1)) && 2932 MRI->hasOneUse(Src1->getReg()) && 2933 commuteInstruction(UseMI)) { 2934 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2935 } else if ((Src1->getReg().isPhysical() && 2936 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2937 (Src1->getReg().isVirtual() && 2938 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2939 return false; 2940 // VGPR is okay as Src1 - fallthrough 2941 } 2942 2943 unsigned NewOpc = 2944 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2945 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2946 if (pseudoToMCOpcode(NewOpc) == -1) 2947 return false; 2948 2949 const int64_t Imm = ImmOp->getImm(); 2950 2951 // FIXME: This would be a lot easier if we could return a new instruction 2952 // instead of having to modify in place. 2953 2954 // Remove these first since they are at the end. 2955 UseMI.RemoveOperand( 2956 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2957 UseMI.RemoveOperand( 2958 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2959 2960 if (Opc == AMDGPU::V_MAC_F32_e64 || 2961 Opc == AMDGPU::V_MAC_F16_e64 || 2962 Opc == AMDGPU::V_FMAC_F32_e64 || 2963 Opc == AMDGPU::V_FMAC_F16_e64) 2964 UseMI.untieRegOperand( 2965 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2966 2967 // ChangingToImmediate adds Src2 back to the instruction. 
2968 Src2->ChangeToImmediate(Imm); 2969 2970 // These come before src2. 2971 removeModOperands(UseMI); 2972 UseMI.setDesc(get(NewOpc)); 2973 // It might happen that UseMI was commuted 2974 // and we now have SGPR as SRC1. If so 2 inlined 2975 // constant and SGPR are illegal. 2976 legalizeOperands(UseMI); 2977 2978 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2979 if (DeleteDef) 2980 DefMI.eraseFromParent(); 2981 2982 return true; 2983 } 2984 } 2985 2986 return false; 2987 } 2988 2989 static bool 2990 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 2991 ArrayRef<const MachineOperand *> BaseOps2) { 2992 if (BaseOps1.size() != BaseOps2.size()) 2993 return false; 2994 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 2995 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 2996 return false; 2997 } 2998 return true; 2999 } 3000 3001 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 3002 int WidthB, int OffsetB) { 3003 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 3004 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 3005 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 3006 return LowOffset + LowWidth <= HighOffset; 3007 } 3008 3009 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 3010 const MachineInstr &MIb) const { 3011 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 3012 int64_t Offset0, Offset1; 3013 unsigned Dummy0, Dummy1; 3014 bool Offset0IsScalable, Offset1IsScalable; 3015 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 3016 Dummy0, &RI) || 3017 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 3018 Dummy1, &RI)) 3019 return false; 3020 3021 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 3022 return false; 3023 3024 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 3025 // FIXME: Handle ds_read2 / ds_write2. 3026 return false; 3027 } 3028 unsigned Width0 = MIa.memoperands().front()->getSize(); 3029 unsigned Width1 = MIb.memoperands().front()->getSize(); 3030 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 3031 } 3032 3033 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 3034 const MachineInstr &MIb) const { 3035 assert(MIa.mayLoadOrStore() && 3036 "MIa must load from or modify a memory location"); 3037 assert(MIb.mayLoadOrStore() && 3038 "MIb must load from or modify a memory location"); 3039 3040 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 3041 return false; 3042 3043 // XXX - Can we relax this between address spaces? 3044 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 3045 return false; 3046 3047 // TODO: Should we check the address space from the MachineMemOperand? That 3048 // would allow us to distinguish objects we know don't alias based on the 3049 // underlying address space, even if it was lowered to a different one, 3050 // e.g. private accesses lowered to use MUBUF instructions on a scratch 3051 // buffer. 
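// DS (LDS) accesses use a separate memory, so they can only alias other DS
// accesses or a generic FLAT access that might also address LDS.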
3052 if (isDS(MIa)) { 3053 if (isDS(MIb)) 3054 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3055 3056 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 3057 } 3058 3059 if (isMUBUF(MIa) || isMTBUF(MIa)) { 3060 if (isMUBUF(MIb) || isMTBUF(MIb)) 3061 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3062 3063 return !isFLAT(MIb) && !isSMRD(MIb); 3064 } 3065 3066 if (isSMRD(MIa)) { 3067 if (isSMRD(MIb)) 3068 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3069 3070 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 3071 } 3072 3073 if (isFLAT(MIa)) { 3074 if (isFLAT(MIb)) 3075 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3076 3077 return false; 3078 } 3079 3080 return false; 3081 } 3082 3083 static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, 3084 int64_t &Imm) { 3085 if (Reg.isPhysical()) 3086 return false; 3087 auto *Def = MRI.getUniqueVRegDef(Reg); 3088 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) { 3089 Imm = Def->getOperand(1).getImm(); 3090 return true; 3091 } 3092 return false; 3093 } 3094 3095 static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm) { 3096 if (!MO->isReg()) 3097 return false; 3098 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 3099 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3100 return getFoldableImm(MO->getReg(), MRI, Imm); 3101 } 3102 3103 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 3104 MachineInstr &NewMI) { 3105 if (LV) { 3106 unsigned NumOps = MI.getNumOperands(); 3107 for (unsigned I = 1; I < NumOps; ++I) { 3108 MachineOperand &Op = MI.getOperand(I); 3109 if (Op.isReg() && Op.isKill()) 3110 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 3111 } 3112 } 3113 } 3114 3115 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineFunction::iterator &MBB, 3116 MachineInstr &MI, 3117 LiveVariables *LV) const { 3118 unsigned Opc = MI.getOpcode(); 3119 bool IsF16 = false; 3120 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 3121 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 || 3122 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3123 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3124 3125 switch (Opc) { 3126 default: 3127 return nullptr; 3128 case AMDGPU::V_MAC_F16_e64: 3129 case AMDGPU::V_FMAC_F16_e64: 3130 IsF16 = true; 3131 LLVM_FALLTHROUGH; 3132 case AMDGPU::V_MAC_F32_e64: 3133 case AMDGPU::V_FMAC_F32_e64: 3134 case AMDGPU::V_FMAC_F64_e64: 3135 break; 3136 case AMDGPU::V_MAC_F16_e32: 3137 case AMDGPU::V_FMAC_F16_e32: 3138 IsF16 = true; 3139 LLVM_FALLTHROUGH; 3140 case AMDGPU::V_MAC_F32_e32: 3141 case AMDGPU::V_FMAC_F32_e32: 3142 case AMDGPU::V_FMAC_F64_e32: { 3143 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3144 AMDGPU::OpName::src0); 3145 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 3146 if (!Src0->isReg() && !Src0->isImm()) 3147 return nullptr; 3148 3149 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 3150 return nullptr; 3151 3152 break; 3153 } 3154 } 3155 3156 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3157 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 3158 const MachineOperand *Src0Mods = 3159 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 3160 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3161 const MachineOperand *Src1Mods = 3162 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 3163 const MachineOperand *Src2 = 
getNamedOperand(MI, AMDGPU::OpName::src2); 3164 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3165 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 3166 MachineInstrBuilder MIB; 3167 3168 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 && 3169 // If we have an SGPR input, we will violate the constant bus restriction. 3170 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 3171 !RI.isSGPRReg(MBB->getParent()->getRegInfo(), Src0->getReg()))) { 3172 int64_t Imm; 3173 if (getFoldableImm(Src2, Imm)) { 3174 unsigned NewOpc = 3175 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 3176 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 3177 if (pseudoToMCOpcode(NewOpc) != -1) { 3178 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3179 .add(*Dst) 3180 .add(*Src0) 3181 .add(*Src1) 3182 .addImm(Imm); 3183 updateLiveVariables(LV, MI, *MIB); 3184 return MIB; 3185 } 3186 } 3187 unsigned NewOpc = IsFMA 3188 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 3189 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 3190 if (getFoldableImm(Src1, Imm)) { 3191 if (pseudoToMCOpcode(NewOpc) != -1) { 3192 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3193 .add(*Dst) 3194 .add(*Src0) 3195 .addImm(Imm) 3196 .add(*Src2); 3197 updateLiveVariables(LV, MI, *MIB); 3198 return MIB; 3199 } 3200 } 3201 if (getFoldableImm(Src0, Imm)) { 3202 if (pseudoToMCOpcode(NewOpc) != -1 && 3203 isOperandLegal( 3204 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 3205 Src1)) { 3206 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3207 .add(*Dst) 3208 .add(*Src1) 3209 .addImm(Imm) 3210 .add(*Src2); 3211 updateLiveVariables(LV, MI, *MIB); 3212 return MIB; 3213 } 3214 } 3215 } 3216 3217 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16_e64 3218 : IsF64 ? AMDGPU::V_FMA_F64_e64 3219 : AMDGPU::V_FMA_F32_e64) 3220 : (IsF16 ? AMDGPU::V_MAD_F16_e64 : AMDGPU::V_MAD_F32_e64); 3221 if (pseudoToMCOpcode(NewOpc) == -1) 3222 return nullptr; 3223 3224 MIB = BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3225 .add(*Dst) 3226 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3227 .add(*Src0) 3228 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3229 .add(*Src1) 3230 .addImm(0) // Src mods 3231 .add(*Src2) 3232 .addImm(Clamp ? Clamp->getImm() : 0) 3233 .addImm(Omod ? Omod->getImm() : 0); 3234 updateLiveVariables(LV, MI, *MIB); 3235 return MIB; 3236 } 3237 3238 // It's not generally safe to move VALU instructions across these since it will 3239 // start using the register as a base index rather than directly. 3240 // XXX - Why isn't hasSideEffects sufficient for these? 3241 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3242 switch (MI.getOpcode()) { 3243 case AMDGPU::S_SET_GPR_IDX_ON: 3244 case AMDGPU::S_SET_GPR_IDX_MODE: 3245 case AMDGPU::S_SET_GPR_IDX_OFF: 3246 return true; 3247 default: 3248 return false; 3249 } 3250 } 3251 3252 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3253 const MachineBasicBlock *MBB, 3254 const MachineFunction &MF) const { 3255 // Skipping the check for SP writes in the base implementation. The reason it 3256 // was added was apparently due to compile time concerns. 3257 // 3258 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3259 // but is probably avoidable. 3260 3261 // Copied from base implementation. 3262 // Terminators and labels can't be scheduled around. 
3263 if (MI.isTerminator() || MI.isPosition()) 3264 return true; 3265 3266 // INLINEASM_BR can jump to another block 3267 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3268 return true; 3269 3270 // Target-independent instructions do not have an implicit-use of EXEC, even 3271 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3272 // boundaries prevents incorrect movements of such instructions. 3273 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3274 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3275 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3276 changesVGPRIndexingMode(MI); 3277 } 3278 3279 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3280 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3281 Opcode == AMDGPU::DS_GWS_INIT || 3282 Opcode == AMDGPU::DS_GWS_SEMA_V || 3283 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3284 Opcode == AMDGPU::DS_GWS_SEMA_P || 3285 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3286 Opcode == AMDGPU::DS_GWS_BARRIER; 3287 } 3288 3289 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3290 // Skip the full operand and register alias search modifiesRegister 3291 // does. There's only a handful of instructions that touch this, it's only an 3292 // implicit def, and doesn't alias any other registers. 3293 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3294 for (; ImpDef && *ImpDef; ++ImpDef) { 3295 if (*ImpDef == AMDGPU::MODE) 3296 return true; 3297 } 3298 } 3299 3300 return false; 3301 } 3302 3303 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3304 unsigned Opcode = MI.getOpcode(); 3305 3306 if (MI.mayStore() && isSMRD(MI)) 3307 return true; // scalar store or atomic 3308 3309 // This will terminate the function when other lanes may need to continue. 3310 if (MI.isReturn()) 3311 return true; 3312 3313 // These instructions cause shader I/O that may cause hardware lockups 3314 // when executed with an empty EXEC mask. 3315 // 3316 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3317 // EXEC = 0, but checking for that case here seems not worth it 3318 // given the typical code patterns. 3319 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3320 isEXP(Opcode) || 3321 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3322 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3323 return true; 3324 3325 if (MI.isCall() || MI.isInlineAsm()) 3326 return true; // conservative assumption 3327 3328 // A mode change is a scalar operation that influences vector instructions. 3329 if (modifiesModeRegister(MI)) 3330 return true; 3331 3332 // These are like SALU instructions in terms of effects, so it's questionable 3333 // whether we should return true for those. 3334 // 3335 // However, executing them with EXEC = 0 causes them to operate on undefined 3336 // data, which we avoid by returning true here. 3337 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || 3338 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32) 3339 return true; 3340 3341 return false; 3342 } 3343 3344 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3345 const MachineInstr &MI) const { 3346 if (MI.isMetaInstruction()) 3347 return false; 3348 3349 // This won't read exec if this is an SGPR->SGPR copy. 
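// A copy writing a VGPR executes per lane and therefore depends on EXEC;
// an SGPR copy only reads EXEC if EXEC itself is one of its operands.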
3350 if (MI.isCopyLike()) { 3351 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3352 return true; 3353 3354 // Make sure this isn't copying exec as a normal operand 3355 return MI.readsRegister(AMDGPU::EXEC, &RI); 3356 } 3357 3358 // Make a conservative assumption about the callee. 3359 if (MI.isCall()) 3360 return true; 3361 3362 // Be conservative with any unhandled generic opcodes. 3363 if (!isTargetSpecificOpcode(MI.getOpcode())) 3364 return true; 3365 3366 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3367 } 3368 3369 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3370 switch (Imm.getBitWidth()) { 3371 case 1: // This likely will be a condition code mask. 3372 return true; 3373 3374 case 32: 3375 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3376 ST.hasInv2PiInlineImm()); 3377 case 64: 3378 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3379 ST.hasInv2PiInlineImm()); 3380 case 16: 3381 return ST.has16BitInsts() && 3382 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3383 ST.hasInv2PiInlineImm()); 3384 default: 3385 llvm_unreachable("invalid bitwidth"); 3386 } 3387 } 3388 3389 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3390 uint8_t OperandType) const { 3391 if (!MO.isImm() || 3392 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3393 OperandType > AMDGPU::OPERAND_SRC_LAST) 3394 return false; 3395 3396 // MachineOperand provides no way to tell the true operand size, since it only 3397 // records a 64-bit value. We need to know the size to determine if a 32-bit 3398 // floating point immediate bit pattern is legal for an integer immediate. It 3399 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3400 3401 int64_t Imm = MO.getImm(); 3402 switch (OperandType) { 3403 case AMDGPU::OPERAND_REG_IMM_INT32: 3404 case AMDGPU::OPERAND_REG_IMM_FP32: 3405 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3406 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3407 case AMDGPU::OPERAND_REG_IMM_V2FP32: 3408 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 3409 case AMDGPU::OPERAND_REG_IMM_V2INT32: 3410 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: 3411 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3412 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3413 int32_t Trunc = static_cast<int32_t>(Imm); 3414 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3415 } 3416 case AMDGPU::OPERAND_REG_IMM_INT64: 3417 case AMDGPU::OPERAND_REG_IMM_FP64: 3418 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3419 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3420 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 3421 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3422 ST.hasInv2PiInlineImm()); 3423 case AMDGPU::OPERAND_REG_IMM_INT16: 3424 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3425 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3426 // We would expect inline immediates to not be concerned with an integer/fp 3427 // distinction. However, in the case of 16-bit integer operations, the 3428 // "floating point" values appear to not work. It seems read the low 16-bits 3429 // of 32-bit immediates, which happens to always work for the integer 3430 // values. 3431 // 3432 // See llvm bugzilla 46302. 3433 // 3434 // TODO: Theoretically we could use op-sel to use the high bits of the 3435 // 32-bit FP values. 3436 return AMDGPU::isInlinableIntLiteral(Imm); 3437 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3438 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3439 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3440 // This suffers the same problem as the scalar 16-bit cases. 
3441 return AMDGPU::isInlinableIntLiteralV216(Imm); 3442 case AMDGPU::OPERAND_REG_IMM_FP16: 3443 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3444 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3445 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3446 // A few special case instructions have 16-bit operands on subtargets 3447 // where 16-bit instructions are not legal. 3448 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 3449 // constants in these cases 3450 int16_t Trunc = static_cast<int16_t>(Imm); 3451 return ST.has16BitInsts() && 3452 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3453 } 3454 3455 return false; 3456 } 3457 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3458 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3459 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3460 uint32_t Trunc = static_cast<uint32_t>(Imm); 3461 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3462 } 3463 default: 3464 llvm_unreachable("invalid bitwidth"); 3465 } 3466 } 3467 3468 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3469 const MCOperandInfo &OpInfo) const { 3470 switch (MO.getType()) { 3471 case MachineOperand::MO_Register: 3472 return false; 3473 case MachineOperand::MO_Immediate: 3474 return !isInlineConstant(MO, OpInfo); 3475 case MachineOperand::MO_FrameIndex: 3476 case MachineOperand::MO_MachineBasicBlock: 3477 case MachineOperand::MO_ExternalSymbol: 3478 case MachineOperand::MO_GlobalAddress: 3479 case MachineOperand::MO_MCSymbol: 3480 return true; 3481 default: 3482 llvm_unreachable("unexpected operand type"); 3483 } 3484 } 3485 3486 static bool compareMachineOp(const MachineOperand &Op0, 3487 const MachineOperand &Op1) { 3488 if (Op0.getType() != Op1.getType()) 3489 return false; 3490 3491 switch (Op0.getType()) { 3492 case MachineOperand::MO_Register: 3493 return Op0.getReg() == Op1.getReg(); 3494 case MachineOperand::MO_Immediate: 3495 return Op0.getImm() == Op1.getImm(); 3496 default: 3497 llvm_unreachable("Didn't expect to be comparing these operand types"); 3498 } 3499 } 3500 3501 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3502 const MachineOperand &MO) const { 3503 const MCInstrDesc &InstDesc = MI.getDesc(); 3504 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3505 3506 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3507 3508 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3509 return true; 3510 3511 if (OpInfo.RegClass < 0) 3512 return false; 3513 3514 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3515 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3516 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3517 AMDGPU::OpName::src2)) 3518 return false; 3519 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3520 } 3521 3522 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3523 return false; 3524 3525 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3526 return true; 3527 3528 return ST.hasVOP3Literal(); 3529 } 3530 3531 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3532 // GFX90A does not have V_MUL_LEGACY_F32_e32. 3533 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts()) 3534 return false; 3535 3536 int Op32 = AMDGPU::getVOPe32(Opcode); 3537 if (Op32 == -1) 3538 return false; 3539 3540 return pseudoToMCOpcode(Op32) != -1; 3541 } 3542 3543 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3544 // The src0_modifier operand is present on all instructions 3545 // that have modifiers. 
3546 3547 return AMDGPU::getNamedOperandIdx(Opcode, 3548 AMDGPU::OpName::src0_modifiers) != -1; 3549 } 3550 3551 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3552 unsigned OpName) const { 3553 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3554 return Mods && Mods->getImm(); 3555 } 3556 3557 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3558 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3559 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3560 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3561 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3562 hasModifiersSet(MI, AMDGPU::OpName::omod); 3563 } 3564 3565 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3566 const MachineRegisterInfo &MRI) const { 3567 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3568 // Can't shrink instruction with three operands. 3569 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 3570 // a special case for it. It can only be shrunk if the third operand 3571 // is vcc, and src0_modifiers and src1_modifiers are not set. 3572 // We should handle this the same way we handle vopc, by addding 3573 // a register allocation hint pre-regalloc and then do the shrinking 3574 // post-regalloc. 3575 if (Src2) { 3576 switch (MI.getOpcode()) { 3577 default: return false; 3578 3579 case AMDGPU::V_ADDC_U32_e64: 3580 case AMDGPU::V_SUBB_U32_e64: 3581 case AMDGPU::V_SUBBREV_U32_e64: { 3582 const MachineOperand *Src1 3583 = getNamedOperand(MI, AMDGPU::OpName::src1); 3584 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3585 return false; 3586 // Additional verification is needed for sdst/src2. 3587 return true; 3588 } 3589 case AMDGPU::V_MAC_F32_e64: 3590 case AMDGPU::V_MAC_F16_e64: 3591 case AMDGPU::V_FMAC_F32_e64: 3592 case AMDGPU::V_FMAC_F16_e64: 3593 case AMDGPU::V_FMAC_F64_e64: 3594 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3595 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3596 return false; 3597 break; 3598 3599 case AMDGPU::V_CNDMASK_B32_e64: 3600 break; 3601 } 3602 } 3603 3604 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3605 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3606 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3607 return false; 3608 3609 // We don't need to check src0, all input types are legal, so just make sure 3610 // src0 isn't using any modifiers. 3611 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3612 return false; 3613 3614 // Can it be shrunk to a valid 32 bit opcode? 3615 if (!hasVALU32BitEncoding(MI.getOpcode())) 3616 return false; 3617 3618 // Check output modifiers 3619 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3620 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3621 } 3622 3623 // Set VCC operand with all flags from \p Orig, except for setting it as 3624 // implicit. 
3625 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3626 const MachineOperand &Orig) { 3627 3628 for (MachineOperand &Use : MI.implicit_operands()) { 3629 if (Use.isUse() && 3630 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3631 Use.setIsUndef(Orig.isUndef()); 3632 Use.setIsKill(Orig.isKill()); 3633 return; 3634 } 3635 } 3636 } 3637 3638 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3639 unsigned Op32) const { 3640 MachineBasicBlock *MBB = MI.getParent();; 3641 MachineInstrBuilder Inst32 = 3642 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3643 .setMIFlags(MI.getFlags()); 3644 3645 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3646 // For VOPC instructions, this is replaced by an implicit def of vcc. 3647 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3648 if (Op32DstIdx != -1) { 3649 // dst 3650 Inst32.add(MI.getOperand(0)); 3651 } else { 3652 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3653 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3654 "Unexpected case"); 3655 } 3656 3657 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3658 3659 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3660 if (Src1) 3661 Inst32.add(*Src1); 3662 3663 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3664 3665 if (Src2) { 3666 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3667 if (Op32Src2Idx != -1) { 3668 Inst32.add(*Src2); 3669 } else { 3670 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3671 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3672 // of vcc was already added during the initial BuildMI, but we 3673 // 1) may need to change vcc to vcc_lo to preserve the original register 3674 // 2) have to preserve the original flags. 3675 fixImplicitOperands(*Inst32); 3676 copyFlagsToImplicitVCC(*Inst32, *Src2); 3677 } 3678 } 3679 3680 return Inst32; 3681 } 3682 3683 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3684 const MachineOperand &MO, 3685 const MCOperandInfo &OpInfo) const { 3686 // Literal constants use the constant bus. 3687 //if (isLiteralConstantLike(MO, OpInfo)) 3688 // return true; 3689 if (MO.isImm()) 3690 return !isInlineConstant(MO, OpInfo); 3691 3692 if (!MO.isReg()) 3693 return true; // Misc other operands like FrameIndex 3694 3695 if (!MO.isUse()) 3696 return false; 3697 3698 if (MO.getReg().isVirtual()) 3699 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3700 3701 // Null is free 3702 if (MO.getReg() == AMDGPU::SGPR_NULL) 3703 return false; 3704 3705 // SGPRs use the constant bus 3706 if (MO.isImplicit()) { 3707 return MO.getReg() == AMDGPU::M0 || 3708 MO.getReg() == AMDGPU::VCC || 3709 MO.getReg() == AMDGPU::VCC_LO; 3710 } else { 3711 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3712 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3713 } 3714 } 3715 3716 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3717 for (const MachineOperand &MO : MI.implicit_operands()) { 3718 // We only care about reads. 
3719 if (MO.isDef()) 3720 continue; 3721 3722 switch (MO.getReg()) { 3723 case AMDGPU::VCC: 3724 case AMDGPU::VCC_LO: 3725 case AMDGPU::VCC_HI: 3726 case AMDGPU::M0: 3727 case AMDGPU::FLAT_SCR: 3728 return MO.getReg(); 3729 3730 default: 3731 break; 3732 } 3733 } 3734 3735 return AMDGPU::NoRegister; 3736 } 3737 3738 static bool shouldReadExec(const MachineInstr &MI) { 3739 if (SIInstrInfo::isVALU(MI)) { 3740 switch (MI.getOpcode()) { 3741 case AMDGPU::V_READLANE_B32: 3742 case AMDGPU::V_WRITELANE_B32: 3743 return false; 3744 } 3745 3746 return true; 3747 } 3748 3749 if (MI.isPreISelOpcode() || 3750 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3751 SIInstrInfo::isSALU(MI) || 3752 SIInstrInfo::isSMRD(MI)) 3753 return false; 3754 3755 return true; 3756 } 3757 3758 static bool isSubRegOf(const SIRegisterInfo &TRI, 3759 const MachineOperand &SuperVec, 3760 const MachineOperand &SubReg) { 3761 if (SubReg.getReg().isPhysical()) 3762 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3763 3764 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3765 SubReg.getReg() == SuperVec.getReg(); 3766 } 3767 3768 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3769 StringRef &ErrInfo) const { 3770 uint16_t Opcode = MI.getOpcode(); 3771 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3772 return true; 3773 3774 const MachineFunction *MF = MI.getParent()->getParent(); 3775 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3776 3777 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3778 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3779 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3780 3781 // Make sure the number of operands is correct. 3782 const MCInstrDesc &Desc = get(Opcode); 3783 if (!Desc.isVariadic() && 3784 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3785 ErrInfo = "Instruction has wrong number of operands."; 3786 return false; 3787 } 3788 3789 if (MI.isInlineAsm()) { 3790 // Verify register classes for inlineasm constraints. 3791 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3792 I != E; ++I) { 3793 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3794 if (!RC) 3795 continue; 3796 3797 const MachineOperand &Op = MI.getOperand(I); 3798 if (!Op.isReg()) 3799 continue; 3800 3801 Register Reg = Op.getReg(); 3802 if (!Reg.isVirtual() && !RC->contains(Reg)) { 3803 ErrInfo = "inlineasm operand has incorrect register class."; 3804 return false; 3805 } 3806 } 3807 3808 return true; 3809 } 3810 3811 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3812 ErrInfo = "missing memory operand from MIMG instruction."; 3813 return false; 3814 } 3815 3816 // Make sure the register classes are correct. 3817 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3818 const MachineOperand &MO = MI.getOperand(i); 3819 if (MO.isFPImm()) { 3820 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3821 "all fp values to integers."; 3822 return false; 3823 } 3824 3825 int RegClass = Desc.OpInfo[i].RegClass; 3826 3827 switch (Desc.OpInfo[i].OperandType) { 3828 case MCOI::OPERAND_REGISTER: 3829 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3830 ErrInfo = "Illegal immediate value for operand."; 3831 return false; 3832 } 3833 break; 3834 case AMDGPU::OPERAND_REG_IMM_INT32: 3835 case AMDGPU::OPERAND_REG_IMM_FP32: 3836 break; 3837 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3838 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3839 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3840 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3841 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3842 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3843 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3844 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3845 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3846 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 3847 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: { 3848 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3849 ErrInfo = "Illegal immediate value for operand."; 3850 return false; 3851 } 3852 break; 3853 } 3854 case MCOI::OPERAND_IMMEDIATE: 3855 case AMDGPU::OPERAND_KIMM32: 3856 // Check if this operand is an immediate. 3857 // FrameIndex operands will be replaced by immediates, so they are 3858 // allowed. 3859 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3860 ErrInfo = "Expected immediate, but got non-immediate"; 3861 return false; 3862 } 3863 LLVM_FALLTHROUGH; 3864 default: 3865 continue; 3866 } 3867 3868 if (!MO.isReg()) 3869 continue; 3870 Register Reg = MO.getReg(); 3871 if (!Reg) 3872 continue; 3873 3874 // FIXME: Ideally we would have separate instruction definitions with the 3875 // aligned register constraint. 3876 // FIXME: We do not verify inline asm operands, but custom inline asm 3877 // verification is broken anyway 3878 if (ST.needsAlignedVGPRs()) { 3879 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); 3880 const bool IsVGPR = RI.hasVGPRs(RC); 3881 const bool IsAGPR = !IsVGPR && RI.hasAGPRs(RC); 3882 if ((IsVGPR || IsAGPR) && MO.getSubReg()) { 3883 const TargetRegisterClass *SubRC = 3884 RI.getSubRegClass(RC, MO.getSubReg()); 3885 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); 3886 if (RC) 3887 RC = SubRC; 3888 } 3889 3890 // Check that this is the aligned version of the class. 
3891 if (!RC || !RI.isProperlyAlignedRC(*RC)) { 3892 ErrInfo = "Subtarget requires even aligned vector registers"; 3893 return false; 3894 } 3895 } 3896 3897 if (RegClass != -1) { 3898 if (Reg.isVirtual()) 3899 continue; 3900 3901 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3902 if (!RC->contains(Reg)) { 3903 ErrInfo = "Operand has incorrect register class."; 3904 return false; 3905 } 3906 } 3907 } 3908 3909 // Verify SDWA 3910 if (isSDWA(MI)) { 3911 if (!ST.hasSDWA()) { 3912 ErrInfo = "SDWA is not supported on this target"; 3913 return false; 3914 } 3915 3916 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3917 3918 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3919 3920 for (int OpIdx: OpIndicies) { 3921 if (OpIdx == -1) 3922 continue; 3923 const MachineOperand &MO = MI.getOperand(OpIdx); 3924 3925 if (!ST.hasSDWAScalar()) { 3926 // Only VGPRS on VI 3927 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3928 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3929 return false; 3930 } 3931 } else { 3932 // No immediates on GFX9 3933 if (!MO.isReg()) { 3934 ErrInfo = 3935 "Only reg allowed as operands in SDWA instructions on GFX9+"; 3936 return false; 3937 } 3938 } 3939 } 3940 3941 if (!ST.hasSDWAOmod()) { 3942 // No omod allowed on VI 3943 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3944 if (OMod != nullptr && 3945 (!OMod->isImm() || OMod->getImm() != 0)) { 3946 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3947 return false; 3948 } 3949 } 3950 3951 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3952 if (isVOPC(BasicOpcode)) { 3953 if (!ST.hasSDWASdst() && DstIdx != -1) { 3954 // Only vcc allowed as dst on VI for VOPC 3955 const MachineOperand &Dst = MI.getOperand(DstIdx); 3956 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3957 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3958 return false; 3959 } 3960 } else if (!ST.hasSDWAOutModsVOPC()) { 3961 // No clamp allowed on GFX9 for VOPC 3962 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3963 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 3964 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 3965 return false; 3966 } 3967 3968 // No omod allowed on GFX9 for VOPC 3969 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3970 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 3971 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 3972 return false; 3973 } 3974 } 3975 } 3976 3977 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 3978 if (DstUnused && DstUnused->isImm() && 3979 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 3980 const MachineOperand &Dst = MI.getOperand(DstIdx); 3981 if (!Dst.isReg() || !Dst.isTied()) { 3982 ErrInfo = "Dst register should have tied register"; 3983 return false; 3984 } 3985 3986 const MachineOperand &TiedMO = 3987 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 3988 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 3989 ErrInfo = 3990 "Dst register should be tied to implicit use of preserved register"; 3991 return false; 3992 } else if (TiedMO.getReg().isPhysical() && 3993 Dst.getReg() != TiedMO.getReg()) { 3994 ErrInfo = "Dst register should use same physical register as preserved"; 3995 return false; 3996 } 3997 } 3998 } 3999 4000 // Verify MIMG 4001 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 
4002 // Ensure that the return type used is large enough for all the options 4003 // being used. TFE/LWE require an extra result register. 4004 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 4005 if (DMask) { 4006 uint64_t DMaskImm = DMask->getImm(); 4007 uint32_t RegCount = 4008 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 4009 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 4010 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 4011 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 4012 4013 // Adjust for packed 16 bit values 4014 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 4015 RegCount >>= 1; 4016 4017 // Adjust if using LWE or TFE 4018 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 4019 RegCount += 1; 4020 4021 const uint32_t DstIdx = 4022 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 4023 const MachineOperand &Dst = MI.getOperand(DstIdx); 4024 if (Dst.isReg()) { 4025 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 4026 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 4027 if (RegCount > DstSize) { 4028 ErrInfo = "MIMG instruction returns too many registers for dst " 4029 "register class"; 4030 return false; 4031 } 4032 } 4033 } 4034 } 4035 4036 // Verify VOP*. Ignore multiple sgpr operands on writelane. 4037 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 4038 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 4039 // Only look at the true operands. Only a real operand can use the constant 4040 // bus, and we don't want to check pseudo-operands like the source modifier 4041 // flags. 4042 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 4043 4044 unsigned ConstantBusCount = 0; 4045 bool UsesLiteral = false; 4046 const MachineOperand *LiteralVal = nullptr; 4047 4048 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 4049 ++ConstantBusCount; 4050 4051 SmallVector<Register, 2> SGPRsUsed; 4052 Register SGPRUsed; 4053 4054 for (int OpIdx : OpIndices) { 4055 if (OpIdx == -1) 4056 break; 4057 const MachineOperand &MO = MI.getOperand(OpIdx); 4058 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4059 if (MO.isReg()) { 4060 SGPRUsed = MO.getReg(); 4061 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 4062 return SGPRUsed != SGPR; 4063 })) { 4064 ++ConstantBusCount; 4065 SGPRsUsed.push_back(SGPRUsed); 4066 } 4067 } else { 4068 if (!UsesLiteral) { 4069 ++ConstantBusCount; 4070 UsesLiteral = true; 4071 LiteralVal = &MO; 4072 } else if (!MO.isIdenticalTo(*LiteralVal)) { 4073 assert(isVOP3(MI)); 4074 ErrInfo = "VOP3 instruction uses more than one literal"; 4075 return false; 4076 } 4077 } 4078 } 4079 } 4080 4081 SGPRUsed = findImplicitSGPRRead(MI); 4082 if (SGPRUsed != AMDGPU::NoRegister) { 4083 // Implicit uses may safely overlap true operands 4084 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 4085 return !RI.regsOverlap(SGPRUsed, SGPR); 4086 })) { 4087 ++ConstantBusCount; 4088 SGPRsUsed.push_back(SGPRUsed); 4089 } 4090 } 4091 4092 // v_writelane_b32 is an exception to the constant bus restriction: 4093 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 4094 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 4095 Opcode != AMDGPU::V_WRITELANE_B32) { 4096 ErrInfo = "VOP* instruction violates constant bus restriction"; 4097 return false; 4098 } 4099 4100 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) { 4101 ErrInfo = "VOP3 
instruction uses literal"; 4102 return false; 4103 } 4104 } 4105 4106 // Special case for writelane - this can break the multiple constant bus rule, 4107 // but still can't use more than one SGPR register 4108 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 4109 unsigned SGPRCount = 0; 4110 Register SGPRUsed = AMDGPU::NoRegister; 4111 4112 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 4113 if (OpIdx == -1) 4114 break; 4115 4116 const MachineOperand &MO = MI.getOperand(OpIdx); 4117 4118 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4119 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 4120 if (MO.getReg() != SGPRUsed) 4121 ++SGPRCount; 4122 SGPRUsed = MO.getReg(); 4123 } 4124 } 4125 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 4126 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 4127 return false; 4128 } 4129 } 4130 } 4131 4132 // Verify misc. restrictions on specific instructions. 4133 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 || 4134 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) { 4135 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4136 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4137 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 4138 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 4139 if (!compareMachineOp(Src0, Src1) && 4140 !compareMachineOp(Src0, Src2)) { 4141 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 4142 return false; 4143 } 4144 } 4145 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 4146 SISrcMods::ABS) || 4147 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 4148 SISrcMods::ABS) || 4149 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 4150 SISrcMods::ABS)) { 4151 ErrInfo = "ABS not allowed in VOP3B instructions"; 4152 return false; 4153 } 4154 } 4155 4156 if (isSOP2(MI) || isSOPC(MI)) { 4157 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4158 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4159 unsigned Immediates = 0; 4160 4161 if (!Src0.isReg() && 4162 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 4163 Immediates++; 4164 if (!Src1.isReg() && 4165 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 4166 Immediates++; 4167 4168 if (Immediates > 1) { 4169 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 4170 return false; 4171 } 4172 } 4173 4174 if (isSOPK(MI)) { 4175 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 4176 if (Desc.isBranch()) { 4177 if (!Op->isMBB()) { 4178 ErrInfo = "invalid branch target for SOPK instruction"; 4179 return false; 4180 } 4181 } else { 4182 uint64_t Imm = Op->getImm(); 4183 if (sopkIsZext(MI)) { 4184 if (!isUInt<16>(Imm)) { 4185 ErrInfo = "invalid immediate for SOPK instruction"; 4186 return false; 4187 } 4188 } else { 4189 if (!isInt<16>(Imm)) { 4190 ErrInfo = "invalid immediate for SOPK instruction"; 4191 return false; 4192 } 4193 } 4194 } 4195 } 4196 4197 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 4198 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 4199 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4200 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 4201 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4202 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 4203 4204 const unsigned StaticNumOps = Desc.getNumOperands() + 4205 Desc.getNumImplicitUses(); 4206 const unsigned NumImplicitOps = IsDst ? 2 : 1; 4207 4208 // Allow additional implicit operands. 
This allows a fixup done by the post 4209 // RA scheduler where the main implicit operand is killed and implicit-defs 4210 // are added for sub-registers that remain live after this instruction. 4211 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 4212 ErrInfo = "missing implicit register operands"; 4213 return false; 4214 } 4215 4216 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4217 if (IsDst) { 4218 if (!Dst->isUse()) { 4219 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 4220 return false; 4221 } 4222 4223 unsigned UseOpIdx; 4224 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 4225 UseOpIdx != StaticNumOps + 1) { 4226 ErrInfo = "movrel implicit operands should be tied"; 4227 return false; 4228 } 4229 } 4230 4231 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4232 const MachineOperand &ImpUse 4233 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 4234 if (!ImpUse.isReg() || !ImpUse.isUse() || 4235 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 4236 ErrInfo = "src0 should be subreg of implicit vector use"; 4237 return false; 4238 } 4239 } 4240 4241 // Make sure we aren't losing exec uses in the td files. This mostly requires 4242 // being careful when using let Uses to try to add other use registers. 4243 if (shouldReadExec(MI)) { 4244 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 4245 ErrInfo = "VALU instruction does not implicitly read exec mask"; 4246 return false; 4247 } 4248 } 4249 4250 if (isSMRD(MI)) { 4251 if (MI.mayStore()) { 4252 // The register offset form of scalar stores may only use m0 as the 4253 // soffset register. 4254 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 4255 if (Soff && Soff->getReg() != AMDGPU::M0) { 4256 ErrInfo = "scalar stores must use m0 as offset register"; 4257 return false; 4258 } 4259 } 4260 } 4261 4262 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 4263 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4264 if (Offset->getImm() != 0) { 4265 ErrInfo = "subtarget does not support offsets in flat instructions"; 4266 return false; 4267 } 4268 } 4269 4270 if (isMIMG(MI)) { 4271 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4272 if (DimOp) { 4273 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4274 AMDGPU::OpName::vaddr0); 4275 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4276 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4277 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4278 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4279 const AMDGPU::MIMGDimInfo *Dim = 4280 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4281 4282 if (!Dim) { 4283 ErrInfo = "dim is out of range"; 4284 return false; 4285 } 4286 4287 bool IsA16 = false; 4288 if (ST.hasR128A16()) { 4289 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4290 IsA16 = R128A16->getImm() != 0; 4291 } else if (ST.hasGFX10A16()) { 4292 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4293 IsA16 = A16->getImm() != 0; 4294 } 4295 4296 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4297 4298 unsigned AddrWords = 4299 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16()); 4300 4301 unsigned VAddrWords; 4302 if (IsNSA) { 4303 VAddrWords = SRsrcIdx - VAddr0Idx; 4304 } else { 4305 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4306 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4307 if (AddrWords > 8) 4308 AddrWords = 16; 4309 } 4310 
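// Descriptive note: the supplied vaddr operands (the NSA operand count, or the width of the packed vaddr register tuple) must match the address size implied by the opcode, dim, and A16/G16 settings computed above.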
4311 if (VAddrWords != AddrWords) { 4312 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4313 << " but got " << VAddrWords << "\n"); 4314 ErrInfo = "bad vaddr size"; 4315 return false; 4316 } 4317 } 4318 } 4319 4320 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4321 if (DppCt) { 4322 using namespace AMDGPU::DPP; 4323 4324 unsigned DC = DppCt->getImm(); 4325 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4326 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4327 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4328 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4329 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4330 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4331 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4332 ErrInfo = "Invalid dpp_ctrl value"; 4333 return false; 4334 } 4335 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4336 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4337 ErrInfo = "Invalid dpp_ctrl value: " 4338 "wavefront shifts are not supported on GFX10+"; 4339 return false; 4340 } 4341 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4342 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4343 ErrInfo = "Invalid dpp_ctrl value: " 4344 "broadcasts are not supported on GFX10+"; 4345 return false; 4346 } 4347 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4348 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4349 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST && 4350 DC <= DppCtrl::ROW_NEWBCAST_LAST && 4351 !ST.hasGFX90AInsts()) { 4352 ErrInfo = "Invalid dpp_ctrl value: " 4353 "row_newbroadcast/row_share is not supported before " 4354 "GFX90A/GFX10"; 4355 return false; 4356 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) { 4357 ErrInfo = "Invalid dpp_ctrl value: " 4358 "row_share and row_xmask are not supported before GFX10"; 4359 return false; 4360 } 4361 } 4362 4363 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4364 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 4365 4366 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO && 4367 ((DstIdx >= 0 && 4368 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || 4369 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) || 4370 ((Src0Idx >= 0 && 4371 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || 4372 Desc.OpInfo[Src0Idx].RegClass == 4373 AMDGPU::VReg_64_Align2RegClassID)))) && 4374 !AMDGPU::isLegal64BitDPPControl(DC)) { 4375 ErrInfo = "Invalid dpp_ctrl value: " 4376 "64 bit dpp only support row_newbcast"; 4377 return false; 4378 } 4379 } 4380 4381 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) { 4382 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4383 uint16_t DataNameIdx = isDS(Opcode) ? 
AMDGPU::OpName::data0 4384 : AMDGPU::OpName::vdata; 4385 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx); 4386 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1); 4387 if (Data && !Data->isReg()) 4388 Data = nullptr; 4389 4390 if (ST.hasGFX90AInsts()) { 4391 if (Dst && Data && 4392 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { 4393 ErrInfo = "Invalid register class: " 4394 "vdata and vdst should be both VGPR or AGPR"; 4395 return false; 4396 } 4397 if (Data && Data2 && 4398 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { 4399 ErrInfo = "Invalid register class: " 4400 "both data operands should be VGPR or AGPR"; 4401 return false; 4402 } 4403 } else { 4404 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || 4405 (Data && RI.isAGPR(MRI, Data->getReg())) || 4406 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { 4407 ErrInfo = "Invalid register class: " 4408 "agpr loads and stores not supported on this GPU"; 4409 return false; 4410 } 4411 } 4412 } 4413 4414 if (ST.needsAlignedVGPRs() && 4415 (MI.getOpcode() == AMDGPU::DS_GWS_INIT || 4416 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR || 4417 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER)) { 4418 const MachineOperand *Op = getNamedOperand(MI, AMDGPU::OpName::data0); 4419 Register Reg = Op->getReg(); 4420 bool Aligned = true; 4421 if (Reg.isPhysical()) { 4422 Aligned = !(RI.getHWRegIndex(Reg) & 1); 4423 } else { 4424 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 4425 Aligned = RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) && 4426 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1); 4427 } 4428 4429 if (!Aligned) { 4430 ErrInfo = "Subtarget requires even aligned vector registers " 4431 "for DS_GWS instructions"; 4432 return false; 4433 } 4434 } 4435 4436 return true; 4437 } 4438 4439 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4440 switch (MI.getOpcode()) { 4441 default: return AMDGPU::INSTRUCTION_LIST_END; 4442 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4443 case AMDGPU::COPY: return AMDGPU::COPY; 4444 case AMDGPU::PHI: return AMDGPU::PHI; 4445 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4446 case AMDGPU::WQM: return AMDGPU::WQM; 4447 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4448 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM; 4449 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM; 4450 case AMDGPU::S_MOV_B32: { 4451 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4452 return MI.getOperand(1).isReg() || 4453 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4454 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4455 } 4456 case AMDGPU::S_ADD_I32: 4457 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4458 case AMDGPU::S_ADDC_U32: 4459 return AMDGPU::V_ADDC_U32_e32; 4460 case AMDGPU::S_SUB_I32: 4461 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4462 // FIXME: These are not consistently handled, and selected when the carry is 4463 // used. 
4464 case AMDGPU::S_ADD_U32: 4465 return AMDGPU::V_ADD_CO_U32_e32; 4466 case AMDGPU::S_SUB_U32: 4467 return AMDGPU::V_SUB_CO_U32_e32; 4468 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4469 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64; 4470 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64; 4471 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64; 4472 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4473 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4474 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4475 case AMDGPU::S_XNOR_B32: 4476 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4477 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4478 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4479 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4480 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4481 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4482 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64; 4483 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4484 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64; 4485 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4486 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64; 4487 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64; 4488 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64; 4489 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64; 4490 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64; 4491 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4492 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4493 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4494 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4495 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64; 4496 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64; 4497 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64; 4498 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64; 4499 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64; 4500 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64; 4501 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64; 4502 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64; 4503 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64; 4504 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64; 4505 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64; 4506 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64; 4507 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64; 4508 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64; 4509 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4510 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4511 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4512 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4513 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4514 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4515 } 4516 llvm_unreachable( 4517 "Unexpected scalar opcode without corresponding vector one!"); 4518 } 4519 4520 static unsigned adjustAllocatableRegClass(const GCNSubtarget &ST, 4521 const MachineRegisterInfo &MRI, 4522 const MCInstrDesc &TID, 4523 unsigned RCID, 4524 bool IsAllocatable) { 4525 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4526 (TID.mayLoad() || TID.mayStore() || 4527 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) { 4528 
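// Descriptive note: narrow the combined AGPR/VGPR (AV_*) operand classes to their VGPR-only equivalents for memory, DS, and MIMG instructions.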
switch (RCID) { 4529 case AMDGPU::AV_32RegClassID: return AMDGPU::VGPR_32RegClassID; 4530 case AMDGPU::AV_64RegClassID: return AMDGPU::VReg_64RegClassID; 4531 case AMDGPU::AV_96RegClassID: return AMDGPU::VReg_96RegClassID; 4532 case AMDGPU::AV_128RegClassID: return AMDGPU::VReg_128RegClassID; 4533 case AMDGPU::AV_160RegClassID: return AMDGPU::VReg_160RegClassID; 4534 default: 4535 break; 4536 } 4537 } 4538 return RCID; 4539 } 4540 4541 const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID, 4542 unsigned OpNum, const TargetRegisterInfo *TRI, 4543 const MachineFunction &MF) 4544 const { 4545 if (OpNum >= TID.getNumOperands()) 4546 return nullptr; 4547 auto RegClass = TID.OpInfo[OpNum].RegClass; 4548 bool IsAllocatable = false; 4549 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) { 4550 // vdst and vdata should both be VGPR or AGPR, same for the DS instructions 4551 // with two data operands. Request a register class constrained to VGPR only 4552 // if both operands are present, as Machine Copy Propagation cannot check this 4553 // constraint, and possibly other passes cannot either. 4554 // 4555 // The check is limited to FLAT and DS because atomics in non-flat encoding 4556 // have their vdst and vdata tied to be the same register. 4557 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4558 AMDGPU::OpName::vdst); 4559 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4560 (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 4561 : AMDGPU::OpName::vdata); 4562 if (DataIdx != -1) { 4563 IsAllocatable = VDstIdx != -1 || 4564 AMDGPU::getNamedOperandIdx(TID.Opcode, 4565 AMDGPU::OpName::data1) != -1; 4566 } 4567 } 4568 RegClass = adjustAllocatableRegClass(ST, MF.getRegInfo(), TID, RegClass, 4569 IsAllocatable); 4570 return RI.getRegClass(RegClass); 4571 } 4572 4573 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4574 unsigned OpNo) const { 4575 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4576 const MCInstrDesc &Desc = get(MI.getOpcode()); 4577 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4578 Desc.OpInfo[OpNo].RegClass == -1) { 4579 Register Reg = MI.getOperand(OpNo).getReg(); 4580 4581 if (Reg.isVirtual()) 4582 return MRI.getRegClass(Reg); 4583 return RI.getPhysRegClass(Reg); 4584 } 4585 4586 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4587 RCID = adjustAllocatableRegClass(ST, MRI, Desc, RCID, true); 4588 return RI.getRegClass(RCID); 4589 } 4590 4591 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4592 MachineBasicBlock::iterator I = MI; 4593 MachineBasicBlock *MBB = MI.getParent(); 4594 MachineOperand &MO = MI.getOperand(OpIdx); 4595 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4596 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4597 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4598 unsigned Size = RI.getRegSizeInBits(*RC); 4599 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4600 if (MO.isReg()) 4601 Opcode = AMDGPU::COPY; 4602 else if (RI.isSGPRClass(RC)) 4603 Opcode = (Size == 64) ?
AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4604 4605 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4606 const TargetRegisterClass *VRC64 = RI.getVGPR64Class(); 4607 if (RI.getCommonSubClass(VRC64, VRC)) 4608 VRC = VRC64; 4609 else 4610 VRC = &AMDGPU::VGPR_32RegClass; 4611 4612 Register Reg = MRI.createVirtualRegister(VRC); 4613 DebugLoc DL = MBB->findDebugLoc(I); 4614 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4615 MO.ChangeToRegister(Reg, false); 4616 } 4617 4618 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4619 MachineRegisterInfo &MRI, 4620 MachineOperand &SuperReg, 4621 const TargetRegisterClass *SuperRC, 4622 unsigned SubIdx, 4623 const TargetRegisterClass *SubRC) 4624 const { 4625 MachineBasicBlock *MBB = MI->getParent(); 4626 DebugLoc DL = MI->getDebugLoc(); 4627 Register SubReg = MRI.createVirtualRegister(SubRC); 4628 4629 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4630 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4631 .addReg(SuperReg.getReg(), 0, SubIdx); 4632 return SubReg; 4633 } 4634 4635 // Just in case the super register is itself a sub-register, copy it to a new 4636 // value so we don't need to worry about merging its subreg index with the 4637 // SubIdx passed to this function. The register coalescer should be able to 4638 // eliminate this extra copy. 4639 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4640 4641 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4642 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4643 4644 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4645 .addReg(NewSuperReg, 0, SubIdx); 4646 4647 return SubReg; 4648 } 4649 4650 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4651 MachineBasicBlock::iterator MII, 4652 MachineRegisterInfo &MRI, 4653 MachineOperand &Op, 4654 const TargetRegisterClass *SuperRC, 4655 unsigned SubIdx, 4656 const TargetRegisterClass *SubRC) const { 4657 if (Op.isImm()) { 4658 if (SubIdx == AMDGPU::sub0) 4659 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4660 if (SubIdx == AMDGPU::sub1) 4661 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4662 4663 llvm_unreachable("Unhandled register index for immediate"); 4664 } 4665 4666 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4667 SubIdx, SubRC); 4668 return MachineOperand::CreateReg(SubReg, false); 4669 } 4670 4671 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4672 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4673 assert(Inst.getNumExplicitOperands() == 3); 4674 MachineOperand Op1 = Inst.getOperand(1); 4675 Inst.RemoveOperand(1); 4676 Inst.addOperand(Op1); 4677 } 4678 4679 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4680 const MCOperandInfo &OpInfo, 4681 const MachineOperand &MO) const { 4682 if (!MO.isReg()) 4683 return false; 4684 4685 Register Reg = MO.getReg(); 4686 4687 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4688 if (Reg.isPhysical()) 4689 return DRC->contains(Reg); 4690 4691 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4692 4693 if (MO.getSubReg()) { 4694 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4695 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4696 if (!SuperRC) 4697 return false; 4698 4699 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4700 if (!DRC) 4701 return false; 4702 } 4703 return RC->hasSuperClassEq(DRC); 4704 } 4705 4706 
bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4707 const MCOperandInfo &OpInfo, 4708 const MachineOperand &MO) const { 4709 if (MO.isReg()) 4710 return isLegalRegOperand(MRI, OpInfo, MO); 4711 4712 // Handle non-register types that are treated like immediates. 4713 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4714 return true; 4715 } 4716 4717 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4718 const MachineOperand *MO) const { 4719 const MachineFunction &MF = *MI.getParent()->getParent(); 4720 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4721 const MCInstrDesc &InstDesc = MI.getDesc(); 4722 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4723 const TargetRegisterClass *DefinedRC = 4724 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4725 if (!MO) 4726 MO = &MI.getOperand(OpIdx); 4727 4728 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4729 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4730 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4731 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4732 return false; 4733 4734 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4735 if (MO->isReg()) 4736 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4737 4738 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4739 if (i == OpIdx) 4740 continue; 4741 const MachineOperand &Op = MI.getOperand(i); 4742 if (Op.isReg()) { 4743 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4744 if (!SGPRsUsed.count(SGPR) && 4745 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4746 if (--ConstantBusLimit <= 0) 4747 return false; 4748 SGPRsUsed.insert(SGPR); 4749 } 4750 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4751 if (--ConstantBusLimit <= 0) 4752 return false; 4753 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4754 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4755 if (!VOP3LiteralLimit--) 4756 return false; 4757 if (--ConstantBusLimit <= 0) 4758 return false; 4759 } 4760 } 4761 } 4762 4763 if (MO->isReg()) { 4764 assert(DefinedRC); 4765 if (!isLegalRegOperand(MRI, OpInfo, *MO)) 4766 return false; 4767 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); 4768 if (IsAGPR && !ST.hasMAIInsts()) 4769 return false; 4770 unsigned Opc = MI.getOpcode(); 4771 if (IsAGPR && 4772 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4773 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc))) 4774 return false; 4775 // Atomics should have both vdst and vdata either vgpr or agpr. 4776 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 4777 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc, 4778 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata); 4779 if ((int)OpIdx == VDstIdx && DataIdx != -1 && 4780 MI.getOperand(DataIdx).isReg() && 4781 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) 4782 return false; 4783 if ((int)OpIdx == DataIdx) { 4784 if (VDstIdx != -1 && 4785 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) 4786 return false; 4787 // DS instructions with 2 src operands also must have tied RC. 
4788 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, 4789 AMDGPU::OpName::data1); 4790 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() && 4791 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) 4792 return false; 4793 } 4794 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && 4795 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) && 4796 RI.isSGPRReg(MRI, MO->getReg())) 4797 return false; 4798 return true; 4799 } 4800 4801 // Handle non-register types that are treated like immediates. 4802 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4803 4804 if (!DefinedRC) { 4805 // This operand expects an immediate. 4806 return true; 4807 } 4808 4809 return isImmOperandLegal(MI, OpIdx, *MO); 4810 } 4811 4812 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4813 MachineInstr &MI) const { 4814 unsigned Opc = MI.getOpcode(); 4815 const MCInstrDesc &InstrDesc = get(Opc); 4816 4817 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4818 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4819 4820 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4821 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4822 4823 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4824 // we need to only have one constant bus use before GFX10. 4825 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4826 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4827 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4828 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4829 legalizeOpWithMove(MI, Src0Idx); 4830 4831 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4832 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4833 // src0/src1 with V_READFIRSTLANE. 4834 if (Opc == AMDGPU::V_WRITELANE_B32) { 4835 const DebugLoc &DL = MI.getDebugLoc(); 4836 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4837 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4838 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4839 .add(Src0); 4840 Src0.ChangeToRegister(Reg, false); 4841 } 4842 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4843 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4844 const DebugLoc &DL = MI.getDebugLoc(); 4845 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4846 .add(Src1); 4847 Src1.ChangeToRegister(Reg, false); 4848 } 4849 return; 4850 } 4851 4852 // No VOP2 instructions support AGPRs. 4853 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4854 legalizeOpWithMove(MI, Src0Idx); 4855 4856 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4857 legalizeOpWithMove(MI, Src1Idx); 4858 4859 // VOP2 src0 instructions support all operand types, so we don't need to check 4860 // their legality. If src1 is already legal, we don't need to do anything. 4861 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4862 return; 4863 4864 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4865 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4866 // select is uniform. 
4867 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4868 RI.isVGPR(MRI, Src1.getReg())) { 4869 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4870 const DebugLoc &DL = MI.getDebugLoc(); 4871 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4872 .add(Src1); 4873 Src1.ChangeToRegister(Reg, false); 4874 return; 4875 } 4876 4877 // We do not use commuteInstruction here because it is too aggressive and will 4878 // commute if it is possible. We only want to commute here if it improves 4879 // legality. This can be called a fairly large number of times so don't waste 4880 // compile time pointlessly swapping and checking legality again. 4881 if (HasImplicitSGPR || !MI.isCommutable()) { 4882 legalizeOpWithMove(MI, Src1Idx); 4883 return; 4884 } 4885 4886 // If src0 can be used as src1, commuting will make the operands legal. 4887 // Otherwise we have to give up and insert a move. 4888 // 4889 // TODO: Other immediate-like operand kinds could be commuted if there was a 4890 // MachineOperand::ChangeTo* for them. 4891 if ((!Src1.isImm() && !Src1.isReg()) || 4892 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4893 legalizeOpWithMove(MI, Src1Idx); 4894 return; 4895 } 4896 4897 int CommutedOpc = commuteOpcode(MI); 4898 if (CommutedOpc == -1) { 4899 legalizeOpWithMove(MI, Src1Idx); 4900 return; 4901 } 4902 4903 MI.setDesc(get(CommutedOpc)); 4904 4905 Register Src0Reg = Src0.getReg(); 4906 unsigned Src0SubReg = Src0.getSubReg(); 4907 bool Src0Kill = Src0.isKill(); 4908 4909 if (Src1.isImm()) 4910 Src0.ChangeToImmediate(Src1.getImm()); 4911 else if (Src1.isReg()) { 4912 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4913 Src0.setSubReg(Src1.getSubReg()); 4914 } else 4915 llvm_unreachable("Should only have register or immediate operands"); 4916 4917 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4918 Src1.setSubReg(Src0SubReg); 4919 fixImplicitOperands(MI); 4920 } 4921 4922 // Legalize VOP3 operands. All operand types are supported for any operand 4923 // but only one literal constant and only starting from GFX10. 4924 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4925 MachineInstr &MI) const { 4926 unsigned Opc = MI.getOpcode(); 4927 4928 int VOP3Idx[3] = { 4929 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4930 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4931 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4932 }; 4933 4934 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 || 4935 Opc == AMDGPU::V_PERMLANEX16_B32_e64) { 4936 // src1 and src2 must be scalar 4937 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4938 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4939 const DebugLoc &DL = MI.getDebugLoc(); 4940 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4941 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4942 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4943 .add(Src1); 4944 Src1.ChangeToRegister(Reg, false); 4945 } 4946 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4947 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4948 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4949 .add(Src2); 4950 Src2.ChangeToRegister(Reg, false); 4951 } 4952 } 4953 4954 // Find the one SGPR operand we are allowed to use. 4955 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4956 int LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4957 SmallDenseSet<unsigned> SGPRsUsed; 4958 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 4959 if (SGPRReg != AMDGPU::NoRegister) { 4960 SGPRsUsed.insert(SGPRReg); 4961 --ConstantBusLimit; 4962 } 4963 4964 for (unsigned i = 0; i < 3; ++i) { 4965 int Idx = VOP3Idx[i]; 4966 if (Idx == -1) 4967 break; 4968 MachineOperand &MO = MI.getOperand(Idx); 4969 4970 if (!MO.isReg()) { 4971 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4972 continue; 4973 4974 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4975 --LiteralLimit; 4976 --ConstantBusLimit; 4977 continue; 4978 } 4979 4980 --LiteralLimit; 4981 --ConstantBusLimit; 4982 legalizeOpWithMove(MI, Idx); 4983 continue; 4984 } 4985 4986 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) && 4987 !isOperandLegal(MI, Idx, &MO)) { 4988 legalizeOpWithMove(MI, Idx); 4989 continue; 4990 } 4991 4992 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg()))) 4993 continue; // VGPRs are legal 4994 4995 // We can use one SGPR in each VOP3 instruction prior to GFX10 4996 // and two starting from GFX10. 4997 if (SGPRsUsed.count(MO.getReg())) 4998 continue; 4999 if (ConstantBusLimit > 0) { 5000 SGPRsUsed.insert(MO.getReg()); 5001 --ConstantBusLimit; 5002 continue; 5003 } 5004 5005 // If we make it this far, then the operand is not legal and we must 5006 // legalize it. 5007 legalizeOpWithMove(MI, Idx); 5008 } 5009 } 5010 5011 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, 5012 MachineRegisterInfo &MRI) const { 5013 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 5014 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 5015 Register DstReg = MRI.createVirtualRegister(SRC); 5016 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 5017 5018 if (RI.hasAGPRs(VRC)) { 5019 VRC = RI.getEquivalentVGPRClass(VRC); 5020 Register NewSrcReg = MRI.createVirtualRegister(VRC); 5021 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5022 get(TargetOpcode::COPY), NewSrcReg) 5023 .addReg(SrcReg); 5024 SrcReg = NewSrcReg; 5025 } 5026 5027 if (SubRegs == 1) { 5028 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5029 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 5030 .addReg(SrcReg); 5031 return DstReg; 5032 } 5033 5034 SmallVector<unsigned, 8> SRegs; 5035 for (unsigned i = 0; i < SubRegs; ++i) { 5036 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5037 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5038 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 5039 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 5040 SRegs.push_back(SGPR); 5041 } 5042 5043 MachineInstrBuilder MIB = 5044 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5045 get(AMDGPU::REG_SEQUENCE), DstReg); 5046 for (unsigned i = 0; i < SubRegs; ++i) { 5047 MIB.addReg(SRegs[i]); 5048 MIB.addImm(RI.getSubRegFromChannel(i)); 5049 } 5050 return DstReg; 5051 } 5052 5053 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 5054 MachineInstr &MI) const { 5055 5056 // If the pointer is store in VGPRs, then we need to move them to 5057 // SGPRs using v_readfirstlane. This is safe because we only select 5058 // loads with uniform pointers to SMRD instruction so we know the 5059 // pointer value is uniform. 
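  // As a rough sketch (virtual register names are illustrative only): a
  // 64-bit sbase that was selected into a VGPR pair is made uniform through
  // readlaneVGPRToSGPR() above, producing roughly
  //   %lo:sgpr_32    = V_READFIRSTLANE_B32 %vbase.sub0
  //   %hi:sgpr_32    = V_READFIRSTLANE_B32 %vbase.sub1
  //   %sbase:sreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1
  // after which the SMRD reads its pointer from %sbase.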
5060   MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5061   if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5062     Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5063     SBase->setReg(SGPR);
5064   }
5065   MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
5066   if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5067     Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5068     SOff->setReg(SGPR);
5069   }
5070 }
5071 
5072 bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
5073   unsigned Opc = Inst.getOpcode();
5074   int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
5075   if (OldSAddrIdx < 0)
5076     return false;
5077 
5078   assert(isSegmentSpecificFLAT(Inst));
5079 
5080   int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
5081   if (NewOpc < 0)
5082     NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
5083   if (NewOpc < 0)
5084     return false;
5085 
5086   MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
5087   MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
5088   if (RI.isSGPRReg(MRI, SAddr.getReg()))
5089     return false;
5090 
5091   int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
5092   if (NewVAddrIdx < 0)
5093     return false;
5094 
5095   int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
5096 
5097   // Check vaddr, it shall be zero or absent.
5098   MachineInstr *VAddrDef = nullptr;
5099   if (OldVAddrIdx >= 0) {
5100     MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
5101     VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
5102     if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 ||
5103         !VAddrDef->getOperand(1).isImm() ||
5104         VAddrDef->getOperand(1).getImm() != 0)
5105       return false;
5106   }
5107 
5108   const MCInstrDesc &NewDesc = get(NewOpc);
5109   Inst.setDesc(NewDesc);
5110 
5111   // Callers expect the iterator to be valid after this call, so modify the
5112   // instruction in place.
5113   if (OldVAddrIdx == NewVAddrIdx) {
5114     MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
5115     // Clear use list from the old vaddr holding a zero register.
5116     MRI.removeRegOperandFromUseList(&NewVAddr);
5117     MRI.moveOperands(&NewVAddr, &SAddr, 1);
5118     Inst.RemoveOperand(OldSAddrIdx);
5119     // Update the use list with the pointer we have just moved from vaddr to
5120     // the saddr position. Otherwise the new vaddr will be missing from the use list.
5121     MRI.removeRegOperandFromUseList(&NewVAddr);
5122     MRI.addRegOperandToUseList(&NewVAddr);
5123   } else {
5124     assert(OldSAddrIdx == NewVAddrIdx);
5125 
5126     if (OldVAddrIdx >= 0) {
5127       int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
5128                                                  AMDGPU::OpName::vdst_in);
5129 
5130       // RemoveOperand doesn't try to fix up tied operand indexes as it goes, so
5131       // it asserts. Untie the operands for now and retie them afterwards.
5132       if (NewVDstIn != -1) {
5133         int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
5134         Inst.untieRegOperand(OldVDstIn);
5135       }
5136 
5137       Inst.RemoveOperand(OldVAddrIdx);
5138 
5139       if (NewVDstIn != -1) {
5140         int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
5141         Inst.tieOperands(NewVDst, NewVDstIn);
5142       }
5143     }
5144   }
5145 
5146   if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
5147     VAddrDef->eraseFromParent();
5148 
5149   return true;
5150 }
5151 
5152 // FIXME: Remove this when SelectionDAG is obsoleted.
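// Illustrative example of what the FLAT legalization below does (opcode and
// register names are schematic, not from a real test): a segment-specific
// FLAT instruction whose saddr ended up in a VGPR and whose vaddr is a
// materialized zero, e.g.
//   %zero:vgpr_32 = V_MOV_B32_e32 0
//   ... = GLOBAL_LOAD_DWORD_SADDR ... saddr = %addr:vreg_64, vaddr = %zero ...
// is first handed to moveFlatAddrToVGPR() above, which rewrites it to the
// plain VGPR-address form and moves %addr into the vaddr slot:
//   ... = GLOBAL_LOAD_DWORD ... vaddr = %addr ...
// Only when no such form exists does legalizeOperandsFLAT() fall back to a
// V_READFIRSTLANE of saddr.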
5153 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 5154 MachineInstr &MI) const { 5155 if (!isSegmentSpecificFLAT(MI)) 5156 return; 5157 5158 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 5159 // thinks they are uniform, so a readfirstlane should be valid. 5160 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 5161 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 5162 return; 5163 5164 if (moveFlatAddrToVGPR(MI)) 5165 return; 5166 5167 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 5168 SAddr->setReg(ToSGPR); 5169 } 5170 5171 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 5172 MachineBasicBlock::iterator I, 5173 const TargetRegisterClass *DstRC, 5174 MachineOperand &Op, 5175 MachineRegisterInfo &MRI, 5176 const DebugLoc &DL) const { 5177 Register OpReg = Op.getReg(); 5178 unsigned OpSubReg = Op.getSubReg(); 5179 5180 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 5181 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 5182 5183 // Check if operand is already the correct register class. 5184 if (DstRC == OpRC) 5185 return; 5186 5187 Register DstReg = MRI.createVirtualRegister(DstRC); 5188 MachineInstr *Copy = 5189 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 5190 5191 Op.setReg(DstReg); 5192 Op.setSubReg(0); 5193 5194 MachineInstr *Def = MRI.getVRegDef(OpReg); 5195 if (!Def) 5196 return; 5197 5198 // Try to eliminate the copy if it is copying an immediate value. 5199 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 5200 FoldImmediate(*Copy, *Def, OpReg, &MRI); 5201 5202 bool ImpDef = Def->isImplicitDef(); 5203 while (!ImpDef && Def && Def->isCopy()) { 5204 if (Def->getOperand(1).getReg().isPhysical()) 5205 break; 5206 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 5207 ImpDef = Def && Def->isImplicitDef(); 5208 } 5209 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 5210 !ImpDef) 5211 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 5212 } 5213 5214 // Emit the actual waterfall loop, executing the wrapped instruction for each 5215 // unique value of \p Rsrc across all lanes. In the best case we execute 1 5216 // iteration, in the worst case we execute 64 (once per lane). 5217 static void 5218 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 5219 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 5220 const DebugLoc &DL, MachineOperand &Rsrc) { 5221 MachineFunction &MF = *OrigBB.getParent(); 5222 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5223 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5224 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5225 unsigned SaveExecOpc = 5226 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 5227 unsigned XorTermOpc = 5228 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 5229 unsigned AndOpc = 5230 ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5231 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5232 5233 MachineBasicBlock::iterator I = LoopBB.begin(); 5234 5235 SmallVector<Register, 8> ReadlanePieces; 5236 Register CondReg = AMDGPU::NoRegister; 5237 5238 Register VRsrc = Rsrc.getReg(); 5239 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 5240 5241 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 5242 unsigned NumSubRegs = RegSize / 32; 5243 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 5244 5245 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 5246 5247 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5248 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5249 5250 // Read the next variant <- also loop target. 5251 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 5252 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 5253 5254 // Read the next variant <- also loop target. 5255 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 5256 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 5257 5258 ReadlanePieces.push_back(CurRegLo); 5259 ReadlanePieces.push_back(CurRegHi); 5260 5261 // Comparison is to be done as 64-bit. 5262 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 5263 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 5264 .addReg(CurRegLo) 5265 .addImm(AMDGPU::sub0) 5266 .addReg(CurRegHi) 5267 .addImm(AMDGPU::sub1); 5268 5269 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 5270 auto Cmp = 5271 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 5272 .addReg(CurReg); 5273 if (NumSubRegs <= 2) 5274 Cmp.addReg(VRsrc); 5275 else 5276 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 5277 5278 // Combine the comparision results with AND. 5279 if (CondReg == AMDGPU::NoRegister) // First. 5280 CondReg = NewCondReg; 5281 else { // If not the first, we create an AND. 5282 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 5283 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 5284 .addReg(CondReg) 5285 .addReg(NewCondReg); 5286 CondReg = AndReg; 5287 } 5288 } // End for loop. 5289 5290 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 5291 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 5292 5293 // Build scalar Rsrc. 5294 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 5295 unsigned Channel = 0; 5296 for (Register Piece : ReadlanePieces) { 5297 Merge.addReg(Piece) 5298 .addImm(TRI->getSubRegFromChannel(Channel++)); 5299 } 5300 5301 // Update Rsrc operand to use the SGPR Rsrc. 5302 Rsrc.setReg(SRsrc); 5303 Rsrc.setIsKill(true); 5304 5305 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5306 MRI.setSimpleHint(SaveExec, CondReg); 5307 5308 // Update EXEC to matching lanes, saving original to SaveExec. 5309 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 5310 .addReg(CondReg, RegState::Kill); 5311 5312 // The original instruction is here; we insert the terminators after it. 5313 I = LoopBB.end(); 5314 5315 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 
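  // At this point one iteration of the emitted loop body looks roughly like
  // this (wave64 shown; register names are illustrative only):
  //   %lo   = V_READFIRSTLANE_B32 %vrsrc.sub0
  //   %hi   = V_READFIRSTLANE_B32 %vrsrc.sub1
  //   ...                                   ; repeated for each 64-bit half
  //   %cmp  = V_CMP_EQ_U64_e64 {%lo, %hi}, %vrsrc.sub0_sub1
  //   %cond = S_AND_B64 %cond, %cmp         ; accumulated across halves
  //   %save = S_AND_SAVEEXEC_B64 %cond
  // It is closed off below with S_XOR_B64_term and SI_WATERFALL_LOOP so the
  // loop repeats until every lane's value of Rsrc has been handled.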
5316 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 5317 .addReg(Exec) 5318 .addReg(SaveExec); 5319 5320 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB); 5321 } 5322 5323 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 5324 // with SGPRs by iterating over all unique values across all lanes. 5325 // Returns the loop basic block that now contains \p MI. 5326 static MachineBasicBlock * 5327 loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 5328 MachineOperand &Rsrc, MachineDominatorTree *MDT, 5329 MachineBasicBlock::iterator Begin = nullptr, 5330 MachineBasicBlock::iterator End = nullptr) { 5331 MachineBasicBlock &MBB = *MI.getParent(); 5332 MachineFunction &MF = *MBB.getParent(); 5333 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5334 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5335 MachineRegisterInfo &MRI = MF.getRegInfo(); 5336 if (!Begin.isValid()) 5337 Begin = &MI; 5338 if (!End.isValid()) { 5339 End = &MI; 5340 ++End; 5341 } 5342 const DebugLoc &DL = MI.getDebugLoc(); 5343 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5344 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 5345 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5346 5347 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5348 5349 // Save the EXEC mask 5350 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 5351 5352 // Killed uses in the instruction we are waterfalling around will be 5353 // incorrect due to the added control-flow. 5354 MachineBasicBlock::iterator AfterMI = MI; 5355 ++AfterMI; 5356 for (auto I = Begin; I != AfterMI; I++) { 5357 for (auto &MO : I->uses()) { 5358 if (MO.isReg() && MO.isUse()) { 5359 MRI.clearKillFlags(MO.getReg()); 5360 } 5361 } 5362 } 5363 5364 // To insert the loop we need to split the block. Move everything after this 5365 // point to a new block, and insert a new empty block between the two. 5366 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 5367 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 5368 MachineFunction::iterator MBBI(MBB); 5369 ++MBBI; 5370 5371 MF.insert(MBBI, LoopBB); 5372 MF.insert(MBBI, RemainderBB); 5373 5374 LoopBB->addSuccessor(LoopBB); 5375 LoopBB->addSuccessor(RemainderBB); 5376 5377 // Move Begin to MI to the LoopBB, and the remainder of the block to 5378 // RemainderBB. 5379 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 5380 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 5381 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end()); 5382 5383 MBB.addSuccessor(LoopBB); 5384 5385 // Update dominators. We know that MBB immediately dominates LoopBB, that 5386 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 5387 // dominates all of the successors transferred to it from MBB that MBB used 5388 // to properly dominate. 
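  // Schematically, the control flow after the split is:
  //
  //   MBB -> LoopBB -> RemainderBB -> (former successors of MBB)
  //           ^   |
  //           +---+  LoopBB branches back to itself until all lanes are done
  //
  // hence MBB immediately dominates LoopBB and LoopBB immediately dominates
  // RemainderBB.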
5389 if (MDT) { 5390 MDT->addNewBlock(LoopBB, &MBB); 5391 MDT->addNewBlock(RemainderBB, LoopBB); 5392 for (auto &Succ : RemainderBB->successors()) { 5393 if (MDT->properlyDominates(&MBB, Succ)) { 5394 MDT->changeImmediateDominator(Succ, RemainderBB); 5395 } 5396 } 5397 } 5398 5399 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 5400 5401 // Restore the EXEC mask 5402 MachineBasicBlock::iterator First = RemainderBB->begin(); 5403 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 5404 return LoopBB; 5405 } 5406 5407 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 5408 static std::tuple<unsigned, unsigned> 5409 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 5410 MachineBasicBlock &MBB = *MI.getParent(); 5411 MachineFunction &MF = *MBB.getParent(); 5412 MachineRegisterInfo &MRI = MF.getRegInfo(); 5413 5414 // Extract the ptr from the resource descriptor. 5415 unsigned RsrcPtr = 5416 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, 5417 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 5418 5419 // Create an empty resource descriptor 5420 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 5421 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5422 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5423 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 5424 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); 5425 5426 // Zero64 = 0 5427 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) 5428 .addImm(0); 5429 5430 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 5431 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 5432 .addImm(RsrcDataFormat & 0xFFFFFFFF); 5433 5434 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 5435 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 5436 .addImm(RsrcDataFormat >> 32); 5437 5438 // NewSRsrc = {Zero64, SRsrcFormat} 5439 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) 5440 .addReg(Zero64) 5441 .addImm(AMDGPU::sub0_sub1) 5442 .addReg(SRsrcFormatLo) 5443 .addImm(AMDGPU::sub2) 5444 .addReg(SRsrcFormatHi) 5445 .addImm(AMDGPU::sub3); 5446 5447 return std::make_tuple(RsrcPtr, NewSRsrc); 5448 } 5449 5450 MachineBasicBlock * 5451 SIInstrInfo::legalizeOperands(MachineInstr &MI, 5452 MachineDominatorTree *MDT) const { 5453 MachineFunction &MF = *MI.getParent()->getParent(); 5454 MachineRegisterInfo &MRI = MF.getRegInfo(); 5455 MachineBasicBlock *CreatedBB = nullptr; 5456 5457 // Legalize VOP2 5458 if (isVOP2(MI) || isVOPC(MI)) { 5459 legalizeOperandsVOP2(MRI, MI); 5460 return CreatedBB; 5461 } 5462 5463 // Legalize VOP3 5464 if (isVOP3(MI)) { 5465 legalizeOperandsVOP3(MRI, MI); 5466 return CreatedBB; 5467 } 5468 5469 // Legalize SMRD 5470 if (isSMRD(MI)) { 5471 legalizeOperandsSMRD(MRI, MI); 5472 return CreatedBB; 5473 } 5474 5475 // Legalize FLAT 5476 if (isFLAT(MI)) { 5477 legalizeOperandsFLAT(MRI, MI); 5478 return CreatedBB; 5479 } 5480 5481 // Legalize REG_SEQUENCE and PHI 5482 // The register class of the operands much be the same type as the register 5483 // class of the output. 
5484   if (MI.getOpcode() == AMDGPU::PHI) {
5485     const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5486     for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5487       if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5488         continue;
5489       const TargetRegisterClass *OpRC =
5490           MRI.getRegClass(MI.getOperand(i).getReg());
5491       if (RI.hasVectorRegisters(OpRC)) {
5492         VRC = OpRC;
5493       } else {
5494         SRC = OpRC;
5495       }
5496     }
5497 
5498     // If any of the operands are VGPR registers, then they all must be VGPRs,
5499     // otherwise we will create illegal VGPR->SGPR copies when legalizing
5500     // them.
5501     if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5502       if (!VRC) {
5503         assert(SRC);
5504         if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5505           VRC = &AMDGPU::VReg_1RegClass;
5506         } else
5507           VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5508                     ? RI.getEquivalentAGPRClass(SRC)
5509                     : RI.getEquivalentVGPRClass(SRC);
5510       } else {
5511         VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5512                   ? RI.getEquivalentAGPRClass(VRC)
5513                   : RI.getEquivalentVGPRClass(VRC);
5514       }
5515       RC = VRC;
5516     } else {
5517       RC = SRC;
5518     }
5519 
5520     // Update all the operands so they have the same type.
5521     for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5522       MachineOperand &Op = MI.getOperand(I);
5523       if (!Op.isReg() || !Op.getReg().isVirtual())
5524         continue;
5525 
5526       // MI is a PHI instruction.
5527       MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5528       MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5529 
5530       // Avoid creating no-op copies with the same src and dst reg class. These
5531       // confuse some of the machine passes.
5532       legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5533     }
5534   }
5535 
5536   // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5537   // VGPR dest type and SGPR sources, insert copies so all operands are
5538   // VGPRs. This seems to help operand folding / the register coalescer.
5539   if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5540     MachineBasicBlock *MBB = MI.getParent();
5541     const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5542     if (RI.hasVGPRs(DstRC)) {
5543       // Update all the operands so they are VGPR register classes. These may
5544       // not be the same register class because REG_SEQUENCE supports mixing
5545       // subregister index types e.g.
sub0_sub1 + sub2 + sub3 5546 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5547 MachineOperand &Op = MI.getOperand(I); 5548 if (!Op.isReg() || !Op.getReg().isVirtual()) 5549 continue; 5550 5551 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5552 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5553 if (VRC == OpRC) 5554 continue; 5555 5556 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5557 Op.setIsKill(); 5558 } 5559 } 5560 5561 return CreatedBB; 5562 } 5563 5564 // Legalize INSERT_SUBREG 5565 // src0 must have the same register class as dst 5566 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5567 Register Dst = MI.getOperand(0).getReg(); 5568 Register Src0 = MI.getOperand(1).getReg(); 5569 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5570 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5571 if (DstRC != Src0RC) { 5572 MachineBasicBlock *MBB = MI.getParent(); 5573 MachineOperand &Op = MI.getOperand(1); 5574 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5575 } 5576 return CreatedBB; 5577 } 5578 5579 // Legalize SI_INIT_M0 5580 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5581 MachineOperand &Src = MI.getOperand(0); 5582 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5583 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5584 return CreatedBB; 5585 } 5586 5587 // Legalize MIMG and MUBUF/MTBUF for shaders. 5588 // 5589 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5590 // scratch memory access. In both cases, the legalization never involves 5591 // conversion to the addr64 form. 5592 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) && 5593 (isMUBUF(MI) || isMTBUF(MI)))) { 5594 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 5595 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) 5596 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT); 5597 5598 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 5599 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) 5600 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT); 5601 5602 return CreatedBB; 5603 } 5604 5605 // Legalize SI_CALL 5606 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 5607 MachineOperand *Dest = &MI.getOperand(0); 5608 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { 5609 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and 5610 // following copies, we also need to move copies from and to physical 5611 // registers into the loop block. 5612 unsigned FrameSetupOpcode = getCallFrameSetupOpcode(); 5613 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode(); 5614 5615 // Also move the copies to physical registers into the loop block 5616 MachineBasicBlock &MBB = *MI.getParent(); 5617 MachineBasicBlock::iterator Start(&MI); 5618 while (Start->getOpcode() != FrameSetupOpcode) 5619 --Start; 5620 MachineBasicBlock::iterator End(&MI); 5621 while (End->getOpcode() != FrameDestroyOpcode) 5622 ++End; 5623 // Also include following copies of the return value 5624 ++End; 5625 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() && 5626 MI.definesRegister(End->getOperand(1).getReg())) 5627 ++End; 5628 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End); 5629 } 5630 } 5631 5632 // Legalize MUBUF* instructions. 
5633   int RsrcIdx =
5634       AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5635   if (RsrcIdx != -1) {
5636     // We have a MUBUF instruction
5637     MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5638     unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5639     if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5640                              RI.getRegClass(RsrcRC))) {
5641       // The operands are legal.
5642       // FIXME: We may need to legalize operands besides srsrc.
5643       return CreatedBB;
5644     }
5645 
5646     // Legalize a VGPR Rsrc.
5647     //
5648     // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5649     // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5650     // a zero-value SRsrc.
5651     //
5652     // If the instruction is _OFFSET (both idxen and offen disabled), and we
5653     // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5654     // above.
5655     //
5656     // Otherwise we are on non-ADDR64 hardware, and/or we have
5657     // idxen/offen/bothen and we fall back to a waterfall loop.
5658 
5659     MachineBasicBlock &MBB = *MI.getParent();
5660 
5661     MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5662     if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5663       // This is already an ADDR64 instruction so we need to add the pointer
5664       // extracted from the resource descriptor to the current value of VAddr.
5665       Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5666       Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5667       Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5668 
5669       const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5670       Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5671       Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5672 
5673       unsigned RsrcPtr, NewSRsrc;
5674       std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5675 
5676       // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5677       const DebugLoc &DL = MI.getDebugLoc();
5678       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5679           .addDef(CondReg0)
5680           .addReg(RsrcPtr, 0, AMDGPU::sub0)
5681           .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5682           .addImm(0);
5683 
5684       // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5685       BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5686           .addDef(CondReg1, RegState::Dead)
5687           .addReg(RsrcPtr, 0, AMDGPU::sub1)
5688           .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5689           .addReg(CondReg0, RegState::Kill)
5690           .addImm(0);
5691 
5692       // NewVaddr = {NewVaddrHi, NewVaddrLo}
5693       BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5694           .addReg(NewVAddrLo)
5695           .addImm(AMDGPU::sub0)
5696           .addReg(NewVAddrHi)
5697           .addImm(AMDGPU::sub1);
5698 
5699       VAddr->setReg(NewVAddr);
5700       Rsrc->setReg(NewSRsrc);
5701     } else if (!VAddr && ST.hasAddr64()) {
5702       // This instruction is the _OFFSET variant, so we need to convert it to
5703       // ADDR64.
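      // Schematic before/after for this case (operand layout is abbreviated
      // and register names are illustrative only):
      //   %vdst = BUFFER_LOAD_DWORD_OFFSET %vrsrc, %soffset, imm, ...
      // becomes
      //   %newvaddr:vreg_64 = COPY of %vrsrc.sub0_sub1   ; via extractRsrcPtr()
      //   %newsrsrc         = { 0, 0, RSRC_DATA_FORMAT }
      //   %vdst = BUFFER_LOAD_DWORD_ADDR64 %newvaddr, %newsrsrc, %soffset, imm, ...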
5704 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS && 5705 "FIXME: Need to emit flat atomics here"); 5706 5707 unsigned RsrcPtr, NewSRsrc; 5708 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 5709 5710 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5711 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 5712 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 5713 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 5714 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 5715 5716 // Atomics rith return have have an additional tied operand and are 5717 // missing some of the special bits. 5718 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 5719 MachineInstr *Addr64; 5720 5721 if (!VDataIn) { 5722 // Regular buffer load / store. 5723 MachineInstrBuilder MIB = 5724 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5725 .add(*VData) 5726 .addReg(NewVAddr) 5727 .addReg(NewSRsrc) 5728 .add(*SOffset) 5729 .add(*Offset); 5730 5731 if (const MachineOperand *CPol = 5732 getNamedOperand(MI, AMDGPU::OpName::cpol)) { 5733 MIB.addImm(CPol->getImm()); 5734 } 5735 5736 if (const MachineOperand *TFE = 5737 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 5738 MIB.addImm(TFE->getImm()); 5739 } 5740 5741 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz)); 5742 5743 MIB.cloneMemRefs(MI); 5744 Addr64 = MIB; 5745 } else { 5746 // Atomics with return. 5747 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5748 .add(*VData) 5749 .add(*VDataIn) 5750 .addReg(NewVAddr) 5751 .addReg(NewSRsrc) 5752 .add(*SOffset) 5753 .add(*Offset) 5754 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol)) 5755 .cloneMemRefs(MI); 5756 } 5757 5758 MI.removeFromParent(); 5759 5760 // NewVaddr = {NewVaddrHi, NewVaddrLo} 5761 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 5762 NewVAddr) 5763 .addReg(RsrcPtr, 0, AMDGPU::sub0) 5764 .addImm(AMDGPU::sub0) 5765 .addReg(RsrcPtr, 0, AMDGPU::sub1) 5766 .addImm(AMDGPU::sub1); 5767 } else { 5768 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 5769 // to SGPRs. 5770 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 5771 return CreatedBB; 5772 } 5773 } 5774 return CreatedBB; 5775 } 5776 5777 MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst, 5778 MachineDominatorTree *MDT) const { 5779 SetVectorType Worklist; 5780 Worklist.insert(&TopInst); 5781 MachineBasicBlock *CreatedBB = nullptr; 5782 MachineBasicBlock *CreatedBBTmp = nullptr; 5783 5784 while (!Worklist.empty()) { 5785 MachineInstr &Inst = *Worklist.pop_back_val(); 5786 MachineBasicBlock *MBB = Inst.getParent(); 5787 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 5788 5789 unsigned Opcode = Inst.getOpcode(); 5790 unsigned NewOpcode = getVALUOp(Inst); 5791 5792 // Handle some special cases 5793 switch (Opcode) { 5794 default: 5795 break; 5796 case AMDGPU::S_ADD_U64_PSEUDO: 5797 case AMDGPU::S_SUB_U64_PSEUDO: 5798 splitScalar64BitAddSub(Worklist, Inst, MDT); 5799 Inst.eraseFromParent(); 5800 continue; 5801 case AMDGPU::S_ADD_I32: 5802 case AMDGPU::S_SUB_I32: { 5803 // FIXME: The u32 versions currently selected use the carry. 
5804 bool Changed; 5805 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT); 5806 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 5807 CreatedBB = CreatedBBTmp; 5808 if (Changed) 5809 continue; 5810 5811 // Default handling 5812 break; 5813 } 5814 case AMDGPU::S_AND_B64: 5815 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5816 Inst.eraseFromParent(); 5817 continue; 5818 5819 case AMDGPU::S_OR_B64: 5820 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5821 Inst.eraseFromParent(); 5822 continue; 5823 5824 case AMDGPU::S_XOR_B64: 5825 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5826 Inst.eraseFromParent(); 5827 continue; 5828 5829 case AMDGPU::S_NAND_B64: 5830 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5831 Inst.eraseFromParent(); 5832 continue; 5833 5834 case AMDGPU::S_NOR_B64: 5835 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5836 Inst.eraseFromParent(); 5837 continue; 5838 5839 case AMDGPU::S_XNOR_B64: 5840 if (ST.hasDLInsts()) 5841 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5842 else 5843 splitScalar64BitXnor(Worklist, Inst, MDT); 5844 Inst.eraseFromParent(); 5845 continue; 5846 5847 case AMDGPU::S_ANDN2_B64: 5848 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5849 Inst.eraseFromParent(); 5850 continue; 5851 5852 case AMDGPU::S_ORN2_B64: 5853 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5854 Inst.eraseFromParent(); 5855 continue; 5856 5857 case AMDGPU::S_BREV_B64: 5858 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true); 5859 Inst.eraseFromParent(); 5860 continue; 5861 5862 case AMDGPU::S_NOT_B64: 5863 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5864 Inst.eraseFromParent(); 5865 continue; 5866 5867 case AMDGPU::S_BCNT1_I32_B64: 5868 splitScalar64BitBCNT(Worklist, Inst); 5869 Inst.eraseFromParent(); 5870 continue; 5871 5872 case AMDGPU::S_BFE_I64: 5873 splitScalar64BitBFE(Worklist, Inst); 5874 Inst.eraseFromParent(); 5875 continue; 5876 5877 case AMDGPU::S_LSHL_B32: 5878 if (ST.hasOnlyRevVALUShifts()) { 5879 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5880 swapOperands(Inst); 5881 } 5882 break; 5883 case AMDGPU::S_ASHR_I32: 5884 if (ST.hasOnlyRevVALUShifts()) { 5885 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5886 swapOperands(Inst); 5887 } 5888 break; 5889 case AMDGPU::S_LSHR_B32: 5890 if (ST.hasOnlyRevVALUShifts()) { 5891 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5892 swapOperands(Inst); 5893 } 5894 break; 5895 case AMDGPU::S_LSHL_B64: 5896 if (ST.hasOnlyRevVALUShifts()) { 5897 NewOpcode = AMDGPU::V_LSHLREV_B64_e64; 5898 swapOperands(Inst); 5899 } 5900 break; 5901 case AMDGPU::S_ASHR_I64: 5902 if (ST.hasOnlyRevVALUShifts()) { 5903 NewOpcode = AMDGPU::V_ASHRREV_I64_e64; 5904 swapOperands(Inst); 5905 } 5906 break; 5907 case AMDGPU::S_LSHR_B64: 5908 if (ST.hasOnlyRevVALUShifts()) { 5909 NewOpcode = AMDGPU::V_LSHRREV_B64_e64; 5910 swapOperands(Inst); 5911 } 5912 break; 5913 5914 case AMDGPU::S_ABS_I32: 5915 lowerScalarAbs(Worklist, Inst); 5916 Inst.eraseFromParent(); 5917 continue; 5918 5919 case AMDGPU::S_CBRANCH_SCC0: 5920 case AMDGPU::S_CBRANCH_SCC1: { 5921 // Clear unused bits of vcc 5922 Register CondReg = Inst.getOperand(1).getReg(); 5923 bool IsSCC = CondReg == AMDGPU::SCC; 5924 Register VCC = RI.getVCC(); 5925 Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5926 unsigned Opc = ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5927 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC) 5928 .addReg(EXEC) 5929 .addReg(IsSCC ? VCC : CondReg); 5930 Inst.RemoveOperand(1); 5931 } 5932 break; 5933 5934 case AMDGPU::S_BFE_U64: 5935 case AMDGPU::S_BFM_B64: 5936 llvm_unreachable("Moving this op to VALU not implemented"); 5937 5938 case AMDGPU::S_PACK_LL_B32_B16: 5939 case AMDGPU::S_PACK_LH_B32_B16: 5940 case AMDGPU::S_PACK_HH_B32_B16: 5941 movePackToVALU(Worklist, MRI, Inst); 5942 Inst.eraseFromParent(); 5943 continue; 5944 5945 case AMDGPU::S_XNOR_B32: 5946 lowerScalarXnor(Worklist, Inst); 5947 Inst.eraseFromParent(); 5948 continue; 5949 5950 case AMDGPU::S_NAND_B32: 5951 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5952 Inst.eraseFromParent(); 5953 continue; 5954 5955 case AMDGPU::S_NOR_B32: 5956 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5957 Inst.eraseFromParent(); 5958 continue; 5959 5960 case AMDGPU::S_ANDN2_B32: 5961 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5962 Inst.eraseFromParent(); 5963 continue; 5964 5965 case AMDGPU::S_ORN2_B32: 5966 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5967 Inst.eraseFromParent(); 5968 continue; 5969 5970 // TODO: remove as soon as everything is ready 5971 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5972 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5973 // can only be selected from the uniform SDNode. 5974 case AMDGPU::S_ADD_CO_PSEUDO: 5975 case AMDGPU::S_SUB_CO_PSEUDO: { 5976 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5977 ? AMDGPU::V_ADDC_U32_e64 5978 : AMDGPU::V_SUBB_U32_e64; 5979 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5980 5981 Register CarryInReg = Inst.getOperand(4).getReg(); 5982 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 5983 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 5984 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 5985 .addReg(CarryInReg); 5986 } 5987 5988 Register CarryOutReg = Inst.getOperand(1).getReg(); 5989 5990 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5991 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5992 MachineInstr *CarryOp = 5993 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5994 .addReg(CarryOutReg, RegState::Define) 5995 .add(Inst.getOperand(2)) 5996 .add(Inst.getOperand(3)) 5997 .addReg(CarryInReg) 5998 .addImm(0); 5999 CreatedBBTmp = legalizeOperands(*CarryOp); 6000 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6001 CreatedBB = CreatedBBTmp; 6002 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 6003 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 6004 Inst.eraseFromParent(); 6005 } 6006 continue; 6007 case AMDGPU::S_UADDO_PSEUDO: 6008 case AMDGPU::S_USUBO_PSEUDO: { 6009 const DebugLoc &DL = Inst.getDebugLoc(); 6010 MachineOperand &Dest0 = Inst.getOperand(0); 6011 MachineOperand &Dest1 = Inst.getOperand(1); 6012 MachineOperand &Src0 = Inst.getOperand(2); 6013 MachineOperand &Src1 = Inst.getOperand(3); 6014 6015 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 6016 ? 
AMDGPU::V_ADD_CO_U32_e64 6017 : AMDGPU::V_SUB_CO_U32_e64; 6018 const TargetRegisterClass *NewRC = 6019 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 6020 Register DestReg = MRI.createVirtualRegister(NewRC); 6021 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 6022 .addReg(Dest1.getReg(), RegState::Define) 6023 .add(Src0) 6024 .add(Src1) 6025 .addImm(0); // clamp bit 6026 6027 CreatedBBTmp = legalizeOperands(*NewInstr, MDT); 6028 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6029 CreatedBB = CreatedBBTmp; 6030 6031 MRI.replaceRegWith(Dest0.getReg(), DestReg); 6032 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 6033 Worklist); 6034 Inst.eraseFromParent(); 6035 } 6036 continue; 6037 6038 case AMDGPU::S_CSELECT_B32: 6039 lowerSelect32(Worklist, Inst, MDT); 6040 Inst.eraseFromParent(); 6041 continue; 6042 case AMDGPU::S_CSELECT_B64: 6043 splitSelect64(Worklist, Inst, MDT); 6044 Inst.eraseFromParent(); 6045 continue; 6046 case AMDGPU::S_CMP_EQ_I32: 6047 case AMDGPU::S_CMP_LG_I32: 6048 case AMDGPU::S_CMP_GT_I32: 6049 case AMDGPU::S_CMP_GE_I32: 6050 case AMDGPU::S_CMP_LT_I32: 6051 case AMDGPU::S_CMP_LE_I32: 6052 case AMDGPU::S_CMP_EQ_U32: 6053 case AMDGPU::S_CMP_LG_U32: 6054 case AMDGPU::S_CMP_GT_U32: 6055 case AMDGPU::S_CMP_GE_U32: 6056 case AMDGPU::S_CMP_LT_U32: 6057 case AMDGPU::S_CMP_LE_U32: 6058 case AMDGPU::S_CMP_EQ_U64: 6059 case AMDGPU::S_CMP_LG_U64: { 6060 const MCInstrDesc &NewDesc = get(NewOpcode); 6061 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass()); 6062 MachineInstr *NewInstr = 6063 BuildMI(*MBB, Inst, Inst.getDebugLoc(), NewDesc, CondReg) 6064 .add(Inst.getOperand(0)) 6065 .add(Inst.getOperand(1)); 6066 legalizeOperands(*NewInstr, MDT); 6067 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC); 6068 MachineOperand SCCOp = Inst.getOperand(SCCIdx); 6069 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg); 6070 Inst.eraseFromParent(); 6071 } 6072 continue; 6073 } 6074 6075 6076 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 6077 // We cannot move this instruction to the VALU, so we should try to 6078 // legalize its operands instead. 6079 CreatedBBTmp = legalizeOperands(Inst, MDT); 6080 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6081 CreatedBB = CreatedBBTmp; 6082 continue; 6083 } 6084 6085 // Use the new VALU Opcode. 6086 const MCInstrDesc &NewDesc = get(NewOpcode); 6087 Inst.setDesc(NewDesc); 6088 6089 // Remove any references to SCC. Vector instructions can't read from it, and 6090 // We're just about to add the implicit use / defs of VCC, and we don't want 6091 // both. 6092 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 6093 MachineOperand &Op = Inst.getOperand(i); 6094 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 6095 // Only propagate through live-def of SCC. 6096 if (Op.isDef() && !Op.isDead()) 6097 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 6098 if (Op.isUse()) 6099 addSCCDefsToVALUWorklist(Op, Worklist); 6100 Inst.RemoveOperand(i); 6101 } 6102 } 6103 6104 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 6105 // We are converting these to a BFE, so we need to add the missing 6106 // operands for the size and offset. 6107 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 
8 : 16; 6108 Inst.addOperand(MachineOperand::CreateImm(0)); 6109 Inst.addOperand(MachineOperand::CreateImm(Size)); 6110 6111 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 6112 // The VALU version adds the second operand to the result, so insert an 6113 // extra 0 operand. 6114 Inst.addOperand(MachineOperand::CreateImm(0)); 6115 } 6116 6117 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 6118 fixImplicitOperands(Inst); 6119 6120 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 6121 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 6122 // If we need to move this to VGPRs, we need to unpack the second operand 6123 // back into the 2 separate ones for bit offset and width. 6124 assert(OffsetWidthOp.isImm() && 6125 "Scalar BFE is only implemented for constant width and offset"); 6126 uint32_t Imm = OffsetWidthOp.getImm(); 6127 6128 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6129 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6130 Inst.RemoveOperand(2); // Remove old immediate. 6131 Inst.addOperand(MachineOperand::CreateImm(Offset)); 6132 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 6133 } 6134 6135 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 6136 unsigned NewDstReg = AMDGPU::NoRegister; 6137 if (HasDst) { 6138 Register DstReg = Inst.getOperand(0).getReg(); 6139 if (DstReg.isPhysical()) 6140 continue; 6141 6142 // Update the destination register class. 6143 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 6144 if (!NewDstRC) 6145 continue; 6146 6147 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() && 6148 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 6149 // Instead of creating a copy where src and dst are the same register 6150 // class, we just replace all uses of dst with src. These kinds of 6151 // copies interfere with the heuristics MachineSink uses to decide 6152 // whether or not to split a critical edge. Since the pass assumes 6153 // that copies will end up as machine instructions and not be 6154 // eliminated. 6155 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 6156 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 6157 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 6158 Inst.getOperand(0).setReg(DstReg); 6159 6160 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 6161 // these are deleted later, but at -O0 it would leave a suspicious 6162 // looking illegal copy of an undef register. 6163 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 6164 Inst.RemoveOperand(I); 6165 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 6166 continue; 6167 } 6168 6169 NewDstReg = MRI.createVirtualRegister(NewDstRC); 6170 MRI.replaceRegWith(DstReg, NewDstReg); 6171 } 6172 6173 // Legalize the operands 6174 CreatedBBTmp = legalizeOperands(Inst, MDT); 6175 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6176 CreatedBB = CreatedBBTmp; 6177 6178 if (HasDst) 6179 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 6180 } 6181 return CreatedBB; 6182 } 6183 6184 // Add/sub require special handling to deal with carry outs. 6185 std::pair<bool, MachineBasicBlock *> 6186 SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 6187 MachineDominatorTree *MDT) const { 6188 if (ST.hasAddNoCarry()) { 6189 // Assume there is no user of scc since we don't select this in that case. 6190 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 6191 // is used. 
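    // For example (operands are illustrative), on such subtargets
    //   %d:sreg_32 = S_ADD_I32 %a, %b, implicit-def $scc
    // is rewritten in place to
    //   %d:vgpr_32 = V_ADD_U32_e64 %a, %b, 0   ; trailing 0 is the clamp bit
    // the dead SCC def is dropped, and the users of %d are queued for further
    // conversion to VALU.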
6192 6193 MachineBasicBlock &MBB = *Inst.getParent(); 6194 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6195 6196 Register OldDstReg = Inst.getOperand(0).getReg(); 6197 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6198 6199 unsigned Opc = Inst.getOpcode(); 6200 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 6201 6202 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 6203 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 6204 6205 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 6206 Inst.RemoveOperand(3); 6207 6208 Inst.setDesc(get(NewOpc)); 6209 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 6210 Inst.addImplicitDefUseOperands(*MBB.getParent()); 6211 MRI.replaceRegWith(OldDstReg, ResultReg); 6212 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT); 6213 6214 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6215 return std::make_pair(true, NewBB); 6216 } 6217 6218 return std::make_pair(false, nullptr); 6219 } 6220 6221 void SIInstrInfo::lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst, 6222 MachineDominatorTree *MDT) const { 6223 6224 MachineBasicBlock &MBB = *Inst.getParent(); 6225 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6226 MachineBasicBlock::iterator MII = Inst; 6227 DebugLoc DL = Inst.getDebugLoc(); 6228 6229 MachineOperand &Dest = Inst.getOperand(0); 6230 MachineOperand &Src0 = Inst.getOperand(1); 6231 MachineOperand &Src1 = Inst.getOperand(2); 6232 MachineOperand &Cond = Inst.getOperand(3); 6233 6234 Register SCCSource = Cond.getReg(); 6235 bool IsSCC = (SCCSource == AMDGPU::SCC); 6236 6237 // If this is a trivial select where the condition is effectively not SCC 6238 // (SCCSource is a source of copy to SCC), then the select is semantically 6239 // equivalent to copying SCCSource. Hence, there is no need to create 6240 // V_CNDMASK, we can just use that and bail out. 6241 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() && 6242 (Src1.getImm() == 0)) { 6243 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6244 return; 6245 } 6246 6247 const TargetRegisterClass *TC = 6248 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6249 6250 Register CopySCC = MRI.createVirtualRegister(TC); 6251 6252 if (IsSCC) { 6253 // Now look for the closest SCC def if it is a copy 6254 // replacing the SCCSource with the COPY source register 6255 bool CopyFound = false; 6256 for (MachineInstr &CandI : 6257 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 6258 Inst.getParent()->rend())) { 6259 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 6260 -1) { 6261 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 6262 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC) 6263 .addReg(CandI.getOperand(1).getReg()); 6264 CopyFound = true; 6265 } 6266 break; 6267 } 6268 } 6269 if (!CopyFound) { 6270 // SCC def is not a copy 6271 // Insert a trivial select instead of creating a copy, because a copy from 6272 // SCC would semantically mean just copying a single bit, but we may need 6273 // the result to be a vector condition mask that needs preserving. 6274 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64 6275 : AMDGPU::S_CSELECT_B32; 6276 auto NewSelect = 6277 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0); 6278 NewSelect->getOperand(3).setIsUndef(Cond.isUndef()); 6279 } 6280 } 6281 6282 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6283 6284 auto UpdatedInst = 6285 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg) 6286 .addImm(0) 6287 .add(Src1) // False 6288 .addImm(0) 6289 .add(Src0) // True 6290 .addReg(IsSCC ? CopySCC : SCCSource); 6291 6292 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6293 legalizeOperands(*UpdatedInst, MDT); 6294 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6295 } 6296 6297 void SIInstrInfo::splitSelect64(SetVectorType &Worklist, MachineInstr &Inst, 6298 MachineDominatorTree *MDT) const { 6299 // Split S_CSELECT_B64 into a pair of S_CSELECT_B32 and lower them 6300 // further. 6301 const DebugLoc &DL = Inst.getDebugLoc(); 6302 MachineBasicBlock::iterator MII = Inst; 6303 MachineBasicBlock &MBB = *Inst.getParent(); 6304 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6305 6306 // Get the original operands. 6307 MachineOperand &Dest = Inst.getOperand(0); 6308 MachineOperand &Src0 = Inst.getOperand(1); 6309 MachineOperand &Src1 = Inst.getOperand(2); 6310 MachineOperand &Cond = Inst.getOperand(3); 6311 6312 Register SCCSource = Cond.getReg(); 6313 bool IsSCC = (SCCSource == AMDGPU::SCC); 6314 6315 // If this is a trivial select where the condition is effectively not SCC 6316 // (SCCSource is a source of copy to SCC), then the select is semantically 6317 // equivalent to copying SCCSource. Hence, there is no need to create 6318 // V_CNDMASK, we can just use that and bail out. 6319 if (!IsSCC && (Src0.isImm() && Src0.getImm() == -1) && 6320 (Src1.isImm() && Src1.getImm() == 0)) { 6321 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6322 return; 6323 } 6324 6325 // Prepare the split destination. 6326 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6327 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6328 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6329 6330 // Split the source operands. 6331 const TargetRegisterClass *Src0RC = nullptr; 6332 const TargetRegisterClass *Src0SubRC = nullptr; 6333 if (Src0.isReg()) { 6334 Src0RC = MRI.getRegClass(Src0.getReg()); 6335 Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6336 } 6337 const TargetRegisterClass *Src1RC = nullptr; 6338 const TargetRegisterClass *Src1SubRC = nullptr; 6339 if (Src1.isReg()) { 6340 Src1RC = MRI.getRegClass(Src1.getReg()); 6341 Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6342 } 6343 // Split lo. 6344 MachineOperand SrcReg0Sub0 = 6345 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); 6346 MachineOperand SrcReg1Sub0 = 6347 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); 6348 // Split hi. 6349 MachineOperand SrcReg0Sub1 = 6350 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); 6351 MachineOperand SrcReg1Sub1 = 6352 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); 6353 // Select the lo part. 6354 MachineInstr *LoHalf = 6355 BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub0) 6356 .add(SrcReg0Sub0) 6357 .add(SrcReg1Sub0); 6358 // Replace the condition operand with the original one. 6359 LoHalf->getOperand(3).setReg(SCCSource); 6360 Worklist.insert(LoHalf); 6361 // Select the hi part. 
6362 MachineInstr *HiHalf = 6363 BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub1) 6364 .add(SrcReg0Sub1) 6365 .add(SrcReg1Sub1); 6366 // Replace the condition operand with the original one. 6367 HiHalf->getOperand(3).setReg(SCCSource); 6368 Worklist.insert(HiHalf); 6369 // Merge them back to the original 64-bit one. 6370 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6371 .addReg(DestSub0) 6372 .addImm(AMDGPU::sub0) 6373 .addReg(DestSub1) 6374 .addImm(AMDGPU::sub1); 6375 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6376 6377 // Try to legalize the operands in case we need to swap the order to keep 6378 // it valid. 6379 legalizeOperands(*LoHalf, MDT); 6380 legalizeOperands(*HiHalf, MDT); 6381 6382 // Move all users of this moved value. 6383 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6384 } 6385 6386 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist, 6387 MachineInstr &Inst) const { 6388 MachineBasicBlock &MBB = *Inst.getParent(); 6389 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6390 MachineBasicBlock::iterator MII = Inst; 6391 DebugLoc DL = Inst.getDebugLoc(); 6392 6393 MachineOperand &Dest = Inst.getOperand(0); 6394 MachineOperand &Src = Inst.getOperand(1); 6395 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6396 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6397 6398 unsigned SubOp = ST.hasAddNoCarry() ? 6399 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32; 6400 6401 BuildMI(MBB, MII, DL, get(SubOp), TmpReg) 6402 .addImm(0) 6403 .addReg(Src.getReg()); 6404 6405 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg) 6406 .addReg(Src.getReg()) 6407 .addReg(TmpReg); 6408 6409 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6410 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6411 } 6412 6413 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist, 6414 MachineInstr &Inst) const { 6415 MachineBasicBlock &MBB = *Inst.getParent(); 6416 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6417 MachineBasicBlock::iterator MII = Inst; 6418 const DebugLoc &DL = Inst.getDebugLoc(); 6419 6420 MachineOperand &Dest = Inst.getOperand(0); 6421 MachineOperand &Src0 = Inst.getOperand(1); 6422 MachineOperand &Src1 = Inst.getOperand(2); 6423 6424 if (ST.hasDLInsts()) { 6425 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6426 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL); 6427 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL); 6428 6429 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest) 6430 .add(Src0) 6431 .add(Src1); 6432 6433 MRI.replaceRegWith(Dest.getReg(), NewDest); 6434 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6435 } else { 6436 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can 6437 // invert either source and then perform the XOR. If either source is a 6438 // scalar register, then we can leave the inversion on the scalar unit to 6439 // acheive a better distrubution of scalar and vector instructions. 6440 bool Src0IsSGPR = Src0.isReg() && 6441 RI.isSGPRClass(MRI.getRegClass(Src0.getReg())); 6442 bool Src1IsSGPR = Src1.isReg() && 6443 RI.isSGPRClass(MRI.getRegClass(Src1.getReg())); 6444 MachineInstr *Xor; 6445 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6446 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6447 6448 // Build a pair of scalar instructions and add them to the work list. 
6449 // The next iteration over the work list will lower these to the vector 6450 // unit as necessary. 6451 if (Src0IsSGPR) { 6452 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 6453 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6454 .addReg(Temp) 6455 .add(Src1); 6456 } else if (Src1IsSGPR) { 6457 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 6458 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6459 .add(Src0) 6460 .addReg(Temp); 6461 } else { 6462 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 6463 .add(Src0) 6464 .add(Src1); 6465 MachineInstr *Not = 6466 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 6467 Worklist.insert(Not); 6468 } 6469 6470 MRI.replaceRegWith(Dest.getReg(), NewDest); 6471 6472 Worklist.insert(Xor); 6473 6474 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6475 } 6476 } 6477 6478 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 6479 MachineInstr &Inst, 6480 unsigned Opcode) const { 6481 MachineBasicBlock &MBB = *Inst.getParent(); 6482 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6483 MachineBasicBlock::iterator MII = Inst; 6484 const DebugLoc &DL = Inst.getDebugLoc(); 6485 6486 MachineOperand &Dest = Inst.getOperand(0); 6487 MachineOperand &Src0 = Inst.getOperand(1); 6488 MachineOperand &Src1 = Inst.getOperand(2); 6489 6490 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6491 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6492 6493 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 6494 .add(Src0) 6495 .add(Src1); 6496 6497 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 6498 .addReg(Interm); 6499 6500 Worklist.insert(&Op); 6501 Worklist.insert(&Not); 6502 6503 MRI.replaceRegWith(Dest.getReg(), NewDest); 6504 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6505 } 6506 6507 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 6508 MachineInstr &Inst, 6509 unsigned Opcode) const { 6510 MachineBasicBlock &MBB = *Inst.getParent(); 6511 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6512 MachineBasicBlock::iterator MII = Inst; 6513 const DebugLoc &DL = Inst.getDebugLoc(); 6514 6515 MachineOperand &Dest = Inst.getOperand(0); 6516 MachineOperand &Src0 = Inst.getOperand(1); 6517 MachineOperand &Src1 = Inst.getOperand(2); 6518 6519 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6520 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6521 6522 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 6523 .add(Src1); 6524 6525 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 6526 .add(Src0) 6527 .addReg(Interm); 6528 6529 Worklist.insert(&Not); 6530 Worklist.insert(&Op); 6531 6532 MRI.replaceRegWith(Dest.getReg(), NewDest); 6533 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6534 } 6535 6536 void SIInstrInfo::splitScalar64BitUnaryOp( 6537 SetVectorType &Worklist, MachineInstr &Inst, 6538 unsigned Opcode, bool Swap) const { 6539 MachineBasicBlock &MBB = *Inst.getParent(); 6540 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6541 6542 MachineOperand &Dest = Inst.getOperand(0); 6543 MachineOperand &Src0 = Inst.getOperand(1); 6544 DebugLoc DL = Inst.getDebugLoc(); 6545 6546 MachineBasicBlock::iterator MII = Inst; 6547 6548 const MCInstrDesc &InstDesc = get(Opcode); 6549 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
6550 MRI.getRegClass(Src0.getReg()) : 6551 &AMDGPU::SGPR_32RegClass; 6552 6553 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6554 6555 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6556 AMDGPU::sub0, Src0SubRC); 6557 6558 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6559 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6560 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6561 6562 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6563 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 6564 6565 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6566 AMDGPU::sub1, Src0SubRC); 6567 6568 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6569 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 6570 6571 if (Swap) 6572 std::swap(DestSub0, DestSub1); 6573 6574 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6575 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6576 .addReg(DestSub0) 6577 .addImm(AMDGPU::sub0) 6578 .addReg(DestSub1) 6579 .addImm(AMDGPU::sub1); 6580 6581 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6582 6583 Worklist.insert(&LoHalf); 6584 Worklist.insert(&HiHalf); 6585 6586 // We don't need to legalizeOperands here because for a single operand, src0 6587 // will support any kind of input. 6588 6589 // Move all users of this moved value. 6590 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6591 } 6592 6593 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 6594 MachineInstr &Inst, 6595 MachineDominatorTree *MDT) const { 6596 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 6597 6598 MachineBasicBlock &MBB = *Inst.getParent(); 6599 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6600 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6601 6602 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6603 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6604 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6605 6606 Register CarryReg = MRI.createVirtualRegister(CarryRC); 6607 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 6608 6609 MachineOperand &Dest = Inst.getOperand(0); 6610 MachineOperand &Src0 = Inst.getOperand(1); 6611 MachineOperand &Src1 = Inst.getOperand(2); 6612 const DebugLoc &DL = Inst.getDebugLoc(); 6613 MachineBasicBlock::iterator MII = Inst; 6614 6615 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 6616 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 6617 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6618 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6619 6620 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6621 AMDGPU::sub0, Src0SubRC); 6622 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6623 AMDGPU::sub0, Src1SubRC); 6624 6625 6626 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6627 AMDGPU::sub1, Src0SubRC); 6628 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6629 AMDGPU::sub1, Src1SubRC); 6630 6631 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
6632   MachineInstr *LoHalf =
6633     BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
6634       .addReg(CarryReg, RegState::Define)
6635       .add(SrcReg0Sub0)
6636       .add(SrcReg1Sub0)
6637       .addImm(0); // clamp bit
6638
6639   unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
6640   MachineInstr *HiHalf =
6641     BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
6642       .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
6643       .add(SrcReg0Sub1)
6644       .add(SrcReg1Sub1)
6645       .addReg(CarryReg, RegState::Kill)
6646       .addImm(0); // clamp bit
6647
6648   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6649     .addReg(DestSub0)
6650     .addImm(AMDGPU::sub0)
6651     .addReg(DestSub1)
6652     .addImm(AMDGPU::sub1);
6653
6654   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6655
6656   // Try to legalize the operands in case we need to swap the order to keep it
6657   // valid.
6658   legalizeOperands(*LoHalf, MDT);
6659   legalizeOperands(*HiHalf, MDT);
6660
6661   // Move all users of this moved value.
6662   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6663 }
6664
6665 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
6666                                            MachineInstr &Inst, unsigned Opcode,
6667                                            MachineDominatorTree *MDT) const {
6668   MachineBasicBlock &MBB = *Inst.getParent();
6669   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6670
6671   MachineOperand &Dest = Inst.getOperand(0);
6672   MachineOperand &Src0 = Inst.getOperand(1);
6673   MachineOperand &Src1 = Inst.getOperand(2);
6674   DebugLoc DL = Inst.getDebugLoc();
6675
6676   MachineBasicBlock::iterator MII = Inst;
6677
6678   const MCInstrDesc &InstDesc = get(Opcode);
6679   const TargetRegisterClass *Src0RC = Src0.isReg() ?
6680     MRI.getRegClass(Src0.getReg()) :
6681     &AMDGPU::SGPR_32RegClass;
6682
6683   const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
6684   const TargetRegisterClass *Src1RC = Src1.isReg() ?
6685     MRI.getRegClass(Src1.getReg()) :
6686     &AMDGPU::SGPR_32RegClass;
6687
6688   const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
6689
6690   MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6691                                                        AMDGPU::sub0, Src0SubRC);
6692   MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6693                                                        AMDGPU::sub0, Src1SubRC);
6694   MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6695                                                        AMDGPU::sub1, Src0SubRC);
6696   MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6697                                                        AMDGPU::sub1, Src1SubRC);
6698
6699   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6700   const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
6701   const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
6702
6703   Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
6704   MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
6705                               .add(SrcReg0Sub0)
6706                               .add(SrcReg1Sub0);
6707
6708   Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
6709   MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
6710                               .add(SrcReg0Sub1)
6711                               .add(SrcReg1Sub1);
6712
6713   Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
6714   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6715     .addReg(DestSub0)
6716     .addImm(AMDGPU::sub0)
6717     .addReg(DestSub1)
6718     .addImm(AMDGPU::sub1);
6719
6720   MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6721
6722   Worklist.insert(&LoHalf);
6723   Worklist.insert(&HiHalf);
6724
6725   // Move all users of this moved value.
6726   addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6727 }
6728
6729 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
6730                                        MachineInstr &Inst,
6731                                        MachineDominatorTree *MDT) const {
6732   MachineBasicBlock &MBB = *Inst.getParent();
6733   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6734
6735   MachineOperand &Dest = Inst.getOperand(0);
6736   MachineOperand &Src0 = Inst.getOperand(1);
6737   MachineOperand &Src1 = Inst.getOperand(2);
6738   const DebugLoc &DL = Inst.getDebugLoc();
6739
6740   MachineBasicBlock::iterator MII = Inst;
6741
6742   const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6743
6744   Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
6745
6746   MachineOperand* Op0;
6747   MachineOperand* Op1;
6748
6749   if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
6750     Op0 = &Src0;
6751     Op1 = &Src1;
6752   } else {
6753     Op0 = &Src1;
6754     Op1 = &Src0;
6755   }
6756
6757   BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
6758     .add(*Op0);
6759
6760   Register NewDest = MRI.createVirtualRegister(DestRC);
6761
6762   MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
6763                            .addReg(Interm)
6764                            .add(*Op1);
6765
6766   MRI.replaceRegWith(Dest.getReg(), NewDest);
6767
6768   Worklist.insert(&Xor);
6769 }
6770
6771 void SIInstrInfo::splitScalar64BitBCNT(
6772     SetVectorType &Worklist, MachineInstr &Inst) const {
6773   MachineBasicBlock &MBB = *Inst.getParent();
6774   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6775
6776   MachineBasicBlock::iterator MII = Inst;
6777   const DebugLoc &DL = Inst.getDebugLoc();
6778
6779   MachineOperand &Dest = Inst.getOperand(0);
6780   MachineOperand &Src = Inst.getOperand(1);
6781
6782   const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
6783   const TargetRegisterClass *SrcRC = Src.isReg() ?
6784     MRI.getRegClass(Src.getReg()) :
6785     &AMDGPU::SGPR_32RegClass;
6786
6787   Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6788   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6789
6790   const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
6791
6792   MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6793                                                       AMDGPU::sub0, SrcSubRC);
6794   MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6795                                                       AMDGPU::sub1, SrcSubRC);
6796
6797   BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
6798
6799   BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
6800
6801   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6802
6803   // We don't need to legalize operands here. src0 for either instruction can be
6804   // an SGPR, and the second input is unused or determined here.
6805   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6806 }
6807
6808 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
6809                                       MachineInstr &Inst) const {
6810   MachineBasicBlock &MBB = *Inst.getParent();
6811   MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6812   MachineBasicBlock::iterator MII = Inst;
6813   const DebugLoc &DL = Inst.getDebugLoc();
6814
6815   MachineOperand &Dest = Inst.getOperand(0);
6816   uint32_t Imm = Inst.getOperand(2).getImm();
6817   uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6818   uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6819
6820   (void) Offset;
6821
6822   // Only sext_inreg cases handled.
6823   assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
6824          Offset == 0 && "Not implemented");
6825
6826   if (BitWidth < 32) {
6827     Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6828     Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6829     Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6830
6831     BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
6832       .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
6833       .addImm(0)
6834       .addImm(BitWidth);
6835
6836     BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
6837       .addImm(31)
6838       .addReg(MidRegLo);
6839
6840     BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6841       .addReg(MidRegLo)
6842       .addImm(AMDGPU::sub0)
6843       .addReg(MidRegHi)
6844       .addImm(AMDGPU::sub1);
6845
6846     MRI.replaceRegWith(Dest.getReg(), ResultReg);
6847     addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6848     return;
6849   }
6850
6851   MachineOperand &Src = Inst.getOperand(1);
6852   Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6853   Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6854
6855   BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
6856     .addImm(31)
6857     .addReg(Src.getReg(), 0, AMDGPU::sub0);
6858
6859   BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6860     .addReg(Src.getReg(), 0, AMDGPU::sub0)
6861     .addImm(AMDGPU::sub0)
6862     .addReg(TmpReg)
6863     .addImm(AMDGPU::sub1);
6864
6865   MRI.replaceRegWith(Dest.getReg(), ResultReg);
6866   addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6867 }
6868
6869 void SIInstrInfo::addUsersToMoveToVALUWorklist(
6870     Register DstReg,
6871     MachineRegisterInfo &MRI,
6872     SetVectorType &Worklist) const {
6873   for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
6874          E = MRI.use_end(); I != E;) {
6875     MachineInstr &UseMI = *I->getParent();
6876
6877     unsigned OpNo =
0; 6878 6879 switch (UseMI.getOpcode()) { 6880 case AMDGPU::COPY: 6881 case AMDGPU::WQM: 6882 case AMDGPU::SOFT_WQM: 6883 case AMDGPU::STRICT_WWM: 6884 case AMDGPU::STRICT_WQM: 6885 case AMDGPU::REG_SEQUENCE: 6886 case AMDGPU::PHI: 6887 case AMDGPU::INSERT_SUBREG: 6888 break; 6889 default: 6890 OpNo = I.getOperandNo(); 6891 break; 6892 } 6893 6894 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 6895 Worklist.insert(&UseMI); 6896 6897 do { 6898 ++I; 6899 } while (I != E && I->getParent() == &UseMI); 6900 } else { 6901 ++I; 6902 } 6903 } 6904 } 6905 6906 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 6907 MachineRegisterInfo &MRI, 6908 MachineInstr &Inst) const { 6909 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6910 MachineBasicBlock *MBB = Inst.getParent(); 6911 MachineOperand &Src0 = Inst.getOperand(1); 6912 MachineOperand &Src1 = Inst.getOperand(2); 6913 const DebugLoc &DL = Inst.getDebugLoc(); 6914 6915 switch (Inst.getOpcode()) { 6916 case AMDGPU::S_PACK_LL_B32_B16: { 6917 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6918 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6919 6920 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 6921 // 0. 6922 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6923 .addImm(0xffff); 6924 6925 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 6926 .addReg(ImmReg, RegState::Kill) 6927 .add(Src0); 6928 6929 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg) 6930 .add(Src1) 6931 .addImm(16) 6932 .addReg(TmpReg, RegState::Kill); 6933 break; 6934 } 6935 case AMDGPU::S_PACK_LH_B32_B16: { 6936 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6937 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6938 .addImm(0xffff); 6939 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg) 6940 .addReg(ImmReg, RegState::Kill) 6941 .add(Src0) 6942 .add(Src1); 6943 break; 6944 } 6945 case AMDGPU::S_PACK_HH_B32_B16: { 6946 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6947 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6948 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 6949 .addImm(16) 6950 .add(Src0); 6951 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6952 .addImm(0xffff0000); 6953 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg) 6954 .add(Src1) 6955 .addReg(ImmReg, RegState::Kill) 6956 .addReg(TmpReg, RegState::Kill); 6957 break; 6958 } 6959 default: 6960 llvm_unreachable("unhandled s_pack_* instruction"); 6961 } 6962 6963 MachineOperand &Dest = Inst.getOperand(0); 6964 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6965 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6966 } 6967 6968 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 6969 MachineInstr &SCCDefInst, 6970 SetVectorType &Worklist, 6971 Register NewCond) const { 6972 6973 // Ensure that def inst defines SCC, which is still live. 6974 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 6975 !Op.isDead() && Op.getParent() == &SCCDefInst); 6976 SmallVector<MachineInstr *, 4> CopyToDelete; 6977 // This assumes that all the users of SCC are in the same block 6978 // as the SCC def. 6979 for (MachineInstr &MI : // Skip the def inst itself. 6980 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 6981 SCCDefInst.getParent()->end())) { 6982 // Check if SCC is used first. 
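    // findRegisterUseOperandIdx returns the index of the SCC use operand, or -1
    // if this instruction does not read SCC at all.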
6983     int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI);
6984     if (SCCIdx != -1) {
6985       if (MI.isCopy()) {
6986         MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6987         Register DestReg = MI.getOperand(0).getReg();
6988
6989         MRI.replaceRegWith(DestReg, NewCond);
6990         CopyToDelete.push_back(&MI);
6991       } else {
6992
6993         if (NewCond.isValid())
6994           MI.getOperand(SCCIdx).setReg(NewCond);
6995
6996         Worklist.insert(&MI);
6997       }
6998     }
6999     // Exit if we find another SCC def.
7000     if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
7001       break;
7002   }
7003   for (auto &Copy : CopyToDelete)
7004     Copy->eraseFromParent();
7005 }
7006
7007 // Instructions that use SCC may be converted to VALU instructions. When that
7008 // happens, the SCC register is changed to VCC_LO. The instruction that defines
7009 // SCC must be changed to an instruction that defines VCC. This function makes
7010 // sure that the instruction that defines SCC is added to the moveToVALU
7011 // worklist.
7012 void SIInstrInfo::addSCCDefsToVALUWorklist(MachineOperand &Op,
7013                                            SetVectorType &Worklist) const {
7014   assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isUse());
7015
7016   MachineInstr *SCCUseInst = Op.getParent();
7017   // Look for a preceding instruction that either defines VCC or SCC. If VCC
7018   // then there is nothing to do because the defining instruction has been
7019   // converted to a VALU already. If SCC then that instruction needs to be
7020   // converted to a VALU.
7021   for (MachineInstr &MI :
7022        make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)),
7023                   SCCUseInst->getParent()->rend())) {
7024     if (MI.modifiesRegister(AMDGPU::VCC, &RI))
7025       break;
7026     if (MI.definesRegister(AMDGPU::SCC, &RI)) {
7027       Worklist.insert(&MI);
7028       break;
7029     }
7030   }
7031 }
7032
7033 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
7034     const MachineInstr &Inst) const {
7035   const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
7036
7037   switch (Inst.getOpcode()) {
7038   // For target instructions, getOpRegClass just returns the virtual register
7039   // class associated with the operand, so we need to find an equivalent VGPR
7040   // register class in order to move the instruction to the VALU.
7041   case AMDGPU::COPY:
7042   case AMDGPU::PHI:
7043   case AMDGPU::REG_SEQUENCE:
7044   case AMDGPU::INSERT_SUBREG:
7045   case AMDGPU::WQM:
7046   case AMDGPU::SOFT_WQM:
7047   case AMDGPU::STRICT_WWM:
7048   case AMDGPU::STRICT_WQM: {
7049     const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
7050     if (RI.hasAGPRs(SrcRC)) {
7051       if (RI.hasAGPRs(NewDstRC))
7052         return nullptr;
7053
7054       switch (Inst.getOpcode()) {
7055       case AMDGPU::PHI:
7056       case AMDGPU::REG_SEQUENCE:
7057       case AMDGPU::INSERT_SUBREG:
7058         NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
7059         break;
7060       default:
7061         NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
7062       }
7063
7064       if (!NewDstRC)
7065         return nullptr;
7066     } else {
7067       if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
7068         return nullptr;
7069
7070       NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
7071       if (!NewDstRC)
7072         return nullptr;
7073     }
7074
7075     return NewDstRC;
7076   }
7077   default:
7078     return NewDstRC;
7079   }
7080 }
7081
7082 // Find the one SGPR operand we are allowed to use.
7083 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
7084                                    int OpIndices[3]) const {
7085   const MCInstrDesc &Desc = MI.getDesc();
7086
7087   // Find the one SGPR operand we are allowed to use.
7088   //
7089   // First we need to consider the instruction's operand requirements before
7090   // legalizing. Some operands are required to be SGPRs, such as implicit uses
7091   // of VCC, but we are still bound by the constant bus requirement to only use
7092   // one.
7093   //
7094   // If the operand's class is an SGPR, we can never move it.
7095
7096   Register SGPRReg = findImplicitSGPRRead(MI);
7097   if (SGPRReg != AMDGPU::NoRegister)
7098     return SGPRReg;
7099
7100   Register UsedSGPRs[3] = { AMDGPU::NoRegister };
7101   const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
7102
7103   for (unsigned i = 0; i < 3; ++i) {
7104     int Idx = OpIndices[i];
7105     if (Idx == -1)
7106       break;
7107
7108     const MachineOperand &MO = MI.getOperand(Idx);
7109     if (!MO.isReg())
7110       continue;
7111
7112     // Is this operand statically required to be an SGPR based on the operand
7113     // constraints?
7114     const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass);
7115     bool IsRequiredSGPR = RI.isSGPRClass(OpRC);
7116     if (IsRequiredSGPR)
7117       return MO.getReg();
7118
7119     // If this could be a VGPR or an SGPR, check the dynamic register class.
7120     Register Reg = MO.getReg();
7121     const TargetRegisterClass *RegRC = MRI.getRegClass(Reg);
7122     if (RI.isSGPRClass(RegRC))
7123       UsedSGPRs[i] = Reg;
7124   }
7125
7126   // We don't have a required SGPR operand, so we have a bit more freedom in
7127   // selecting operands to move.
7128
7129   // Try to select the most used SGPR. If an SGPR is equal to one of the
7130   // others, we choose that.
7131   //
7132   // e.g.
7133   // V_FMA_F32 v0, s0, s0, s0 -> No moves
7134   // V_FMA_F32 v0, s0, s1, s0 -> Move s1
7135
7136   // TODO: If some of the operands are 64-bit SGPRs and some 32, we should
7137   // prefer those.
7138
7139   if (UsedSGPRs[0] != AMDGPU::NoRegister) {
7140     if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2])
7141       SGPRReg = UsedSGPRs[0];
7142   }
7143
7144   if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) {
7145     if (UsedSGPRs[1] == UsedSGPRs[2])
7146       SGPRReg = UsedSGPRs[1];
7147   }
7148
7149   return SGPRReg;
7150 }
7151
7152 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI,
7153                                              unsigned OperandName) const {
7154   int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName);
7155   if (Idx == -1)
7156     return nullptr;
7157
7158   return &MI.getOperand(Idx);
7159 }
7160
7161 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const {
7162   if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) {
7163     return (AMDGPU::MTBUFFormat::UFMT_32_FLOAT << 44) |
7164            (1ULL << 56) | // RESOURCE_LEVEL = 1
7165            (3ULL << 60); // OOB_SELECT = 3
7166   }
7167
7168   uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT;
7169   if (ST.isAmdHsaOS()) {
7170     // Set ATC = 1. GFX9 doesn't have this bit.
7171     if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS)
7172       RsrcDataFormat |= (1ULL << 56);
7173
7174     // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this.
7175     // BTW, it disables TC L2 and therefore decreases performance.
7176     if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS)
7177       RsrcDataFormat |= (2ULL << 59);
7178   }
7179
7180   return RsrcDataFormat;
7181 }
7182
7183 uint64_t SIInstrInfo::getScratchRsrcWords23() const {
7184   uint64_t Rsrc23 = getDefaultRsrcDataFormat() |
7185                     AMDGPU::RSRC_TID_ENABLE |
7186                     0xffffffff; // Size;
7187
7188   // GFX9 doesn't have ELEMENT_SIZE.
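  // ELEMENT_SIZE stores log2(element size in bytes) - 1 (so 4-byte elements
  // encode as 1), which is why the value below is Log2_32(...) - 1.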
7189 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 7190 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1; 7191 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 7192 } 7193 7194 // IndexStride = 64 / 32. 7195 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 7196 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 7197 7198 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 7199 // Clear them unless we want a huge stride. 7200 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 7201 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 7202 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 7203 7204 return Rsrc23; 7205 } 7206 7207 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 7208 unsigned Opc = MI.getOpcode(); 7209 7210 return isSMRD(Opc); 7211 } 7212 7213 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 7214 return get(Opc).mayLoad() && 7215 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 7216 } 7217 7218 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 7219 int &FrameIndex) const { 7220 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 7221 if (!Addr || !Addr->isFI()) 7222 return AMDGPU::NoRegister; 7223 7224 assert(!MI.memoperands_empty() && 7225 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 7226 7227 FrameIndex = Addr->getIndex(); 7228 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 7229 } 7230 7231 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 7232 int &FrameIndex) const { 7233 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 7234 assert(Addr && Addr->isFI()); 7235 FrameIndex = Addr->getIndex(); 7236 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 7237 } 7238 7239 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 7240 int &FrameIndex) const { 7241 if (!MI.mayLoad()) 7242 return AMDGPU::NoRegister; 7243 7244 if (isMUBUF(MI) || isVGPRSpill(MI)) 7245 return isStackAccess(MI, FrameIndex); 7246 7247 if (isSGPRSpill(MI)) 7248 return isSGPRStackAccess(MI, FrameIndex); 7249 7250 return AMDGPU::NoRegister; 7251 } 7252 7253 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 7254 int &FrameIndex) const { 7255 if (!MI.mayStore()) 7256 return AMDGPU::NoRegister; 7257 7258 if (isMUBUF(MI) || isVGPRSpill(MI)) 7259 return isStackAccess(MI, FrameIndex); 7260 7261 if (isSGPRSpill(MI)) 7262 return isSGPRStackAccess(MI, FrameIndex); 7263 7264 return AMDGPU::NoRegister; 7265 } 7266 7267 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 7268 unsigned Size = 0; 7269 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 7270 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 7271 while (++I != E && I->isInsideBundle()) { 7272 assert(!I->isBundle() && "No nested bundle!"); 7273 Size += getInstSizeInBytes(*I); 7274 } 7275 7276 return Size; 7277 } 7278 7279 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 7280 unsigned Opc = MI.getOpcode(); 7281 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 7282 unsigned DescSize = Desc.getSize(); 7283 7284 // If we have a definitive size, we can use it. Otherwise we need to inspect 7285 // the operands to know the size. 7286 if (isFixedSize(MI)) { 7287 unsigned Size = DescSize; 7288 7289 // If we hit the buggy offset, an extra nop will be inserted in MC so 7290 // estimate the worst case. 
7291     if (MI.isBranch() && ST.hasOffset3fBug())
7292       Size += 4;
7293
7294     return Size;
7295   }
7296
7297   // 4-byte instructions may have a 32-bit literal encoded after them. Check
7298   // operands that could ever be literals.
7299   if (isVALU(MI) || isSALU(MI)) {
7300     int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
7301     if (Src0Idx == -1)
7302       return DescSize; // No operands.
7303
7304     if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
7305       return isVOP3(MI) ? 12 : (DescSize + 4);
7306
7307     int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
7308     if (Src1Idx == -1)
7309       return DescSize;
7310
7311     if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
7312       return isVOP3(MI) ? 12 : (DescSize + 4);
7313
7314     int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
7315     if (Src2Idx == -1)
7316       return DescSize;
7317
7318     if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
7319       return isVOP3(MI) ? 12 : (DescSize + 4);
7320
7321     return DescSize;
7322   }
7323
7324   // Check whether we have extra NSA words.
7325   if (isMIMG(MI)) {
7326     int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
7327     if (VAddr0Idx < 0)
7328       return 8;
7329
7330     int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
7331     return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
7332   }
7333
7334   switch (Opc) {
7335   case TargetOpcode::BUNDLE:
7336     return getInstBundleSize(MI);
7337   case TargetOpcode::INLINEASM:
7338   case TargetOpcode::INLINEASM_BR: {
7339     const MachineFunction *MF = MI.getParent()->getParent();
7340     const char *AsmStr = MI.getOperand(0).getSymbolName();
7341     return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
7342   }
7343   default:
7344     if (MI.isMetaInstruction())
7345       return 0;
7346     return DescSize;
7347   }
7348 }
7349
7350 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
7351   if (!isFLAT(MI))
7352     return false;
7353
7354   if (MI.memoperands_empty())
7355     return true;
7356
7357   for (const MachineMemOperand *MMO : MI.memoperands()) {
7358     if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
7359       return true;
7360   }
7361   return false;
7362 }
7363
7364 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
7365   return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
7366 }
7367
7368 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
7369                                             MachineBasicBlock *IfEnd) const {
7370   MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
7371   assert(TI != IfEntry->end());
7372
7373   MachineInstr *Branch = &(*TI);
7374   MachineFunction *MF = IfEntry->getParent();
7375   MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
7376
7377   if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
7378     Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
7379     MachineInstr *SIIF =
7380         BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
7381             .add(Branch->getOperand(0))
7382             .add(Branch->getOperand(1));
7383     MachineInstr *SIEND =
7384         BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
7385             .addReg(DstReg);
7386
7387     IfEntry->erase(TI);
7388     IfEntry->insert(IfEntry->end(), SIIF);
7389     IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
7390   }
7391 }
7392
7393 void SIInstrInfo::convertNonUniformLoopRegion(
7394     MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
7395   MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
7396   // We expect 2
terminators, one conditional and one unconditional. 7397 assert(TI != LoopEnd->end()); 7398 7399 MachineInstr *Branch = &(*TI); 7400 MachineFunction *MF = LoopEnd->getParent(); 7401 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 7402 7403 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7404 7405 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7406 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 7407 MachineInstrBuilder HeaderPHIBuilder = 7408 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 7409 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 7410 E = LoopEntry->pred_end(); 7411 PI != E; ++PI) { 7412 if (*PI == LoopEnd) { 7413 HeaderPHIBuilder.addReg(BackEdgeReg); 7414 } else { 7415 MachineBasicBlock *PMBB = *PI; 7416 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 7417 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 7418 ZeroReg, 0); 7419 HeaderPHIBuilder.addReg(ZeroReg); 7420 } 7421 HeaderPHIBuilder.addMBB(*PI); 7422 } 7423 MachineInstr *HeaderPhi = HeaderPHIBuilder; 7424 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 7425 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 7426 .addReg(DstReg) 7427 .add(Branch->getOperand(0)); 7428 MachineInstr *SILOOP = 7429 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 7430 .addReg(BackEdgeReg) 7431 .addMBB(LoopEntry); 7432 7433 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 7434 LoopEnd->erase(TI); 7435 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 7436 LoopEnd->insert(LoopEnd->end(), SILOOP); 7437 } 7438 } 7439 7440 ArrayRef<std::pair<int, const char *>> 7441 SIInstrInfo::getSerializableTargetIndices() const { 7442 static const std::pair<int, const char *> TargetIndices[] = { 7443 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 7444 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 7445 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 7446 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 7447 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 7448 return makeArrayRef(TargetIndices); 7449 } 7450 7451 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 7452 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 7453 ScheduleHazardRecognizer * 7454 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 7455 const ScheduleDAG *DAG) const { 7456 return new GCNHazardRecognizer(DAG->MF); 7457 } 7458 7459 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 7460 /// pass. 7461 ScheduleHazardRecognizer * 7462 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 7463 return new GCNHazardRecognizer(MF); 7464 } 7465 7466 // Called during: 7467 // - pre-RA scheduling and post-RA scheduling 7468 ScheduleHazardRecognizer * 7469 SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II, 7470 const ScheduleDAGMI *DAG) const { 7471 // Borrowed from Arm Target 7472 // We would like to restrict this hazard recognizer to only 7473 // post-RA scheduling; we can tell that we're post-RA because we don't 7474 // track VRegLiveness. 
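  // ScheduleDAGMILive, used before register allocation, overrides
  // hasVRegLiveness() to return true; the plain post-RA ScheduleDAGMI returns
  // false, which is what the check below relies on.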
7475 if (!DAG->hasVRegLiveness()) 7476 return new GCNHazardRecognizer(DAG->MF); 7477 return TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG); 7478 } 7479 7480 std::pair<unsigned, unsigned> 7481 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7482 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 7483 } 7484 7485 ArrayRef<std::pair<unsigned, const char *>> 7486 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7487 static const std::pair<unsigned, const char *> TargetFlags[] = { 7488 { MO_GOTPCREL, "amdgpu-gotprel" }, 7489 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 7490 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 7491 { MO_REL32_LO, "amdgpu-rel32-lo" }, 7492 { MO_REL32_HI, "amdgpu-rel32-hi" }, 7493 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 7494 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 7495 }; 7496 7497 return makeArrayRef(TargetFlags); 7498 } 7499 7500 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 7501 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 7502 MI.modifiesRegister(AMDGPU::EXEC, &RI); 7503 } 7504 7505 MachineInstrBuilder 7506 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7507 MachineBasicBlock::iterator I, 7508 const DebugLoc &DL, 7509 Register DestReg) const { 7510 if (ST.hasAddNoCarry()) 7511 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 7512 7513 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 7514 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 7515 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 7516 7517 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7518 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7519 } 7520 7521 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7522 MachineBasicBlock::iterator I, 7523 const DebugLoc &DL, 7524 Register DestReg, 7525 RegScavenger &RS) const { 7526 if (ST.hasAddNoCarry()) 7527 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 7528 7529 // If available, prefer to use vcc. 7530 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 7531 ? Register(RI.getVCC()) 7532 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 7533 7534 // TODO: Users need to deal with this. 7535 if (!UnusedCarry.isValid()) 7536 return MachineInstrBuilder(); 7537 7538 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7539 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7540 } 7541 7542 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 7543 switch (Opcode) { 7544 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 7545 case AMDGPU::SI_KILL_I1_TERMINATOR: 7546 return true; 7547 default: 7548 return false; 7549 } 7550 } 7551 7552 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 7553 switch (Opcode) { 7554 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 7555 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 7556 case AMDGPU::SI_KILL_I1_PSEUDO: 7557 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 7558 default: 7559 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 7560 } 7561 } 7562 7563 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 7564 if (!ST.isWave32()) 7565 return; 7566 7567 for (auto &Op : MI.implicit_operands()) { 7568 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 7569 Op.setReg(AMDGPU::VCC_LO); 7570 } 7571 } 7572 7573 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 7574 if (!isSMRD(MI)) 7575 return false; 7576 7577 // Check that it is using a buffer resource. 
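  // Buffer SMEM loads (s_buffer_load_*) take a 128-bit resource descriptor in
  // the sbase operand, whereas plain s_load_* instructions take a 64-bit
  // address, so the register class of sbase distinguishes the two forms.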
7578 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 7579 if (Idx == -1) // e.g. s_memtime 7580 return false; 7581 7582 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 7583 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 7584 } 7585 7586 // Depending on the used address space and instructions, some immediate offsets 7587 // are allowed and some are not. 7588 // In general, flat instruction offsets can only be non-negative, global and 7589 // scratch instruction offsets can also be negative. 7590 // 7591 // There are several bugs related to these offsets: 7592 // On gfx10.1, flat instructions that go into the global address space cannot 7593 // use an offset. 7594 // 7595 // For scratch instructions, the address can be either an SGPR or a VGPR. 7596 // The following offsets can be used, depending on the architecture (x means 7597 // cannot be used): 7598 // +----------------------------+------+------+ 7599 // | Address-Mode | SGPR | VGPR | 7600 // +----------------------------+------+------+ 7601 // | gfx9 | | | 7602 // | negative, 4-aligned offset | x | ok | 7603 // | negative, unaligned offset | x | ok | 7604 // +----------------------------+------+------+ 7605 // | gfx10 | | | 7606 // | negative, 4-aligned offset | ok | ok | 7607 // | negative, unaligned offset | ok | x | 7608 // +----------------------------+------+------+ 7609 // | gfx10.3 | | | 7610 // | negative, 4-aligned offset | ok | ok | 7611 // | negative, unaligned offset | ok | ok | 7612 // +----------------------------+------+------+ 7613 // 7614 // This function ignores the addressing mode, so if an offset cannot be used in 7615 // one addressing mode, it is considered illegal. 7616 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 7617 uint64_t FlatVariant) const { 7618 // TODO: Should 0 be special cased? 7619 if (!ST.hasFlatInstOffsets()) 7620 return false; 7621 7622 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT && 7623 (AddrSpace == AMDGPUAS::FLAT_ADDRESS || 7624 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS)) 7625 return false; 7626 7627 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7628 if (ST.hasNegativeScratchOffsetBug() && 7629 FlatVariant == SIInstrFlags::FlatScratch) 7630 Signed = false; 7631 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7632 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 && 7633 (Offset % 4) != 0) { 7634 return false; 7635 } 7636 7637 unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7638 return Signed ? isIntN(N, Offset) : isUIntN(N, Offset); 7639 } 7640 7641 // See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not. 7642 std::pair<int64_t, int64_t> 7643 SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, 7644 uint64_t FlatVariant) const { 7645 int64_t RemainderOffset = COffsetVal; 7646 int64_t ImmField = 0; 7647 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7648 if (ST.hasNegativeScratchOffsetBug() && 7649 FlatVariant == SIInstrFlags::FlatScratch) 7650 Signed = false; 7651 7652 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7653 if (Signed) { 7654 // Use signed division by a power of two to truncate towards 0. 
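    // For instance, if NumBits were 12 and COffsetVal were 9000: D == 2048,
    // RemainderOffset == 8192 and ImmField == 808, so the immediate fits in the
    // signed field and the two pieces still sum to the original offset.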
7655 int64_t D = 1LL << (NumBits - 1); 7656 RemainderOffset = (COffsetVal / D) * D; 7657 ImmField = COffsetVal - RemainderOffset; 7658 7659 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7660 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 && 7661 (ImmField % 4) != 0) { 7662 // Make ImmField a multiple of 4 7663 RemainderOffset += ImmField % 4; 7664 ImmField -= ImmField % 4; 7665 } 7666 } else if (COffsetVal >= 0) { 7667 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits); 7668 RemainderOffset = COffsetVal - ImmField; 7669 } 7670 7671 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant)); 7672 assert(RemainderOffset + ImmField == COffsetVal); 7673 return {ImmField, RemainderOffset}; 7674 } 7675 7676 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 7677 enum SIEncodingFamily { 7678 SI = 0, 7679 VI = 1, 7680 SDWA = 2, 7681 SDWA9 = 3, 7682 GFX80 = 4, 7683 GFX9 = 5, 7684 GFX10 = 6, 7685 SDWA10 = 7, 7686 GFX90A = 8 7687 }; 7688 7689 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 7690 switch (ST.getGeneration()) { 7691 default: 7692 break; 7693 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 7694 case AMDGPUSubtarget::SEA_ISLANDS: 7695 return SIEncodingFamily::SI; 7696 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 7697 case AMDGPUSubtarget::GFX9: 7698 return SIEncodingFamily::VI; 7699 case AMDGPUSubtarget::GFX10: 7700 return SIEncodingFamily::GFX10; 7701 } 7702 llvm_unreachable("Unknown subtarget generation!"); 7703 } 7704 7705 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 7706 switch(MCOp) { 7707 // These opcodes use indirect register addressing so 7708 // they need special handling by codegen (currently missing). 7709 // Therefore it is too risky to allow these opcodes 7710 // to be selected by dpp combiner or sdwa peepholer. 7711 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 7712 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 7713 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 7714 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 7715 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 7716 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 7717 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 7718 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 7719 return true; 7720 default: 7721 return false; 7722 } 7723 } 7724 7725 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 7726 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 7727 7728 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 7729 ST.getGeneration() == AMDGPUSubtarget::GFX9) 7730 Gen = SIEncodingFamily::GFX9; 7731 7732 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 7733 // subtarget has UnpackedD16VMem feature. 7734 // TODO: remove this when we discard GFX80 encoding. 7735 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 7736 Gen = SIEncodingFamily::GFX80; 7737 7738 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 7739 switch (ST.getGeneration()) { 7740 default: 7741 Gen = SIEncodingFamily::SDWA; 7742 break; 7743 case AMDGPUSubtarget::GFX9: 7744 Gen = SIEncodingFamily::SDWA9; 7745 break; 7746 case AMDGPUSubtarget::GFX10: 7747 Gen = SIEncodingFamily::SDWA10; 7748 break; 7749 } 7750 } 7751 7752 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 7753 7754 // -1 means that Opcode is already a native instruction. 
7755   if (MCOp == -1)
7756     return Opcode;
7757
7758   if (ST.hasGFX90AInsts()) {
7759     uint16_t NMCOp = (uint16_t)-1;
7760     NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A);
7761     if (NMCOp == (uint16_t)-1)
7762       NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX9);
7763     if (NMCOp != (uint16_t)-1)
7764       MCOp = NMCOp;
7765   }
7766
7767   // (uint16_t)-1 means that Opcode is a pseudo instruction that has
7768   // no encoding in the given subtarget generation.
7769   if (MCOp == (uint16_t)-1)
7770     return -1;
7771
7772   if (isAsmOnlyOpcode(MCOp))
7773     return -1;
7774
7775   return MCOp;
7776 }
7777
7778 static
7779 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
7780   assert(RegOpnd.isReg());
7781   return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
7782                              getRegSubRegPair(RegOpnd);
7783 }
7784
7785 TargetInstrInfo::RegSubRegPair
7786 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7787   assert(MI.isRegSequence());
7788   for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
7789     if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7790       auto &RegOp = MI.getOperand(1 + 2 * I);
7791       return getRegOrUndef(RegOp);
7792     }
7793   return TargetInstrInfo::RegSubRegPair();
7794 }
7795
7796 // Try to find the definition of reg:subreg in subreg-manipulation pseudos
7797 // Following a subreg of reg:subreg isn't supported
7798 static bool followSubRegDef(MachineInstr &MI,
7799                             TargetInstrInfo::RegSubRegPair &RSR) {
7800   if (!RSR.SubReg)
7801     return false;
7802   switch (MI.getOpcode()) {
7803   default: break;
7804   case AMDGPU::REG_SEQUENCE:
7805     RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7806     return true;
7807   // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
7808   case AMDGPU::INSERT_SUBREG:
7809     if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7810       // inserted the subreg we're looking for
7811       RSR = getRegOrUndef(MI.getOperand(2));
7812     else { // the subreg in the rest of the reg
7813       auto R1 = getRegOrUndef(MI.getOperand(1));
7814       if (R1.SubReg) // subreg of subreg isn't supported
7815         return false;
7816       RSR.Reg = R1.Reg;
7817     }
7818     return true;
7819   }
7820   return false;
7821 }
7822
7823 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
7824                                      MachineRegisterInfo &MRI) {
7825   assert(MRI.isSSA());
7826   if (!P.Reg.isVirtual())
7827     return nullptr;
7828
7829   auto RSR = P;
7830   auto *DefInst = MRI.getVRegDef(RSR.Reg);
7831   while (auto *MI = DefInst) {
7832     DefInst = nullptr;
7833     switch (MI->getOpcode()) {
7834     case AMDGPU::COPY:
7835     case AMDGPU::V_MOV_B32_e32: {
7836       auto &Op1 = MI->getOperand(1);
7837       if (Op1.isReg() && Op1.getReg().isVirtual()) {
7838         if (Op1.isUndef())
7839           return nullptr;
7840         RSR = getRegSubRegPair(Op1);
7841         DefInst = MRI.getVRegDef(RSR.Reg);
7842       }
7843       break;
7844     }
7845     default:
7846       if (followSubRegDef(*MI, RSR)) {
7847         if (!RSR.Reg)
7848           return nullptr;
7849         DefInst = MRI.getVRegDef(RSR.Reg);
7850       }
7851     }
7852     if (!DefInst)
7853       return MI;
7854   }
7855   return nullptr;
7856 }
7857
7858 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
7859                                       Register VReg,
7860                                       const MachineInstr &DefMI,
7861                                       const MachineInstr &UseMI) {
7862   assert(MRI.isSSA() && "Must be run on SSA");
7863
7864   auto *TRI = MRI.getTargetRegisterInfo();
7865   auto *DefBB = DefMI.getParent();
7866
7867   // Don't bother searching between blocks, although it is possible this block
7868   // doesn't modify exec.
7869 if (UseMI.getParent() != DefBB) 7870 return true; 7871 7872 const int MaxInstScan = 20; 7873 int NumInst = 0; 7874 7875 // Stop scan at the use. 7876 auto E = UseMI.getIterator(); 7877 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 7878 if (I->isDebugInstr()) 7879 continue; 7880 7881 if (++NumInst > MaxInstScan) 7882 return true; 7883 7884 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 7885 return true; 7886 } 7887 7888 return false; 7889 } 7890 7891 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 7892 Register VReg, 7893 const MachineInstr &DefMI) { 7894 assert(MRI.isSSA() && "Must be run on SSA"); 7895 7896 auto *TRI = MRI.getTargetRegisterInfo(); 7897 auto *DefBB = DefMI.getParent(); 7898 7899 const int MaxUseScan = 10; 7900 int NumUse = 0; 7901 7902 for (auto &Use : MRI.use_nodbg_operands(VReg)) { 7903 auto &UseInst = *Use.getParent(); 7904 // Don't bother searching between blocks, although it is possible this block 7905 // doesn't modify exec. 7906 if (UseInst.getParent() != DefBB) 7907 return true; 7908 7909 if (++NumUse > MaxUseScan) 7910 return true; 7911 } 7912 7913 if (NumUse == 0) 7914 return false; 7915 7916 const int MaxInstScan = 20; 7917 int NumInst = 0; 7918 7919 // Stop scan when we have seen all the uses. 7920 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 7921 assert(I != DefBB->end()); 7922 7923 if (I->isDebugInstr()) 7924 continue; 7925 7926 if (++NumInst > MaxInstScan) 7927 return true; 7928 7929 for (const MachineOperand &Op : I->operands()) { 7930 // We don't check reg masks here as they're used only on calls: 7931 // 1. EXEC is only considered const within one BB 7932 // 2. Call should be a terminator instruction if present in a BB 7933 7934 if (!Op.isReg()) 7935 continue; 7936 7937 Register Reg = Op.getReg(); 7938 if (Op.isUse()) { 7939 if (Reg == VReg && --NumUse == 0) 7940 return false; 7941 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC)) 7942 return true; 7943 } 7944 } 7945 } 7946 7947 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 7948 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 7949 const DebugLoc &DL, Register Src, Register Dst) const { 7950 auto Cur = MBB.begin(); 7951 if (Cur != MBB.end()) 7952 do { 7953 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 7954 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 7955 ++Cur; 7956 } while (Cur != MBB.end() && Cur != LastPHIIt); 7957 7958 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 7959 Dst); 7960 } 7961 7962 MachineInstr *SIInstrInfo::createPHISourceCopy( 7963 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 7964 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const { 7965 if (InsPt != MBB.end() && 7966 (InsPt->getOpcode() == AMDGPU::SI_IF || 7967 InsPt->getOpcode() == AMDGPU::SI_ELSE || 7968 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 7969 InsPt->definesRegister(Src)) { 7970 InsPt++; 7971 return BuildMI(MBB, InsPt, DL, 7972 get(ST.isWave32() ? 
AMDGPU::S_MOV_B32_term 7973 : AMDGPU::S_MOV_B64_term), 7974 Dst) 7975 .addReg(Src, 0, SrcSubReg) 7976 .addReg(AMDGPU::EXEC, RegState::Implicit); 7977 } 7978 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 7979 Dst); 7980 } 7981 7982 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 7983 7984 MachineInstr *SIInstrInfo::foldMemoryOperandImpl( 7985 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 7986 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, 7987 VirtRegMap *VRM) const { 7988 // This is a bit of a hack (copied from AArch64). Consider this instruction: 7989 // 7990 // %0:sreg_32 = COPY $m0 7991 // 7992 // We explicitly chose SReg_32 for the virtual register so such a copy might 7993 // be eliminated by RegisterCoalescer. However, that may not be possible, and 7994 // %0 may even spill. We can't spill $m0 normally (it would require copying to 7995 // a numbered SGPR anyway), and since it is in the SReg_32 register class, 7996 // TargetInstrInfo::foldMemoryOperand() is going to try. 7997 // A similar issue also exists with spilling and reloading $exec registers. 7998 // 7999 // To prevent that, constrain the %0 register class here. 8000 if (MI.isFullCopy()) { 8001 Register DstReg = MI.getOperand(0).getReg(); 8002 Register SrcReg = MI.getOperand(1).getReg(); 8003 if ((DstReg.isVirtual() || SrcReg.isVirtual()) && 8004 (DstReg.isVirtual() != SrcReg.isVirtual())) { 8005 MachineRegisterInfo &MRI = MF.getRegInfo(); 8006 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg; 8007 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg); 8008 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) { 8009 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 8010 return nullptr; 8011 } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) { 8012 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass); 8013 return nullptr; 8014 } 8015 } 8016 } 8017 8018 return nullptr; 8019 } 8020 8021 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 8022 const MachineInstr &MI, 8023 unsigned *PredCost) const { 8024 if (MI.isBundle()) { 8025 MachineBasicBlock::const_instr_iterator I(MI.getIterator()); 8026 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end()); 8027 unsigned Lat = 0, Count = 0; 8028 for (++I; I != E && I->isBundledWithPred(); ++I) { 8029 ++Count; 8030 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I)); 8031 } 8032 return Lat + Count - 1; 8033 } 8034 8035 return SchedModel.computeInstrLatency(&MI); 8036 } 8037 8038 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) { 8039 switch (MF.getFunction().getCallingConv()) { 8040 case CallingConv::AMDGPU_PS: 8041 return 1; 8042 case CallingConv::AMDGPU_VS: 8043 return 2; 8044 case CallingConv::AMDGPU_GS: 8045 return 3; 8046 case CallingConv::AMDGPU_HS: 8047 case CallingConv::AMDGPU_LS: 8048 case CallingConv::AMDGPU_ES: 8049 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 8050 case CallingConv::AMDGPU_CS: 8051 case CallingConv::AMDGPU_KERNEL: 8052 case CallingConv::C: 8053 case CallingConv::Fast: 8054 default: 8055 // Assume other calling conventions are various compute callable functions 8056 return 0; 8057 } 8058 } 8059 8060 bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, 8061 Register &SrcReg2, int64_t &CmpMask, 8062 int64_t &CmpValue) const { 8063 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg()) 8064 return false; 8065 
8066 switch (MI.getOpcode()) { 8067 default: 8068 break; 8069 case AMDGPU::S_CMP_EQ_U32: 8070 case AMDGPU::S_CMP_EQ_I32: 8071 case AMDGPU::S_CMP_LG_U32: 8072 case AMDGPU::S_CMP_LG_I32: 8073 case AMDGPU::S_CMP_LT_U32: 8074 case AMDGPU::S_CMP_LT_I32: 8075 case AMDGPU::S_CMP_GT_U32: 8076 case AMDGPU::S_CMP_GT_I32: 8077 case AMDGPU::S_CMP_LE_U32: 8078 case AMDGPU::S_CMP_LE_I32: 8079 case AMDGPU::S_CMP_GE_U32: 8080 case AMDGPU::S_CMP_GE_I32: 8081 case AMDGPU::S_CMP_EQ_U64: 8082 case AMDGPU::S_CMP_LG_U64: 8083 SrcReg = MI.getOperand(0).getReg(); 8084 if (MI.getOperand(1).isReg()) { 8085 if (MI.getOperand(1).getSubReg()) 8086 return false; 8087 SrcReg2 = MI.getOperand(1).getReg(); 8088 CmpValue = 0; 8089 } else if (MI.getOperand(1).isImm()) { 8090 SrcReg2 = Register(); 8091 CmpValue = MI.getOperand(1).getImm(); 8092 } else { 8093 return false; 8094 } 8095 CmpMask = ~0; 8096 return true; 8097 case AMDGPU::S_CMPK_EQ_U32: 8098 case AMDGPU::S_CMPK_EQ_I32: 8099 case AMDGPU::S_CMPK_LG_U32: 8100 case AMDGPU::S_CMPK_LG_I32: 8101 case AMDGPU::S_CMPK_LT_U32: 8102 case AMDGPU::S_CMPK_LT_I32: 8103 case AMDGPU::S_CMPK_GT_U32: 8104 case AMDGPU::S_CMPK_GT_I32: 8105 case AMDGPU::S_CMPK_LE_U32: 8106 case AMDGPU::S_CMPK_LE_I32: 8107 case AMDGPU::S_CMPK_GE_U32: 8108 case AMDGPU::S_CMPK_GE_I32: 8109 SrcReg = MI.getOperand(0).getReg(); 8110 SrcReg2 = Register(); 8111 CmpValue = MI.getOperand(1).getImm(); 8112 CmpMask = ~0; 8113 return true; 8114 } 8115 8116 return false; 8117 } 8118 8119 bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, 8120 Register SrcReg2, int64_t CmpMask, 8121 int64_t CmpValue, 8122 const MachineRegisterInfo *MRI) const { 8123 if (!SrcReg || SrcReg.isPhysical()) 8124 return false; 8125 8126 if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue)) 8127 return false; 8128 8129 const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI, 8130 this](int64_t ExpectedValue, unsigned SrcSize, 8131 bool IsReversable, bool IsSigned) -> bool { 8132 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8133 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8134 // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8135 // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8136 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n 8137 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8138 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8139 // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8140 // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8141 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n 8142 // 8143 // Signed ge/gt are not used for the sign bit. 
8144 // 8145 // If result of the AND is unused except in the compare: 8146 // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n 8147 // 8148 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n 8149 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n 8150 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n 8151 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n 8152 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n 8153 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n 8154 8155 MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg); 8156 if (!Def || Def->getParent() != CmpInstr.getParent()) 8157 return false; 8158 8159 if (Def->getOpcode() != AMDGPU::S_AND_B32 && 8160 Def->getOpcode() != AMDGPU::S_AND_B64) 8161 return false; 8162 8163 int64_t Mask; 8164 const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool { 8165 if (MO->isImm()) 8166 Mask = MO->getImm(); 8167 else if (!getFoldableImm(MO, Mask)) 8168 return false; 8169 Mask &= maxUIntN(SrcSize); 8170 return isPowerOf2_64(Mask); 8171 }; 8172 8173 MachineOperand *SrcOp = &Def->getOperand(1); 8174 if (isMask(SrcOp)) 8175 SrcOp = &Def->getOperand(2); 8176 else if (isMask(&Def->getOperand(2))) 8177 SrcOp = &Def->getOperand(1); 8178 else 8179 return false; 8180 8181 unsigned BitNo = countTrailingZeros((uint64_t)Mask); 8182 if (IsSigned && BitNo == SrcSize - 1) 8183 return false; 8184 8185 ExpectedValue <<= BitNo; 8186 8187 bool IsReversedCC = false; 8188 if (CmpValue != ExpectedValue) { 8189 if (!IsReversable) 8190 return false; 8191 IsReversedCC = CmpValue == (ExpectedValue ^ Mask); 8192 if (!IsReversedCC) 8193 return false; 8194 } 8195 8196 Register DefReg = Def->getOperand(0).getReg(); 8197 if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg)) 8198 return false; 8199 8200 for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator(); 8201 I != E; ++I) { 8202 if (I->modifiesRegister(AMDGPU::SCC, &RI) || 8203 I->killsRegister(AMDGPU::SCC, &RI)) 8204 return false; 8205 } 8206 8207 MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC); 8208 SccDef->setIsDead(false); 8209 CmpInstr.eraseFromParent(); 8210 8211 if (!MRI->use_nodbg_empty(DefReg)) { 8212 assert(!IsReversedCC); 8213 return true; 8214 } 8215 8216 // Replace AND with unused result with a S_BITCMP. 8217 MachineBasicBlock *MBB = Def->getParent(); 8218 8219 unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32 8220 : AMDGPU::S_BITCMP1_B32 8221 : IsReversedCC ? 
AMDGPU::S_BITCMP0_B64 8222 : AMDGPU::S_BITCMP1_B64; 8223 8224 BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc)) 8225 .add(*SrcOp) 8226 .addImm(BitNo); 8227 Def->eraseFromParent(); 8228 8229 return true; 8230 }; 8231 8232 switch (CmpInstr.getOpcode()) { 8233 default: 8234 break; 8235 case AMDGPU::S_CMP_EQ_U32: 8236 case AMDGPU::S_CMP_EQ_I32: 8237 case AMDGPU::S_CMPK_EQ_U32: 8238 case AMDGPU::S_CMPK_EQ_I32: 8239 return optimizeCmpAnd(1, 32, true, false); 8240 case AMDGPU::S_CMP_GE_U32: 8241 case AMDGPU::S_CMPK_GE_U32: 8242 return optimizeCmpAnd(1, 32, false, false); 8243 case AMDGPU::S_CMP_GE_I32: 8244 case AMDGPU::S_CMPK_GE_I32: 8245 return optimizeCmpAnd(1, 32, false, true); 8246 case AMDGPU::S_CMP_EQ_U64: 8247 return optimizeCmpAnd(1, 64, true, false); 8248 case AMDGPU::S_CMP_LG_U32: 8249 case AMDGPU::S_CMP_LG_I32: 8250 case AMDGPU::S_CMPK_LG_U32: 8251 case AMDGPU::S_CMPK_LG_I32: 8252 return optimizeCmpAnd(0, 32, true, false); 8253 case AMDGPU::S_CMP_GT_U32: 8254 case AMDGPU::S_CMPK_GT_U32: 8255 return optimizeCmpAnd(0, 32, false, false); 8256 case AMDGPU::S_CMP_GT_I32: 8257 case AMDGPU::S_CMPK_GT_I32: 8258 return optimizeCmpAnd(0, 32, false, true); 8259 case AMDGPU::S_CMP_LG_U64: 8260 return optimizeCmpAnd(0, 64, true, false); 8261 } 8262 8263 return false; 8264 } 8265