1 //===- SIInstrInfo.cpp - SI Instruction Information ----------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 /// \file 10 /// SI Implementation of TargetInstrInfo. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "SIInstrInfo.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUInstrInfo.h" 17 #include "GCNHazardRecognizer.h" 18 #include "GCNSubtarget.h" 19 #include "MCTargetDesc/AMDGPUMCTargetDesc.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "llvm/Analysis/ValueTracking.h" 22 #include "llvm/CodeGen/LiveVariables.h" 23 #include "llvm/CodeGen/MachineDominators.h" 24 #include "llvm/CodeGen/MachineScheduler.h" 25 #include "llvm/CodeGen/RegisterScavenging.h" 26 #include "llvm/CodeGen/ScheduleDAG.h" 27 #include "llvm/IR/DiagnosticInfo.h" 28 #include "llvm/IR/IntrinsicsAMDGPU.h" 29 #include "llvm/MC/MCContext.h" 30 #include "llvm/Support/CommandLine.h" 31 #include "llvm/Target/TargetMachine.h" 32 33 using namespace llvm; 34 35 #define DEBUG_TYPE "si-instr-info" 36 37 #define GET_INSTRINFO_CTOR_DTOR 38 #include "AMDGPUGenInstrInfo.inc" 39 40 namespace llvm { 41 42 class AAResults; 43 44 namespace AMDGPU { 45 #define GET_D16ImageDimIntrinsics_IMPL 46 #define GET_ImageDimIntrinsicTable_IMPL 47 #define GET_RsrcIntrinsics_IMPL 48 #include "AMDGPUGenSearchableTables.inc" 49 } 50 } 51 52 53 // Must be at least 4 to be able to branch over minimum unconditional branch 54 // code. This is only for making it possible to write reasonably small tests for 55 // long branches. 56 static cl::opt<unsigned> 57 BranchOffsetBits("amdgpu-s-branch-bits", cl::ReallyHidden, cl::init(16), 58 cl::desc("Restrict range of branch instructions (DEBUG)")); 59 60 static cl::opt<bool> Fix16BitCopies( 61 "amdgpu-fix-16-bit-physreg-copies", 62 cl::desc("Fix copies between 32 and 16 bit registers by extending to 32 bit"), 63 cl::init(true), 64 cl::ReallyHidden); 65 66 SIInstrInfo::SIInstrInfo(const GCNSubtarget &ST) 67 : AMDGPUGenInstrInfo(AMDGPU::ADJCALLSTACKUP, AMDGPU::ADJCALLSTACKDOWN), 68 RI(ST), ST(ST) { 69 SchedModel.init(&ST); 70 } 71 72 //===----------------------------------------------------------------------===// 73 // TargetInstrInfo callbacks 74 //===----------------------------------------------------------------------===// 75 76 static unsigned getNumOperandsNoGlue(SDNode *Node) { 77 unsigned N = Node->getNumOperands(); 78 while (N && Node->getOperand(N - 1).getValueType() == MVT::Glue) 79 --N; 80 return N; 81 } 82 83 /// Returns true if both nodes have the same value for the given 84 /// operand \p Op, or if both nodes do not have this operand. 85 static bool nodesHaveSameOperandValue(SDNode *N0, SDNode* N1, unsigned OpName) { 86 unsigned Opc0 = N0->getMachineOpcode(); 87 unsigned Opc1 = N1->getMachineOpcode(); 88 89 int Op0Idx = AMDGPU::getNamedOperandIdx(Opc0, OpName); 90 int Op1Idx = AMDGPU::getNamedOperandIdx(Opc1, OpName); 91 92 if (Op0Idx == -1 && Op1Idx == -1) 93 return true; 94 95 96 if ((Op0Idx == -1 && Op1Idx != -1) || 97 (Op1Idx == -1 && Op0Idx != -1)) 98 return false; 99 100 // getNamedOperandIdx returns the index for the MachineInstr's operands, 101 // which includes the result as the first operand. 
  // We are indexing into the MachineSDNode's operands, so we need to skip the
  // result operand to get the real index.
  --Op0Idx;
  --Op1Idx;

  return N0->getOperand(Op0Idx) == N1->getOperand(Op1Idx);
}

bool SIInstrInfo::isReallyTriviallyReMaterializable(const MachineInstr &MI,
                                                    AAResults *AA) const {
  if (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isSDWA(MI)) {
    // Normally a VALU use of exec would block rematerialization, but it is OK
    // in this case to have an implicit exec read, as all VALU instructions do.
    // We really want all of the generic logic for this except for this check.

    // Another potential implicit use is the mode register. The core logic of
    // the RA will not attempt rematerialization if mode is set anywhere
    // in the function, otherwise it is safe since mode is not changed.
    return !MI.hasImplicitDef() &&
           MI.getNumImplicitOperands() == MI.getDesc().getNumImplicitUses() &&
           !MI.mayRaiseFPException();
  }

  return false;
}

bool SIInstrInfo::isIgnorableUse(const MachineOperand &MO) const {
  // Any implicit use of exec by VALU is not a real register read.
  return MO.getReg() == AMDGPU::EXEC && MO.isImplicit() &&
         isVALU(*MO.getParent());
}

bool SIInstrInfo::areLoadsFromSameBasePtr(SDNode *Load0, SDNode *Load1,
                                          int64_t &Offset0,
                                          int64_t &Offset1) const {
  if (!Load0->isMachineOpcode() || !Load1->isMachineOpcode())
    return false;

  unsigned Opc0 = Load0->getMachineOpcode();
  unsigned Opc1 = Load1->getMachineOpcode();

  // Make sure both are actually loads.
  if (!get(Opc0).mayLoad() || !get(Opc1).mayLoad())
    return false;

  if (isDS(Opc0) && isDS(Opc1)) {

    // FIXME: Handle this case:
    if (getNumOperandsNoGlue(Load0) != getNumOperandsNoGlue(Load1))
      return false;

    // Check base reg.
    if (Load0->getOperand(0) != Load1->getOperand(0))
      return false;

    // Skip read2 / write2 variants for simplicity.
    // TODO: We should report true if the used offsets are adjacent (excluding
    // st64 versions).
    int Offset0Idx = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset);
    int Offset1Idx = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset);
    if (Offset0Idx == -1 || Offset1Idx == -1)
      return false;

    // XXX - be careful of dataless loads
    // getNamedOperandIdx returns the index for MachineInstrs. Since they
    // include the output in the operand list, but SDNodes don't, we need to
    // subtract one from the index.
    Offset0Idx -= get(Opc0).NumDefs;
    Offset1Idx -= get(Opc1).NumDefs;
    Offset0 = cast<ConstantSDNode>(Load0->getOperand(Offset0Idx))->getZExtValue();
    Offset1 = cast<ConstantSDNode>(Load1->getOperand(Offset1Idx))->getZExtValue();
    return true;
  }

  if (isSMRD(Opc0) && isSMRD(Opc1)) {
    // Skip time and cache invalidation instructions.
    if (AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::sbase) == -1 ||
        AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::sbase) == -1)
      return false;

    assert(getNumOperandsNoGlue(Load0) == getNumOperandsNoGlue(Load1));

    // Check base reg.
185 if (Load0->getOperand(0) != Load1->getOperand(0)) 186 return false; 187 188 const ConstantSDNode *Load0Offset = 189 dyn_cast<ConstantSDNode>(Load0->getOperand(1)); 190 const ConstantSDNode *Load1Offset = 191 dyn_cast<ConstantSDNode>(Load1->getOperand(1)); 192 193 if (!Load0Offset || !Load1Offset) 194 return false; 195 196 Offset0 = Load0Offset->getZExtValue(); 197 Offset1 = Load1Offset->getZExtValue(); 198 return true; 199 } 200 201 // MUBUF and MTBUF can access the same addresses. 202 if ((isMUBUF(Opc0) || isMTBUF(Opc0)) && (isMUBUF(Opc1) || isMTBUF(Opc1))) { 203 204 // MUBUF and MTBUF have vaddr at different indices. 205 if (!nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::soffset) || 206 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::vaddr) || 207 !nodesHaveSameOperandValue(Load0, Load1, AMDGPU::OpName::srsrc)) 208 return false; 209 210 int OffIdx0 = AMDGPU::getNamedOperandIdx(Opc0, AMDGPU::OpName::offset); 211 int OffIdx1 = AMDGPU::getNamedOperandIdx(Opc1, AMDGPU::OpName::offset); 212 213 if (OffIdx0 == -1 || OffIdx1 == -1) 214 return false; 215 216 // getNamedOperandIdx returns the index for MachineInstrs. Since they 217 // include the output in the operand list, but SDNodes don't, we need to 218 // subtract the index by one. 219 OffIdx0 -= get(Opc0).NumDefs; 220 OffIdx1 -= get(Opc1).NumDefs; 221 222 SDValue Off0 = Load0->getOperand(OffIdx0); 223 SDValue Off1 = Load1->getOperand(OffIdx1); 224 225 // The offset might be a FrameIndexSDNode. 226 if (!isa<ConstantSDNode>(Off0) || !isa<ConstantSDNode>(Off1)) 227 return false; 228 229 Offset0 = cast<ConstantSDNode>(Off0)->getZExtValue(); 230 Offset1 = cast<ConstantSDNode>(Off1)->getZExtValue(); 231 return true; 232 } 233 234 return false; 235 } 236 237 static bool isStride64(unsigned Opc) { 238 switch (Opc) { 239 case AMDGPU::DS_READ2ST64_B32: 240 case AMDGPU::DS_READ2ST64_B64: 241 case AMDGPU::DS_WRITE2ST64_B32: 242 case AMDGPU::DS_WRITE2ST64_B64: 243 return true; 244 default: 245 return false; 246 } 247 } 248 249 bool SIInstrInfo::getMemOperandsWithOffsetWidth( 250 const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps, 251 int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, 252 const TargetRegisterInfo *TRI) const { 253 if (!LdSt.mayLoadOrStore()) 254 return false; 255 256 unsigned Opc = LdSt.getOpcode(); 257 OffsetIsScalable = false; 258 const MachineOperand *BaseOp, *OffsetOp; 259 int DataOpIdx; 260 261 if (isDS(LdSt)) { 262 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::addr); 263 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 264 if (OffsetOp) { 265 // Normal, single offset LDS instruction. 266 if (!BaseOp) { 267 // DS_CONSUME/DS_APPEND use M0 for the base address. 268 // TODO: find the implicit use operand for M0 and use that as BaseOp? 269 return false; 270 } 271 BaseOps.push_back(BaseOp); 272 Offset = OffsetOp->getImm(); 273 // Get appropriate operand, and compute width accordingly. 274 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 275 if (DataOpIdx == -1) 276 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 277 Width = getOpSize(LdSt, DataOpIdx); 278 } else { 279 // The 2 offset instructions use offset0 and offset1 instead. We can treat 280 // these as a load with a single offset if the 2 offsets are consecutive. 281 // We will use this for some partially aligned loads. 
282 const MachineOperand *Offset0Op = 283 getNamedOperand(LdSt, AMDGPU::OpName::offset0); 284 const MachineOperand *Offset1Op = 285 getNamedOperand(LdSt, AMDGPU::OpName::offset1); 286 287 unsigned Offset0 = Offset0Op->getImm(); 288 unsigned Offset1 = Offset1Op->getImm(); 289 if (Offset0 + 1 != Offset1) 290 return false; 291 292 // Each of these offsets is in element sized units, so we need to convert 293 // to bytes of the individual reads. 294 295 unsigned EltSize; 296 if (LdSt.mayLoad()) 297 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, 0)) / 16; 298 else { 299 assert(LdSt.mayStore()); 300 int Data0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 301 EltSize = TRI->getRegSizeInBits(*getOpRegClass(LdSt, Data0Idx)) / 8; 302 } 303 304 if (isStride64(Opc)) 305 EltSize *= 64; 306 307 BaseOps.push_back(BaseOp); 308 Offset = EltSize * Offset0; 309 // Get appropriate operand(s), and compute width accordingly. 310 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 311 if (DataOpIdx == -1) { 312 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data0); 313 Width = getOpSize(LdSt, DataOpIdx); 314 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::data1); 315 Width += getOpSize(LdSt, DataOpIdx); 316 } else { 317 Width = getOpSize(LdSt, DataOpIdx); 318 } 319 } 320 return true; 321 } 322 323 if (isMUBUF(LdSt) || isMTBUF(LdSt)) { 324 const MachineOperand *RSrc = getNamedOperand(LdSt, AMDGPU::OpName::srsrc); 325 if (!RSrc) // e.g. BUFFER_WBINVL1_VOL 326 return false; 327 BaseOps.push_back(RSrc); 328 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr); 329 if (BaseOp && !BaseOp->isFI()) 330 BaseOps.push_back(BaseOp); 331 const MachineOperand *OffsetImm = 332 getNamedOperand(LdSt, AMDGPU::OpName::offset); 333 Offset = OffsetImm->getImm(); 334 const MachineOperand *SOffset = 335 getNamedOperand(LdSt, AMDGPU::OpName::soffset); 336 if (SOffset) { 337 if (SOffset->isReg()) 338 BaseOps.push_back(SOffset); 339 else 340 Offset += SOffset->getImm(); 341 } 342 // Get appropriate operand, and compute width accordingly. 343 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 344 if (DataOpIdx == -1) 345 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 346 Width = getOpSize(LdSt, DataOpIdx); 347 return true; 348 } 349 350 if (isMIMG(LdSt)) { 351 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc); 352 BaseOps.push_back(&LdSt.getOperand(SRsrcIdx)); 353 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0); 354 if (VAddr0Idx >= 0) { 355 // GFX10 possible NSA encoding. 356 for (int I = VAddr0Idx; I < SRsrcIdx; ++I) 357 BaseOps.push_back(&LdSt.getOperand(I)); 358 } else { 359 BaseOps.push_back(getNamedOperand(LdSt, AMDGPU::OpName::vaddr)); 360 } 361 Offset = 0; 362 // Get appropriate operand, and compute width accordingly. 363 DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata); 364 Width = getOpSize(LdSt, DataOpIdx); 365 return true; 366 } 367 368 if (isSMRD(LdSt)) { 369 BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::sbase); 370 if (!BaseOp) // e.g. S_MEMTIME 371 return false; 372 BaseOps.push_back(BaseOp); 373 OffsetOp = getNamedOperand(LdSt, AMDGPU::OpName::offset); 374 Offset = OffsetOp ? OffsetOp->getImm() : 0; 375 // Get appropriate operand, and compute width accordingly. 
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::sdst);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  if (isFLAT(LdSt)) {
    // Instructions have either vaddr or saddr or both or none.
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::vaddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    BaseOp = getNamedOperand(LdSt, AMDGPU::OpName::saddr);
    if (BaseOp)
      BaseOps.push_back(BaseOp);
    Offset = getNamedOperand(LdSt, AMDGPU::OpName::offset)->getImm();
    // Get appropriate operand, and compute width accordingly.
    DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst);
    if (DataOpIdx == -1)
      DataOpIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
    Width = getOpSize(LdSt, DataOpIdx);
    return true;
  }

  return false;
}

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  // Other operands are typically offsets or indices from this base address.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;

  if (!MI1.hasOneMemOperand() || !MI2.hasOneMemOperand())
    return false;

  auto MO1 = *MI1.memoperands_begin();
  auto MO2 = *MI2.memoperands_begin();
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  Base1 = getUnderlyingObject(Base1);
  Base2 = getUnderlyingObject(Base2);

  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool SIInstrInfo::shouldClusterMemOps(ArrayRef<const MachineOperand *> BaseOps1,
                                      ArrayRef<const MachineOperand *> BaseOps2,
                                      unsigned NumLoads,
                                      unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    const MachineInstr &FirstLdSt = *BaseOps1.front()->getParent();
    const MachineInstr &SecondLdSt = *BaseOps2.front()->getParent();
    if (!memOpsHaveSameBasePtr(FirstLdSt, BaseOps1, SecondLdSt, BaseOps2))
      return false;
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  // In order to avoid register pressure, on average the number of DWORDs
  // loaded together by all clustered mem ops should not exceed 8. This is an
  // empirical value based on certain observations and performance related
  // experiments.
  // The good thing about this heuristic is that it avoids clustering of too
  // many sub-word loads, and also avoids clustering of wide loads. Below is a
  // brief summary of how the heuristic behaves for various `LoadSize`.
  // (1) 1 <= LoadSize <= 4: cluster at max 8 mem ops
  // (2) 5 <= LoadSize <= 8: cluster at max 4 mem ops
  // (3) 9 <= LoadSize <= 12: cluster at max 2 mem ops
  // (4) 13 <= LoadSize <= 16: cluster at max 2 mem ops
  // (5) LoadSize >= 17: do not cluster
  const unsigned LoadSize = NumBytes / NumLoads;
  const unsigned NumDWORDs = ((LoadSize + 3) / 4) * NumLoads;
  return NumDWORDs <= 8;
}

// FIXME: This behaves strangely. If, for example, you have 32 load + stores,
// the first 16 loads will be interleaved with the stores, and the next 16 will
// be clustered as expected. It should really split into 2 batches of 16
// stores.
//
// Loads are clustered until this returns false, rather than trying to schedule
// groups of stores. This also means we have to deal with saying different
// address space loads should be clustered, and ones which might cause bank
// conflicts.
//
// This might be deprecated so it might not be worth that much effort to fix.
bool SIInstrInfo::shouldScheduleLoadsNear(SDNode *Load0, SDNode *Load1,
                                          int64_t Offset0, int64_t Offset1,
                                          unsigned NumLoads) const {
  assert(Offset1 > Offset0 &&
         "Second offset should be larger than first offset!");
  // If we have fewer than 16 loads in a row, and the offsets are within 64
  // bytes, then schedule together.

  // A cacheline is 64 bytes (for global memory).
  return (NumLoads <= 16 && (Offset1 - Offset0) < 64);
}

static void reportIllegalCopy(const SIInstrInfo *TII, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc,
                              const char *Msg = "illegal SGPR to VGPR copy") {
  MachineFunction *MF = MBB.getParent();
  DiagnosticInfoUnsupported IllegalCopy(MF->getFunction(), Msg, DL, DS_Error);
  LLVMContext &C = MF->getFunction().getContext();
  C.diagnose(IllegalCopy);

  BuildMI(MBB, MI, DL, TII->get(AMDGPU::SI_ILLEGAL_COPY), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}

/// Handle copying from SGPR to AGPR, or from AGPR to AGPR. It is not possible
/// to directly copy, so an intermediate VGPR needs to be used.
static void indirectCopyToAGPR(const SIInstrInfo &TII,
                               MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator MI,
                               const DebugLoc &DL, MCRegister DestReg,
                               MCRegister SrcReg, bool KillSrc,
                               RegScavenger &RS,
                               Register ImpDefSuperReg = Register(),
                               Register ImpUseSuperReg = Register()) {
  const SIRegisterInfo &RI = TII.getRegisterInfo();

  assert(AMDGPU::SReg_32RegClass.contains(SrcReg) ||
         AMDGPU::AGPR_32RegClass.contains(SrcReg));

  // First try to find defining accvgpr_write to avoid temporary registers.
  for (auto Def = MI, E = MBB.begin(); Def != E; ) {
    --Def;
    if (!Def->definesRegister(SrcReg, &RI))
      continue;
    if (Def->getOpcode() != AMDGPU::V_ACCVGPR_WRITE_B32_e64)
      break;

    MachineOperand &DefOp = Def->getOperand(1);
    assert(DefOp.isReg() || DefOp.isImm());

    if (DefOp.isReg()) {
      // Check that the register source operand is not clobbered before MI.
      // Immediate operands are always safe to propagate.
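      // The scan below visits every instruction between the def and MI; any
      // write to DefOp's register in that range makes the fold unsafe.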
530 bool SafeToPropagate = true; 531 for (auto I = Def; I != MI && SafeToPropagate; ++I) 532 if (I->modifiesRegister(DefOp.getReg(), &RI)) 533 SafeToPropagate = false; 534 535 if (!SafeToPropagate) 536 break; 537 538 DefOp.setIsKill(false); 539 } 540 541 MachineInstrBuilder Builder = 542 BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 543 .add(DefOp); 544 if (ImpDefSuperReg) 545 Builder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 546 547 if (ImpUseSuperReg) { 548 Builder.addReg(ImpUseSuperReg, 549 getKillRegState(KillSrc) | RegState::Implicit); 550 } 551 552 return; 553 } 554 555 RS.enterBasicBlock(MBB); 556 RS.forward(MI); 557 558 // Ideally we want to have three registers for a long reg_sequence copy 559 // to hide 2 waitstates between v_mov_b32 and accvgpr_write. 560 unsigned MaxVGPRs = RI.getRegPressureLimit(&AMDGPU::VGPR_32RegClass, 561 *MBB.getParent()); 562 563 // Registers in the sequence are allocated contiguously so we can just 564 // use register number to pick one of three round-robin temps. 565 unsigned RegNo = DestReg % 3; 566 Register Tmp = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 567 if (!Tmp) 568 report_fatal_error("Cannot scavenge VGPR to copy to AGPR"); 569 RS.setRegUsed(Tmp); 570 571 if (!TII.getSubtarget().hasGFX90AInsts()) { 572 // Only loop through if there are any free registers left, otherwise 573 // scavenger may report a fatal error without emergency spill slot 574 // or spill with the slot. 575 while (RegNo-- && RS.FindUnusedReg(&AMDGPU::VGPR_32RegClass)) { 576 Register Tmp2 = RS.scavengeRegister(&AMDGPU::VGPR_32RegClass, 0); 577 if (!Tmp2 || RI.getHWRegIndex(Tmp2) >= MaxVGPRs) 578 break; 579 Tmp = Tmp2; 580 RS.setRegUsed(Tmp); 581 } 582 } 583 584 // Insert copy to temporary VGPR. 585 unsigned TmpCopyOp = AMDGPU::V_MOV_B32_e32; 586 if (AMDGPU::AGPR_32RegClass.contains(SrcReg)) { 587 TmpCopyOp = AMDGPU::V_ACCVGPR_READ_B32_e64; 588 } else { 589 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 590 } 591 592 MachineInstrBuilder UseBuilder = BuildMI(MBB, MI, DL, TII.get(TmpCopyOp), Tmp) 593 .addReg(SrcReg, getKillRegState(KillSrc)); 594 if (ImpUseSuperReg) { 595 UseBuilder.addReg(ImpUseSuperReg, 596 getKillRegState(KillSrc) | RegState::Implicit); 597 } 598 599 MachineInstrBuilder DefBuilder 600 = BuildMI(MBB, MI, DL, TII.get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 601 .addReg(Tmp, RegState::Kill); 602 603 if (ImpDefSuperReg) 604 DefBuilder.addReg(ImpDefSuperReg, RegState::Define | RegState::Implicit); 605 } 606 607 static void expandSGPRCopy(const SIInstrInfo &TII, MachineBasicBlock &MBB, 608 MachineBasicBlock::iterator MI, const DebugLoc &DL, 609 MCRegister DestReg, MCRegister SrcReg, bool KillSrc, 610 const TargetRegisterClass *RC, bool Forward) { 611 const SIRegisterInfo &RI = TII.getRegisterInfo(); 612 ArrayRef<int16_t> BaseIndices = RI.getRegSplitParts(RC, 4); 613 MachineBasicBlock::iterator I = MI; 614 MachineInstr *FirstMI = nullptr, *LastMI = nullptr; 615 616 for (unsigned Idx = 0; Idx < BaseIndices.size(); ++Idx) { 617 int16_t SubIdx = BaseIndices[Idx]; 618 Register Reg = RI.getSubReg(DestReg, SubIdx); 619 unsigned Opcode = AMDGPU::S_MOV_B32; 620 621 // Is SGPR aligned? If so try to combine with next. 
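    // An even-numbered SGPR can be paired with the register that follows it,
    // which allows two 32-bit moves to be merged into a single S_MOV_B64 below.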
    Register Src = RI.getSubReg(SrcReg, SubIdx);
    bool AlignedDest = ((Reg - AMDGPU::SGPR0) % 2) == 0;
    bool AlignedSrc = ((Src - AMDGPU::SGPR0) % 2) == 0;
    if (AlignedDest && AlignedSrc && (Idx + 1 < BaseIndices.size())) {
      // Can use SGPR64 copy
      unsigned Channel = RI.getChannelFromSubReg(SubIdx);
      SubIdx = RI.getSubRegFromChannel(Channel, 2);
      Opcode = AMDGPU::S_MOV_B64;
      Idx++;
    }

    LastMI = BuildMI(MBB, I, DL, TII.get(Opcode), RI.getSubReg(DestReg, SubIdx))
                 .addReg(RI.getSubReg(SrcReg, SubIdx))
                 .addReg(SrcReg, RegState::Implicit);

    if (!FirstMI)
      FirstMI = LastMI;

    if (!Forward)
      I--;
  }

  assert(FirstMI && LastMI);
  if (!Forward)
    std::swap(FirstMI, LastMI);

  FirstMI->addOperand(
      MachineOperand::CreateReg(DestReg, true /*IsDef*/, true /*IsImp*/));

  if (KillSrc)
    LastMI->addRegisterKilled(SrcReg, &RI);
}

void SIInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              const DebugLoc &DL, MCRegister DestReg,
                              MCRegister SrcReg, bool KillSrc) const {
  const TargetRegisterClass *RC = RI.getPhysRegClass(DestReg);

  // FIXME: This is a hack to resolve copies between 16-bit and 32-bit
  // registers until all patterns are fixed.
  if (Fix16BitCopies &&
      ((RI.getRegSizeInBits(*RC) == 16) ^
       (RI.getRegSizeInBits(*RI.getPhysRegClass(SrcReg)) == 16))) {
    MCRegister &RegToFix = (RI.getRegSizeInBits(*RC) == 16) ? DestReg : SrcReg;
    MCRegister Super = RI.get32BitRegister(RegToFix);
    assert(RI.getSubReg(Super, AMDGPU::lo16) == RegToFix);
    RegToFix = Super;

    if (DestReg == SrcReg) {
      // Insert empty bundle since ExpandPostRA expects an instruction here.
      BuildMI(MBB, MI, DL, get(AMDGPU::BUNDLE));
      return;
    }

    RC = RI.getPhysRegClass(DestReg);
  }

  if (RC == &AMDGPU::VGPR_32RegClass) {
    assert(AMDGPU::VGPR_32RegClass.contains(SrcReg) ||
           AMDGPU::SReg_32RegClass.contains(SrcReg) ||
           AMDGPU::AGPR_32RegClass.contains(SrcReg));
    unsigned Opc = AMDGPU::AGPR_32RegClass.contains(SrcReg) ?
                     AMDGPU::V_ACCVGPR_READ_B32_e64 : AMDGPU::V_MOV_B32_e32;
    BuildMI(MBB, MI, DL, get(Opc), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc));
    return;
  }

  if (RC == &AMDGPU::SReg_32_XM0RegClass ||
      RC == &AMDGPU::SReg_32RegClass) {
    if (SrcReg == AMDGPU::SCC) {
      BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B32), DestReg)
          .addImm(1)
          .addImm(0);
      return;
    }

    if (DestReg == AMDGPU::VCC_LO) {
      if (AMDGPU::SReg_32RegClass.contains(SrcReg)) {
        BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), AMDGPU::VCC_LO)
            .addReg(SrcReg, getKillRegState(KillSrc));
      } else {
        // FIXME: Hack until VReg_1 removed.
706 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 707 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 708 .addImm(0) 709 .addReg(SrcReg, getKillRegState(KillSrc)); 710 } 711 712 return; 713 } 714 715 if (!AMDGPU::SReg_32RegClass.contains(SrcReg)) { 716 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 717 return; 718 } 719 720 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 721 .addReg(SrcReg, getKillRegState(KillSrc)); 722 return; 723 } 724 725 if (RC == &AMDGPU::SReg_64RegClass) { 726 if (SrcReg == AMDGPU::SCC) { 727 BuildMI(MBB, MI, DL, get(AMDGPU::S_CSELECT_B64), DestReg) 728 .addImm(1) 729 .addImm(0); 730 return; 731 } 732 733 if (DestReg == AMDGPU::VCC) { 734 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 735 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), AMDGPU::VCC) 736 .addReg(SrcReg, getKillRegState(KillSrc)); 737 } else { 738 // FIXME: Hack until VReg_1 removed. 739 assert(AMDGPU::VGPR_32RegClass.contains(SrcReg)); 740 BuildMI(MBB, MI, DL, get(AMDGPU::V_CMP_NE_U32_e32)) 741 .addImm(0) 742 .addReg(SrcReg, getKillRegState(KillSrc)); 743 } 744 745 return; 746 } 747 748 if (!AMDGPU::SReg_64RegClass.contains(SrcReg)) { 749 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 750 return; 751 } 752 753 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 754 .addReg(SrcReg, getKillRegState(KillSrc)); 755 return; 756 } 757 758 if (DestReg == AMDGPU::SCC) { 759 // Copying 64-bit or 32-bit sources to SCC barely makes sense, 760 // but SelectionDAG emits such copies for i1 sources. 761 if (AMDGPU::SReg_64RegClass.contains(SrcReg)) { 762 // This copy can only be produced by patterns 763 // with explicit SCC, which are known to be enabled 764 // only for subtargets with S_CMP_LG_U64 present. 765 assert(ST.hasScalarCompareEq64()); 766 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U64)) 767 .addReg(SrcReg, getKillRegState(KillSrc)) 768 .addImm(0); 769 } else { 770 assert(AMDGPU::SReg_32RegClass.contains(SrcReg)); 771 BuildMI(MBB, MI, DL, get(AMDGPU::S_CMP_LG_U32)) 772 .addReg(SrcReg, getKillRegState(KillSrc)) 773 .addImm(0); 774 } 775 776 return; 777 } 778 779 if (RC == &AMDGPU::AGPR_32RegClass) { 780 if (AMDGPU::VGPR_32RegClass.contains(SrcReg)) { 781 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_WRITE_B32_e64), DestReg) 782 .addReg(SrcReg, getKillRegState(KillSrc)); 783 return; 784 } 785 786 if (AMDGPU::AGPR_32RegClass.contains(SrcReg) && ST.hasGFX90AInsts()) { 787 BuildMI(MBB, MI, DL, get(AMDGPU::V_ACCVGPR_MOV_B32), DestReg) 788 .addReg(SrcReg, getKillRegState(KillSrc)); 789 return; 790 } 791 792 // FIXME: Pass should maintain scavenger to avoid scan through the block on 793 // every AGPR spill. 
794 RegScavenger RS; 795 indirectCopyToAGPR(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RS); 796 return; 797 } 798 799 const unsigned Size = RI.getRegSizeInBits(*RC); 800 if (Size == 16) { 801 assert(AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 802 AMDGPU::VGPR_HI16RegClass.contains(SrcReg) || 803 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 804 AMDGPU::AGPR_LO16RegClass.contains(SrcReg)); 805 806 bool IsSGPRDst = AMDGPU::SReg_LO16RegClass.contains(DestReg); 807 bool IsSGPRSrc = AMDGPU::SReg_LO16RegClass.contains(SrcReg); 808 bool IsAGPRDst = AMDGPU::AGPR_LO16RegClass.contains(DestReg); 809 bool IsAGPRSrc = AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 810 bool DstLow = AMDGPU::VGPR_LO16RegClass.contains(DestReg) || 811 AMDGPU::SReg_LO16RegClass.contains(DestReg) || 812 AMDGPU::AGPR_LO16RegClass.contains(DestReg); 813 bool SrcLow = AMDGPU::VGPR_LO16RegClass.contains(SrcReg) || 814 AMDGPU::SReg_LO16RegClass.contains(SrcReg) || 815 AMDGPU::AGPR_LO16RegClass.contains(SrcReg); 816 MCRegister NewDestReg = RI.get32BitRegister(DestReg); 817 MCRegister NewSrcReg = RI.get32BitRegister(SrcReg); 818 819 if (IsSGPRDst) { 820 if (!IsSGPRSrc) { 821 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 822 return; 823 } 824 825 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), NewDestReg) 826 .addReg(NewSrcReg, getKillRegState(KillSrc)); 827 return; 828 } 829 830 if (IsAGPRDst || IsAGPRSrc) { 831 if (!DstLow || !SrcLow) { 832 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 833 "Cannot use hi16 subreg with an AGPR!"); 834 } 835 836 copyPhysReg(MBB, MI, DL, NewDestReg, NewSrcReg, KillSrc); 837 return; 838 } 839 840 if (IsSGPRSrc && !ST.hasSDWAScalar()) { 841 if (!DstLow || !SrcLow) { 842 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc, 843 "Cannot use hi16 subreg on VI!"); 844 } 845 846 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), NewDestReg) 847 .addReg(NewSrcReg, getKillRegState(KillSrc)); 848 return; 849 } 850 851 auto MIB = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_sdwa), NewDestReg) 852 .addImm(0) // src0_modifiers 853 .addReg(NewSrcReg) 854 .addImm(0) // clamp 855 .addImm(DstLow ? AMDGPU::SDWA::SdwaSel::WORD_0 856 : AMDGPU::SDWA::SdwaSel::WORD_1) 857 .addImm(AMDGPU::SDWA::DstUnused::UNUSED_PRESERVE) 858 .addImm(SrcLow ? AMDGPU::SDWA::SdwaSel::WORD_0 859 : AMDGPU::SDWA::SdwaSel::WORD_1) 860 .addReg(NewDestReg, RegState::Implicit | RegState::Undef); 861 // First implicit operand is $exec. 
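    // Tie the def to the implicit undef use of NewDestReg added above; it is
    // the last operand, since $exec comes earlier in the implicit operand list.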
862 MIB->tieOperands(0, MIB->getNumOperands() - 1); 863 return; 864 } 865 866 const TargetRegisterClass *SrcRC = RI.getPhysRegClass(SrcReg); 867 if (RC == RI.getVGPR64Class() && (SrcRC == RC || RI.isSGPRClass(SrcRC))) { 868 if (ST.hasPackedFP32Ops()) { 869 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DestReg) 870 .addImm(SISrcMods::OP_SEL_1) 871 .addReg(SrcReg) 872 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 873 .addReg(SrcReg) 874 .addImm(0) // op_sel_lo 875 .addImm(0) // op_sel_hi 876 .addImm(0) // neg_lo 877 .addImm(0) // neg_hi 878 .addImm(0) // clamp 879 .addReg(SrcReg, getKillRegState(KillSrc) | RegState::Implicit); 880 return; 881 } 882 } 883 884 const bool Forward = RI.getHWRegIndex(DestReg) <= RI.getHWRegIndex(SrcReg); 885 if (RI.isSGPRClass(RC)) { 886 if (!RI.isSGPRClass(SrcRC)) { 887 reportIllegalCopy(this, MBB, MI, DL, DestReg, SrcReg, KillSrc); 888 return; 889 } 890 expandSGPRCopy(*this, MBB, MI, DL, DestReg, SrcReg, KillSrc, RC, Forward); 891 return; 892 } 893 894 unsigned EltSize = 4; 895 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 896 if (RI.hasAGPRs(RC)) { 897 Opcode = (RI.hasVGPRs(SrcRC)) ? 898 AMDGPU::V_ACCVGPR_WRITE_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 899 } else if (RI.hasVGPRs(RC) && RI.hasAGPRs(SrcRC)) { 900 Opcode = AMDGPU::V_ACCVGPR_READ_B32_e64; 901 } else if ((Size % 64 == 0) && RI.hasVGPRs(RC) && 902 (RI.isProperlyAlignedRC(*RC) && 903 (SrcRC == RC || RI.isSGPRClass(SrcRC)))) { 904 // TODO: In 96-bit case, could do a 64-bit mov and then a 32-bit mov. 905 if (ST.hasPackedFP32Ops()) { 906 Opcode = AMDGPU::V_PK_MOV_B32; 907 EltSize = 8; 908 } 909 } 910 911 // For the cases where we need an intermediate instruction/temporary register 912 // (destination is an AGPR), we need a scavenger. 913 // 914 // FIXME: The pass should maintain this for us so we don't have to re-scan the 915 // whole block for every handled copy. 916 std::unique_ptr<RegScavenger> RS; 917 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) 918 RS.reset(new RegScavenger()); 919 920 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RC, EltSize); 921 922 // If there is an overlap, we can't kill the super-register on the last 923 // instruction, since it will also kill the components made live by this def. 924 const bool CanKillSuperReg = KillSrc && !RI.regsOverlap(SrcReg, DestReg); 925 926 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 927 unsigned SubIdx; 928 if (Forward) 929 SubIdx = SubIndices[Idx]; 930 else 931 SubIdx = SubIndices[SubIndices.size() - Idx - 1]; 932 933 bool UseKill = CanKillSuperReg && Idx == SubIndices.size() - 1; 934 935 if (Opcode == AMDGPU::INSTRUCTION_LIST_END) { 936 Register ImpDefSuper = Idx == 0 ? 
Register(DestReg) : Register(); 937 Register ImpUseSuper = SrcReg; 938 indirectCopyToAGPR(*this, MBB, MI, DL, RI.getSubReg(DestReg, SubIdx), 939 RI.getSubReg(SrcReg, SubIdx), UseKill, *RS, 940 ImpDefSuper, ImpUseSuper); 941 } else if (Opcode == AMDGPU::V_PK_MOV_B32) { 942 Register DstSubReg = RI.getSubReg(DestReg, SubIdx); 943 Register SrcSubReg = RI.getSubReg(SrcReg, SubIdx); 944 MachineInstrBuilder MIB = 945 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), DstSubReg) 946 .addImm(SISrcMods::OP_SEL_1) 947 .addReg(SrcSubReg) 948 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) 949 .addReg(SrcSubReg) 950 .addImm(0) // op_sel_lo 951 .addImm(0) // op_sel_hi 952 .addImm(0) // neg_lo 953 .addImm(0) // neg_hi 954 .addImm(0) // clamp 955 .addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 956 if (Idx == 0) 957 MIB.addReg(DestReg, RegState::Define | RegState::Implicit); 958 } else { 959 MachineInstrBuilder Builder = 960 BuildMI(MBB, MI, DL, get(Opcode), RI.getSubReg(DestReg, SubIdx)) 961 .addReg(RI.getSubReg(SrcReg, SubIdx)); 962 if (Idx == 0) 963 Builder.addReg(DestReg, RegState::Define | RegState::Implicit); 964 965 Builder.addReg(SrcReg, getKillRegState(UseKill) | RegState::Implicit); 966 } 967 } 968 } 969 970 int SIInstrInfo::commuteOpcode(unsigned Opcode) const { 971 int NewOpc; 972 973 // Try to map original to commuted opcode 974 NewOpc = AMDGPU::getCommuteRev(Opcode); 975 if (NewOpc != -1) 976 // Check if the commuted (REV) opcode exists on the target. 977 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 978 979 // Try to map commuted to original opcode 980 NewOpc = AMDGPU::getCommuteOrig(Opcode); 981 if (NewOpc != -1) 982 // Check if the original (non-REV) opcode exists on the target. 983 return pseudoToMCOpcode(NewOpc) != -1 ? NewOpc : -1; 984 985 return Opcode; 986 } 987 988 void SIInstrInfo::materializeImmediate(MachineBasicBlock &MBB, 989 MachineBasicBlock::iterator MI, 990 const DebugLoc &DL, unsigned DestReg, 991 int64_t Value) const { 992 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 993 const TargetRegisterClass *RegClass = MRI.getRegClass(DestReg); 994 if (RegClass == &AMDGPU::SReg_32RegClass || 995 RegClass == &AMDGPU::SGPR_32RegClass || 996 RegClass == &AMDGPU::SReg_32_XM0RegClass || 997 RegClass == &AMDGPU::SReg_32_XM0_XEXECRegClass) { 998 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DestReg) 999 .addImm(Value); 1000 return; 1001 } 1002 1003 if (RegClass == &AMDGPU::SReg_64RegClass || 1004 RegClass == &AMDGPU::SGPR_64RegClass || 1005 RegClass == &AMDGPU::SReg_64_XEXECRegClass) { 1006 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B64), DestReg) 1007 .addImm(Value); 1008 return; 1009 } 1010 1011 if (RegClass == &AMDGPU::VGPR_32RegClass) { 1012 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DestReg) 1013 .addImm(Value); 1014 return; 1015 } 1016 if (RegClass->hasSuperClassEq(&AMDGPU::VReg_64RegClass)) { 1017 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), DestReg) 1018 .addImm(Value); 1019 return; 1020 } 1021 1022 unsigned EltSize = 4; 1023 unsigned Opcode = AMDGPU::V_MOV_B32_e32; 1024 if (RI.isSGPRClass(RegClass)) { 1025 if (RI.getRegSizeInBits(*RegClass) > 32) { 1026 Opcode = AMDGPU::S_MOV_B64; 1027 EltSize = 8; 1028 } else { 1029 Opcode = AMDGPU::S_MOV_B32; 1030 EltSize = 4; 1031 } 1032 } 1033 1034 ArrayRef<int16_t> SubIndices = RI.getRegSplitParts(RegClass, EltSize); 1035 for (unsigned Idx = 0; Idx < SubIndices.size(); ++Idx) { 1036 int64_t IdxValue = Idx == 0 ? 
Value : 0; 1037 1038 MachineInstrBuilder Builder = BuildMI(MBB, MI, DL, 1039 get(Opcode), RI.getSubReg(DestReg, SubIndices[Idx])); 1040 Builder.addImm(IdxValue); 1041 } 1042 } 1043 1044 const TargetRegisterClass * 1045 SIInstrInfo::getPreferredSelectRegClass(unsigned Size) const { 1046 return &AMDGPU::VGPR_32RegClass; 1047 } 1048 1049 void SIInstrInfo::insertVectorSelect(MachineBasicBlock &MBB, 1050 MachineBasicBlock::iterator I, 1051 const DebugLoc &DL, Register DstReg, 1052 ArrayRef<MachineOperand> Cond, 1053 Register TrueReg, 1054 Register FalseReg) const { 1055 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 1056 const TargetRegisterClass *BoolXExecRC = 1057 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 1058 assert(MRI.getRegClass(DstReg) == &AMDGPU::VGPR_32RegClass && 1059 "Not a VGPR32 reg"); 1060 1061 if (Cond.size() == 1) { 1062 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1063 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1064 .add(Cond[0]); 1065 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1066 .addImm(0) 1067 .addReg(FalseReg) 1068 .addImm(0) 1069 .addReg(TrueReg) 1070 .addReg(SReg); 1071 } else if (Cond.size() == 2) { 1072 assert(Cond[0].isImm() && "Cond[0] is not an immediate"); 1073 switch (Cond[0].getImm()) { 1074 case SIInstrInfo::SCC_TRUE: { 1075 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1076 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1077 : AMDGPU::S_CSELECT_B64), SReg) 1078 .addImm(1) 1079 .addImm(0); 1080 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1081 .addImm(0) 1082 .addReg(FalseReg) 1083 .addImm(0) 1084 .addReg(TrueReg) 1085 .addReg(SReg); 1086 break; 1087 } 1088 case SIInstrInfo::SCC_FALSE: { 1089 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1090 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1091 : AMDGPU::S_CSELECT_B64), SReg) 1092 .addImm(0) 1093 .addImm(1); 1094 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1095 .addImm(0) 1096 .addReg(FalseReg) 1097 .addImm(0) 1098 .addReg(TrueReg) 1099 .addReg(SReg); 1100 break; 1101 } 1102 case SIInstrInfo::VCCNZ: { 1103 MachineOperand RegOp = Cond[1]; 1104 RegOp.setImplicit(false); 1105 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1106 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1107 .add(RegOp); 1108 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1109 .addImm(0) 1110 .addReg(FalseReg) 1111 .addImm(0) 1112 .addReg(TrueReg) 1113 .addReg(SReg); 1114 break; 1115 } 1116 case SIInstrInfo::VCCZ: { 1117 MachineOperand RegOp = Cond[1]; 1118 RegOp.setImplicit(false); 1119 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1120 BuildMI(MBB, I, DL, get(AMDGPU::COPY), SReg) 1121 .add(RegOp); 1122 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1123 .addImm(0) 1124 .addReg(TrueReg) 1125 .addImm(0) 1126 .addReg(FalseReg) 1127 .addReg(SReg); 1128 break; 1129 } 1130 case SIInstrInfo::EXECNZ: { 1131 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1132 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1133 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1134 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1135 .addImm(0); 1136 BuildMI(MBB, I, DL, get(ST.isWave32() ? 
AMDGPU::S_CSELECT_B32 1137 : AMDGPU::S_CSELECT_B64), SReg) 1138 .addImm(1) 1139 .addImm(0); 1140 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1141 .addImm(0) 1142 .addReg(FalseReg) 1143 .addImm(0) 1144 .addReg(TrueReg) 1145 .addReg(SReg); 1146 break; 1147 } 1148 case SIInstrInfo::EXECZ: { 1149 Register SReg = MRI.createVirtualRegister(BoolXExecRC); 1150 Register SReg2 = MRI.createVirtualRegister(RI.getBoolRC()); 1151 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1152 : AMDGPU::S_OR_SAVEEXEC_B64), SReg2) 1153 .addImm(0); 1154 BuildMI(MBB, I, DL, get(ST.isWave32() ? AMDGPU::S_CSELECT_B32 1155 : AMDGPU::S_CSELECT_B64), SReg) 1156 .addImm(0) 1157 .addImm(1); 1158 BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1159 .addImm(0) 1160 .addReg(FalseReg) 1161 .addImm(0) 1162 .addReg(TrueReg) 1163 .addReg(SReg); 1164 llvm_unreachable("Unhandled branch predicate EXECZ"); 1165 break; 1166 } 1167 default: 1168 llvm_unreachable("invalid branch predicate"); 1169 } 1170 } else { 1171 llvm_unreachable("Can only handle Cond size 1 or 2"); 1172 } 1173 } 1174 1175 Register SIInstrInfo::insertEQ(MachineBasicBlock *MBB, 1176 MachineBasicBlock::iterator I, 1177 const DebugLoc &DL, 1178 Register SrcReg, int Value) const { 1179 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1180 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1181 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_EQ_I32_e64), Reg) 1182 .addImm(Value) 1183 .addReg(SrcReg); 1184 1185 return Reg; 1186 } 1187 1188 Register SIInstrInfo::insertNE(MachineBasicBlock *MBB, 1189 MachineBasicBlock::iterator I, 1190 const DebugLoc &DL, 1191 Register SrcReg, int Value) const { 1192 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 1193 Register Reg = MRI.createVirtualRegister(RI.getBoolRC()); 1194 BuildMI(*MBB, I, DL, get(AMDGPU::V_CMP_NE_I32_e64), Reg) 1195 .addImm(Value) 1196 .addReg(SrcReg); 1197 1198 return Reg; 1199 } 1200 1201 unsigned SIInstrInfo::getMovOpcode(const TargetRegisterClass *DstRC) const { 1202 1203 if (RI.hasAGPRs(DstRC)) 1204 return AMDGPU::COPY; 1205 if (RI.getRegSizeInBits(*DstRC) == 32) { 1206 return RI.isSGPRClass(DstRC) ? 
AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1207 } else if (RI.getRegSizeInBits(*DstRC) == 64 && RI.isSGPRClass(DstRC)) { 1208 return AMDGPU::S_MOV_B64; 1209 } else if (RI.getRegSizeInBits(*DstRC) == 64 && !RI.isSGPRClass(DstRC)) { 1210 return AMDGPU::V_MOV_B64_PSEUDO; 1211 } 1212 return AMDGPU::COPY; 1213 } 1214 1215 const MCInstrDesc & 1216 SIInstrInfo::getIndirectGPRIDXPseudo(unsigned VecSize, 1217 bool IsIndirectSrc) const { 1218 if (IsIndirectSrc) { 1219 if (VecSize <= 32) // 4 bytes 1220 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1); 1221 if (VecSize <= 64) // 8 bytes 1222 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2); 1223 if (VecSize <= 96) // 12 bytes 1224 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3); 1225 if (VecSize <= 128) // 16 bytes 1226 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4); 1227 if (VecSize <= 160) // 20 bytes 1228 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5); 1229 if (VecSize <= 256) // 32 bytes 1230 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8); 1231 if (VecSize <= 512) // 64 bytes 1232 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16); 1233 if (VecSize <= 1024) // 128 bytes 1234 return get(AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32); 1235 1236 llvm_unreachable("unsupported size for IndirectRegReadGPRIDX pseudos"); 1237 } 1238 1239 if (VecSize <= 32) // 4 bytes 1240 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1); 1241 if (VecSize <= 64) // 8 bytes 1242 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2); 1243 if (VecSize <= 96) // 12 bytes 1244 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3); 1245 if (VecSize <= 128) // 16 bytes 1246 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4); 1247 if (VecSize <= 160) // 20 bytes 1248 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5); 1249 if (VecSize <= 256) // 32 bytes 1250 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8); 1251 if (VecSize <= 512) // 64 bytes 1252 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16); 1253 if (VecSize <= 1024) // 128 bytes 1254 return get(AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32); 1255 1256 llvm_unreachable("unsupported size for IndirectRegWriteGPRIDX pseudos"); 1257 } 1258 1259 static unsigned getIndirectVGPRWriteMovRelPseudoOpc(unsigned VecSize) { 1260 if (VecSize <= 32) // 4 bytes 1261 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1262 if (VecSize <= 64) // 8 bytes 1263 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1264 if (VecSize <= 96) // 12 bytes 1265 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1266 if (VecSize <= 128) // 16 bytes 1267 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1268 if (VecSize <= 160) // 20 bytes 1269 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1270 if (VecSize <= 256) // 32 bytes 1271 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1272 if (VecSize <= 512) // 64 bytes 1273 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1274 if (VecSize <= 1024) // 128 bytes 1275 return AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1276 1277 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1278 } 1279 1280 static unsigned getIndirectSGPRWriteMovRelPseudo32(unsigned VecSize) { 1281 if (VecSize <= 32) // 4 bytes 1282 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1; 1283 if (VecSize <= 64) // 8 bytes 1284 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2; 1285 if (VecSize <= 96) // 12 bytes 1286 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3; 1287 if (VecSize <= 128) // 16 bytes 1288 return 
AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4; 1289 if (VecSize <= 160) // 20 bytes 1290 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5; 1291 if (VecSize <= 256) // 32 bytes 1292 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8; 1293 if (VecSize <= 512) // 64 bytes 1294 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16; 1295 if (VecSize <= 1024) // 128 bytes 1296 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32; 1297 1298 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1299 } 1300 1301 static unsigned getIndirectSGPRWriteMovRelPseudo64(unsigned VecSize) { 1302 if (VecSize <= 64) // 8 bytes 1303 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1; 1304 if (VecSize <= 128) // 16 bytes 1305 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2; 1306 if (VecSize <= 256) // 32 bytes 1307 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4; 1308 if (VecSize <= 512) // 64 bytes 1309 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8; 1310 if (VecSize <= 1024) // 128 bytes 1311 return AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16; 1312 1313 llvm_unreachable("unsupported size for IndirectRegWrite pseudos"); 1314 } 1315 1316 const MCInstrDesc & 1317 SIInstrInfo::getIndirectRegWriteMovRelPseudo(unsigned VecSize, unsigned EltSize, 1318 bool IsSGPR) const { 1319 if (IsSGPR) { 1320 switch (EltSize) { 1321 case 32: 1322 return get(getIndirectSGPRWriteMovRelPseudo32(VecSize)); 1323 case 64: 1324 return get(getIndirectSGPRWriteMovRelPseudo64(VecSize)); 1325 default: 1326 llvm_unreachable("invalid reg indexing elt size"); 1327 } 1328 } 1329 1330 assert(EltSize == 32 && "invalid reg indexing elt size"); 1331 return get(getIndirectVGPRWriteMovRelPseudoOpc(VecSize)); 1332 } 1333 1334 static unsigned getSGPRSpillSaveOpcode(unsigned Size) { 1335 switch (Size) { 1336 case 4: 1337 return AMDGPU::SI_SPILL_S32_SAVE; 1338 case 8: 1339 return AMDGPU::SI_SPILL_S64_SAVE; 1340 case 12: 1341 return AMDGPU::SI_SPILL_S96_SAVE; 1342 case 16: 1343 return AMDGPU::SI_SPILL_S128_SAVE; 1344 case 20: 1345 return AMDGPU::SI_SPILL_S160_SAVE; 1346 case 24: 1347 return AMDGPU::SI_SPILL_S192_SAVE; 1348 case 28: 1349 return AMDGPU::SI_SPILL_S224_SAVE; 1350 case 32: 1351 return AMDGPU::SI_SPILL_S256_SAVE; 1352 case 64: 1353 return AMDGPU::SI_SPILL_S512_SAVE; 1354 case 128: 1355 return AMDGPU::SI_SPILL_S1024_SAVE; 1356 default: 1357 llvm_unreachable("unknown register size"); 1358 } 1359 } 1360 1361 static unsigned getVGPRSpillSaveOpcode(unsigned Size) { 1362 switch (Size) { 1363 case 4: 1364 return AMDGPU::SI_SPILL_V32_SAVE; 1365 case 8: 1366 return AMDGPU::SI_SPILL_V64_SAVE; 1367 case 12: 1368 return AMDGPU::SI_SPILL_V96_SAVE; 1369 case 16: 1370 return AMDGPU::SI_SPILL_V128_SAVE; 1371 case 20: 1372 return AMDGPU::SI_SPILL_V160_SAVE; 1373 case 24: 1374 return AMDGPU::SI_SPILL_V192_SAVE; 1375 case 28: 1376 return AMDGPU::SI_SPILL_V224_SAVE; 1377 case 32: 1378 return AMDGPU::SI_SPILL_V256_SAVE; 1379 case 64: 1380 return AMDGPU::SI_SPILL_V512_SAVE; 1381 case 128: 1382 return AMDGPU::SI_SPILL_V1024_SAVE; 1383 default: 1384 llvm_unreachable("unknown register size"); 1385 } 1386 } 1387 1388 static unsigned getAGPRSpillSaveOpcode(unsigned Size) { 1389 switch (Size) { 1390 case 4: 1391 return AMDGPU::SI_SPILL_A32_SAVE; 1392 case 8: 1393 return AMDGPU::SI_SPILL_A64_SAVE; 1394 case 12: 1395 return AMDGPU::SI_SPILL_A96_SAVE; 1396 case 16: 1397 return AMDGPU::SI_SPILL_A128_SAVE; 1398 case 20: 1399 return AMDGPU::SI_SPILL_A160_SAVE; 1400 case 24: 1401 return AMDGPU::SI_SPILL_A192_SAVE; 1402 case 28: 1403 return AMDGPU::SI_SPILL_A224_SAVE; 
  case 32:
    return AMDGPU::SI_SPILL_A256_SAVE;
  case 64:
    return AMDGPU::SI_SPILL_A512_SAVE;
  case 128:
    return AMDGPU::SI_SPILL_A1024_SAVE;
  default:
    llvm_unreachable("unknown register size");
  }
}

void SIInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      Register SrcReg, bool isKill,
                                      int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  MachineFunction *MF = MBB.getParent();
  SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  MachineFrameInfo &FrameInfo = MF->getFrameInfo();
  const DebugLoc &DL = MBB.findDebugLoc(MI);

  MachinePointerInfo PtrInfo
    = MachinePointerInfo::getFixedStack(*MF, FrameIndex);
  MachineMemOperand *MMO = MF->getMachineMemOperand(
      PtrInfo, MachineMemOperand::MOStore, FrameInfo.getObjectSize(FrameIndex),
      FrameInfo.getObjectAlign(FrameIndex));
  unsigned SpillSize = TRI->getSpillSize(*RC);

  if (RI.isSGPRClass(RC)) {
    MFI->setHasSpilledSGPRs();
    assert(SrcReg != AMDGPU::M0 && "m0 should not be spilled");
    assert(SrcReg != AMDGPU::EXEC_LO && SrcReg != AMDGPU::EXEC_HI &&
           SrcReg != AMDGPU::EXEC && "exec should not be spilled");

    // We are only allowed to create one new instruction when spilling
    // registers, so we need to use a pseudo instruction for spilling SGPRs.
    const MCInstrDesc &OpDesc = get(getSGPRSpillSaveOpcode(SpillSize));

    // The SGPR spill/restore instructions only work on numbered SGPRs, so we
    // need to make sure we are using the correct register class.
    if (SrcReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(SrcReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    BuildMI(MBB, MI, DL, OpDesc)
        .addReg(SrcReg, getKillRegState(isKill)) // data
        .addFrameIndex(FrameIndex)               // addr
        .addMemOperand(MMO)
        .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ?
getAGPRSpillSaveOpcode(SpillSize) 1462 : getVGPRSpillSaveOpcode(SpillSize); 1463 MFI->setHasSpilledVGPRs(); 1464 1465 BuildMI(MBB, MI, DL, get(Opcode)) 1466 .addReg(SrcReg, getKillRegState(isKill)) // data 1467 .addFrameIndex(FrameIndex) // addr 1468 .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset 1469 .addImm(0) // offset 1470 .addMemOperand(MMO); 1471 } 1472 1473 static unsigned getSGPRSpillRestoreOpcode(unsigned Size) { 1474 switch (Size) { 1475 case 4: 1476 return AMDGPU::SI_SPILL_S32_RESTORE; 1477 case 8: 1478 return AMDGPU::SI_SPILL_S64_RESTORE; 1479 case 12: 1480 return AMDGPU::SI_SPILL_S96_RESTORE; 1481 case 16: 1482 return AMDGPU::SI_SPILL_S128_RESTORE; 1483 case 20: 1484 return AMDGPU::SI_SPILL_S160_RESTORE; 1485 case 24: 1486 return AMDGPU::SI_SPILL_S192_RESTORE; 1487 case 28: 1488 return AMDGPU::SI_SPILL_S224_RESTORE; 1489 case 32: 1490 return AMDGPU::SI_SPILL_S256_RESTORE; 1491 case 64: 1492 return AMDGPU::SI_SPILL_S512_RESTORE; 1493 case 128: 1494 return AMDGPU::SI_SPILL_S1024_RESTORE; 1495 default: 1496 llvm_unreachable("unknown register size"); 1497 } 1498 } 1499 1500 static unsigned getVGPRSpillRestoreOpcode(unsigned Size) { 1501 switch (Size) { 1502 case 4: 1503 return AMDGPU::SI_SPILL_V32_RESTORE; 1504 case 8: 1505 return AMDGPU::SI_SPILL_V64_RESTORE; 1506 case 12: 1507 return AMDGPU::SI_SPILL_V96_RESTORE; 1508 case 16: 1509 return AMDGPU::SI_SPILL_V128_RESTORE; 1510 case 20: 1511 return AMDGPU::SI_SPILL_V160_RESTORE; 1512 case 24: 1513 return AMDGPU::SI_SPILL_V192_RESTORE; 1514 case 28: 1515 return AMDGPU::SI_SPILL_V224_RESTORE; 1516 case 32: 1517 return AMDGPU::SI_SPILL_V256_RESTORE; 1518 case 64: 1519 return AMDGPU::SI_SPILL_V512_RESTORE; 1520 case 128: 1521 return AMDGPU::SI_SPILL_V1024_RESTORE; 1522 default: 1523 llvm_unreachable("unknown register size"); 1524 } 1525 } 1526 1527 static unsigned getAGPRSpillRestoreOpcode(unsigned Size) { 1528 switch (Size) { 1529 case 4: 1530 return AMDGPU::SI_SPILL_A32_RESTORE; 1531 case 8: 1532 return AMDGPU::SI_SPILL_A64_RESTORE; 1533 case 12: 1534 return AMDGPU::SI_SPILL_A96_RESTORE; 1535 case 16: 1536 return AMDGPU::SI_SPILL_A128_RESTORE; 1537 case 20: 1538 return AMDGPU::SI_SPILL_A160_RESTORE; 1539 case 24: 1540 return AMDGPU::SI_SPILL_A192_RESTORE; 1541 case 28: 1542 return AMDGPU::SI_SPILL_A224_RESTORE; 1543 case 32: 1544 return AMDGPU::SI_SPILL_A256_RESTORE; 1545 case 64: 1546 return AMDGPU::SI_SPILL_A512_RESTORE; 1547 case 128: 1548 return AMDGPU::SI_SPILL_A1024_RESTORE; 1549 default: 1550 llvm_unreachable("unknown register size"); 1551 } 1552 } 1553 1554 void SIInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB, 1555 MachineBasicBlock::iterator MI, 1556 Register DestReg, int FrameIndex, 1557 const TargetRegisterClass *RC, 1558 const TargetRegisterInfo *TRI) const { 1559 MachineFunction *MF = MBB.getParent(); 1560 SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1561 MachineFrameInfo &FrameInfo = MF->getFrameInfo(); 1562 const DebugLoc &DL = MBB.findDebugLoc(MI); 1563 unsigned SpillSize = TRI->getSpillSize(*RC); 1564 1565 MachinePointerInfo PtrInfo 1566 = MachinePointerInfo::getFixedStack(*MF, FrameIndex); 1567 1568 MachineMemOperand *MMO = MF->getMachineMemOperand( 1569 PtrInfo, MachineMemOperand::MOLoad, FrameInfo.getObjectSize(FrameIndex), 1570 FrameInfo.getObjectAlign(FrameIndex)); 1571 1572 if (RI.isSGPRClass(RC)) { 1573 MFI->setHasSpilledSGPRs(); 1574 assert(DestReg != AMDGPU::M0 && "m0 should not be reloaded into"); 1575 assert(DestReg != AMDGPU::EXEC_LO && DestReg != AMDGPU::EXEC_HI && 
           DestReg != AMDGPU::EXEC && "exec should not be spilled");

    // FIXME: Maybe this should not include a memoperand because it will be
    // lowered to non-memory instructions.
    const MCInstrDesc &OpDesc = get(getSGPRSpillRestoreOpcode(SpillSize));
    if (DestReg.isVirtual() && SpillSize == 4) {
      MachineRegisterInfo &MRI = MF->getRegInfo();
      MRI.constrainRegClass(DestReg, &AMDGPU::SReg_32_XM0_XEXECRegClass);
    }

    if (RI.spillSGPRToVGPR())
      FrameInfo.setStackID(FrameIndex, TargetStackID::SGPRSpill);
    BuildMI(MBB, MI, DL, OpDesc, DestReg)
        .addFrameIndex(FrameIndex) // addr
        .addMemOperand(MMO)
        .addReg(MFI->getStackPtrOffsetReg(), RegState::Implicit);

    return;
  }

  unsigned Opcode = RI.hasAGPRs(RC) ? getAGPRSpillRestoreOpcode(SpillSize)
                                    : getVGPRSpillRestoreOpcode(SpillSize);
  BuildMI(MBB, MI, DL, get(Opcode), DestReg)
      .addFrameIndex(FrameIndex)           // vaddr
      .addReg(MFI->getStackPtrOffsetReg()) // scratch_offset
      .addImm(0)                           // offset
      .addMemOperand(MMO);
}

void SIInstrInfo::insertNoop(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI) const {
  insertNoops(MBB, MI, 1);
}

void SIInstrInfo::insertNoops(MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator MI,
                              unsigned Quantity) const {
  DebugLoc DL = MBB.findDebugLoc(MI);
  while (Quantity > 0) {
    unsigned Arg = std::min(Quantity, 8u);
    Quantity -= Arg;
    BuildMI(MBB, MI, DL, get(AMDGPU::S_NOP)).addImm(Arg - 1);
  }
}

void SIInstrInfo::insertReturn(MachineBasicBlock &MBB) const {
  auto MF = MBB.getParent();
  SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  assert(Info->isEntryFunction());

  if (MBB.succ_empty()) {
    bool HasNoTerminator = MBB.getFirstTerminator() == MBB.end();
    if (HasNoTerminator) {
      if (Info->returnsVoid()) {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::S_ENDPGM)).addImm(0);
      } else {
        BuildMI(MBB, MBB.end(), DebugLoc(), get(AMDGPU::SI_RETURN_TO_EPILOG));
      }
    }
  }
}

unsigned SIInstrInfo::getNumWaitStates(const MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    if (MI.isMetaInstruction())
      return 0;
    return 1; // FIXME: Do wait states equal cycles?

  case AMDGPU::S_NOP:
    return MI.getOperand(0).getImm() + 1;

  // FIXME: Any other pseudo instruction?
  // SI_RETURN_TO_EPILOG is a fallthrough to code outside of the function. The
  // hazard, even if one exists, won't really be visible. Should we handle it?
  case AMDGPU::SI_MASKED_UNREACHABLE:
  case AMDGPU::WAVE_BARRIER:
    return 0;
  }
}

bool SIInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  const SIRegisterInfo *TRI = ST.getRegisterInfo();
  MachineBasicBlock &MBB = *MI.getParent();
  DebugLoc DL = MBB.findDebugLoc(MI);
  switch (MI.getOpcode()) {
  default: return TargetInstrInfo::expandPostRAPseudo(MI);
  case AMDGPU::S_MOV_B64_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
    MI.setDesc(get(AMDGPU::S_MOV_B64));
    break;

  case AMDGPU::S_MOV_B32_term:
    // This is only a terminator to get the correct spill code placement during
    // register allocation.
1673 MI.setDesc(get(AMDGPU::S_MOV_B32)); 1674 break; 1675 1676 case AMDGPU::S_XOR_B64_term: 1677 // This is only a terminator to get the correct spill code placement during 1678 // register allocation. 1679 MI.setDesc(get(AMDGPU::S_XOR_B64)); 1680 break; 1681 1682 case AMDGPU::S_XOR_B32_term: 1683 // This is only a terminator to get the correct spill code placement during 1684 // register allocation. 1685 MI.setDesc(get(AMDGPU::S_XOR_B32)); 1686 break; 1687 case AMDGPU::S_OR_B64_term: 1688 // This is only a terminator to get the correct spill code placement during 1689 // register allocation. 1690 MI.setDesc(get(AMDGPU::S_OR_B64)); 1691 break; 1692 case AMDGPU::S_OR_B32_term: 1693 // This is only a terminator to get the correct spill code placement during 1694 // register allocation. 1695 MI.setDesc(get(AMDGPU::S_OR_B32)); 1696 break; 1697 1698 case AMDGPU::S_ANDN2_B64_term: 1699 // This is only a terminator to get the correct spill code placement during 1700 // register allocation. 1701 MI.setDesc(get(AMDGPU::S_ANDN2_B64)); 1702 break; 1703 1704 case AMDGPU::S_ANDN2_B32_term: 1705 // This is only a terminator to get the correct spill code placement during 1706 // register allocation. 1707 MI.setDesc(get(AMDGPU::S_ANDN2_B32)); 1708 break; 1709 1710 case AMDGPU::S_AND_B64_term: 1711 // This is only a terminator to get the correct spill code placement during 1712 // register allocation. 1713 MI.setDesc(get(AMDGPU::S_AND_B64)); 1714 break; 1715 1716 case AMDGPU::S_AND_B32_term: 1717 // This is only a terminator to get the correct spill code placement during 1718 // register allocation. 1719 MI.setDesc(get(AMDGPU::S_AND_B32)); 1720 break; 1721 1722 case AMDGPU::V_MOV_B64_PSEUDO: { 1723 Register Dst = MI.getOperand(0).getReg(); 1724 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1725 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1726 1727 const MachineOperand &SrcOp = MI.getOperand(1); 1728 // FIXME: Will this work for 64-bit floating point immediates? 
1729 assert(!SrcOp.isFPImm()); 1730 if (SrcOp.isImm()) { 1731 APInt Imm(64, SrcOp.getImm()); 1732 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1733 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1734 if (ST.hasPackedFP32Ops() && Lo == Hi && isInlineConstant(Lo)) { 1735 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1736 .addImm(SISrcMods::OP_SEL_1) 1737 .addImm(Lo.getSExtValue()) 1738 .addImm(SISrcMods::OP_SEL_1) 1739 .addImm(Lo.getSExtValue()) 1740 .addImm(0) // op_sel_lo 1741 .addImm(0) // op_sel_hi 1742 .addImm(0) // neg_lo 1743 .addImm(0) // neg_hi 1744 .addImm(0); // clamp 1745 } else { 1746 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1747 .addImm(Lo.getSExtValue()) 1748 .addReg(Dst, RegState::Implicit | RegState::Define); 1749 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1750 .addImm(Hi.getSExtValue()) 1751 .addReg(Dst, RegState::Implicit | RegState::Define); 1752 } 1753 } else { 1754 assert(SrcOp.isReg()); 1755 if (ST.hasPackedFP32Ops() && 1756 !RI.isAGPR(MBB.getParent()->getRegInfo(), SrcOp.getReg())) { 1757 BuildMI(MBB, MI, DL, get(AMDGPU::V_PK_MOV_B32), Dst) 1758 .addImm(SISrcMods::OP_SEL_1) // src0_mod 1759 .addReg(SrcOp.getReg()) 1760 .addImm(SISrcMods::OP_SEL_0 | SISrcMods::OP_SEL_1) // src1_mod 1761 .addReg(SrcOp.getReg()) 1762 .addImm(0) // op_sel_lo 1763 .addImm(0) // op_sel_hi 1764 .addImm(0) // neg_lo 1765 .addImm(0) // neg_hi 1766 .addImm(0); // clamp 1767 } else { 1768 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstLo) 1769 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub0)) 1770 .addReg(Dst, RegState::Implicit | RegState::Define); 1771 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), DstHi) 1772 .addReg(RI.getSubReg(SrcOp.getReg(), AMDGPU::sub1)) 1773 .addReg(Dst, RegState::Implicit | RegState::Define); 1774 } 1775 } 1776 MI.eraseFromParent(); 1777 break; 1778 } 1779 case AMDGPU::V_MOV_B64_DPP_PSEUDO: { 1780 expandMovDPP64(MI); 1781 break; 1782 } 1783 case AMDGPU::S_MOV_B64_IMM_PSEUDO: { 1784 const MachineOperand &SrcOp = MI.getOperand(1); 1785 assert(!SrcOp.isFPImm()); 1786 APInt Imm(64, SrcOp.getImm()); 1787 if (Imm.isIntN(32) || isInlineConstant(Imm)) { 1788 MI.setDesc(get(AMDGPU::S_MOV_B64)); 1789 break; 1790 } 1791 1792 Register Dst = MI.getOperand(0).getReg(); 1793 Register DstLo = RI.getSubReg(Dst, AMDGPU::sub0); 1794 Register DstHi = RI.getSubReg(Dst, AMDGPU::sub1); 1795 1796 APInt Lo(32, Imm.getLoBits(32).getZExtValue()); 1797 APInt Hi(32, Imm.getHiBits(32).getZExtValue()); 1798 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstLo) 1799 .addImm(Lo.getSExtValue()) 1800 .addReg(Dst, RegState::Implicit | RegState::Define); 1801 BuildMI(MBB, MI, DL, get(AMDGPU::S_MOV_B32), DstHi) 1802 .addImm(Hi.getSExtValue()) 1803 .addReg(Dst, RegState::Implicit | RegState::Define); 1804 MI.eraseFromParent(); 1805 break; 1806 } 1807 case AMDGPU::V_SET_INACTIVE_B32: { 1808 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1809 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1810 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1811 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1812 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32), MI.getOperand(0).getReg()) 1813 .add(MI.getOperand(2)); 1814 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1815 .addReg(Exec); 1816 MI.eraseFromParent(); 1817 break; 1818 } 1819 case AMDGPU::V_SET_INACTIVE_B64: { 1820 unsigned NotOpc = ST.isWave32() ? AMDGPU::S_NOT_B32 : AMDGPU::S_NOT_B64; 1821 unsigned Exec = ST.isWave32() ? 
AMDGPU::EXEC_LO : AMDGPU::EXEC; 1822 auto FirstNot = BuildMI(MBB, MI, DL, get(NotOpc), Exec).addReg(Exec); 1823 FirstNot->addRegisterDead(AMDGPU::SCC, TRI); // SCC is overwritten 1824 MachineInstr *Copy = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B64_PSEUDO), 1825 MI.getOperand(0).getReg()) 1826 .add(MI.getOperand(2)); 1827 expandPostRAPseudo(*Copy); 1828 BuildMI(MBB, MI, DL, get(NotOpc), Exec) 1829 .addReg(Exec); 1830 MI.eraseFromParent(); 1831 break; 1832 } 1833 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1834 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1835 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1836 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1837 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1838 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1839 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1840 case AMDGPU::V_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1841 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V1: 1842 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V2: 1843 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V3: 1844 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V4: 1845 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V5: 1846 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V8: 1847 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V16: 1848 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B32_V32: 1849 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V1: 1850 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V2: 1851 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V4: 1852 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V8: 1853 case AMDGPU::S_INDIRECT_REG_WRITE_MOVREL_B64_V16: { 1854 const TargetRegisterClass *EltRC = getOpRegClass(MI, 2); 1855 1856 unsigned Opc; 1857 if (RI.hasVGPRs(EltRC)) { 1858 Opc = AMDGPU::V_MOVRELD_B32_e32; 1859 } else { 1860 Opc = RI.getRegSizeInBits(*EltRC) == 64 ? AMDGPU::S_MOVRELD_B64 1861 : AMDGPU::S_MOVRELD_B32; 1862 } 1863 1864 const MCInstrDesc &OpDesc = get(Opc); 1865 Register VecReg = MI.getOperand(0).getReg(); 1866 bool IsUndef = MI.getOperand(1).isUndef(); 1867 unsigned SubReg = MI.getOperand(3).getImm(); 1868 assert(VecReg == MI.getOperand(1).getReg()); 1869 1870 MachineInstrBuilder MIB = 1871 BuildMI(MBB, MI, DL, OpDesc) 1872 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1873 .add(MI.getOperand(2)) 1874 .addReg(VecReg, RegState::ImplicitDefine) 1875 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)); 1876 1877 const int ImpDefIdx = 1878 OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1879 const int ImpUseIdx = ImpDefIdx + 1; 1880 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1881 MI.eraseFromParent(); 1882 break; 1883 } 1884 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V1: 1885 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V2: 1886 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V3: 1887 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V4: 1888 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V5: 1889 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V8: 1890 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V16: 1891 case AMDGPU::V_INDIRECT_REG_WRITE_GPR_IDX_B32_V32: { 1892 assert(ST.useVGPRIndexMode()); 1893 Register VecReg = MI.getOperand(0).getReg(); 1894 bool IsUndef = MI.getOperand(1).isUndef(); 1895 Register Idx = MI.getOperand(3).getReg(); 1896 Register SubReg = MI.getOperand(4).getImm(); 1897 1898 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 1899 .addReg(Idx) 1900 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE); 1901 SetOn->getOperand(3).setIsUndef(); 1902 1903 const MCInstrDesc &OpDesc = get(AMDGPU::V_MOV_B32_indirect); 1904 MachineInstrBuilder MIB = 1905 BuildMI(MBB, MI, DL, OpDesc) 1906 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1907 .add(MI.getOperand(2)) 1908 .addReg(VecReg, RegState::ImplicitDefine) 1909 .addReg(VecReg, 1910 RegState::Implicit | (IsUndef ? RegState::Undef : 0)); 1911 1912 const int ImpDefIdx = OpDesc.getNumOperands() + OpDesc.getNumImplicitUses(); 1913 const int ImpUseIdx = ImpDefIdx + 1; 1914 MIB->tieOperands(ImpDefIdx, ImpUseIdx); 1915 1916 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 1917 1918 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 1919 1920 MI.eraseFromParent(); 1921 break; 1922 } 1923 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V1: 1924 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V2: 1925 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V3: 1926 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V4: 1927 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V5: 1928 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V8: 1929 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V16: 1930 case AMDGPU::V_INDIRECT_REG_READ_GPR_IDX_B32_V32: { 1931 assert(ST.useVGPRIndexMode()); 1932 Register Dst = MI.getOperand(0).getReg(); 1933 Register VecReg = MI.getOperand(1).getReg(); 1934 bool IsUndef = MI.getOperand(1).isUndef(); 1935 Register Idx = MI.getOperand(2).getReg(); 1936 Register SubReg = MI.getOperand(3).getImm(); 1937 1938 MachineInstr *SetOn = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_ON)) 1939 .addReg(Idx) 1940 .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE); 1941 SetOn->getOperand(3).setIsUndef(); 1942 1943 BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_e32)) 1944 .addDef(Dst) 1945 .addReg(RI.getSubReg(VecReg, SubReg), RegState::Undef) 1946 .addReg(VecReg, RegState::Implicit | (IsUndef ? 
RegState::Undef : 0)) 1947 .addReg(AMDGPU::M0, RegState::Implicit); 1948 1949 MachineInstr *SetOff = BuildMI(MBB, MI, DL, get(AMDGPU::S_SET_GPR_IDX_OFF)); 1950 1951 finalizeBundle(MBB, SetOn->getIterator(), std::next(SetOff->getIterator())); 1952 1953 MI.eraseFromParent(); 1954 break; 1955 } 1956 case AMDGPU::SI_PC_ADD_REL_OFFSET: { 1957 MachineFunction &MF = *MBB.getParent(); 1958 Register Reg = MI.getOperand(0).getReg(); 1959 Register RegLo = RI.getSubReg(Reg, AMDGPU::sub0); 1960 Register RegHi = RI.getSubReg(Reg, AMDGPU::sub1); 1961 1962 // Create a bundle so these instructions won't be re-ordered by the 1963 // post-RA scheduler. 1964 MIBundleBuilder Bundler(MBB, MI); 1965 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_GETPC_B64), Reg)); 1966 1967 // Add 32-bit offset from this instruction to the start of the 1968 // constant data. 1969 Bundler.append(BuildMI(MF, DL, get(AMDGPU::S_ADD_U32), RegLo) 1970 .addReg(RegLo) 1971 .add(MI.getOperand(1))); 1972 1973 MachineInstrBuilder MIB = BuildMI(MF, DL, get(AMDGPU::S_ADDC_U32), RegHi) 1974 .addReg(RegHi); 1975 MIB.add(MI.getOperand(2)); 1976 1977 Bundler.append(MIB); 1978 finalizeBundle(MBB, Bundler.begin()); 1979 1980 MI.eraseFromParent(); 1981 break; 1982 } 1983 case AMDGPU::ENTER_STRICT_WWM: { 1984 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1985 // Whole Wave Mode is entered. 1986 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_OR_SAVEEXEC_B32 1987 : AMDGPU::S_OR_SAVEEXEC_B64)); 1988 break; 1989 } 1990 case AMDGPU::ENTER_STRICT_WQM: { 1991 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 1992 // STRICT_WQM is entered. 1993 const unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 1994 const unsigned WQMOp = ST.isWave32() ? AMDGPU::S_WQM_B32 : AMDGPU::S_WQM_B64; 1995 const unsigned MovOp = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 1996 BuildMI(MBB, MI, DL, get(MovOp), MI.getOperand(0).getReg()).addReg(Exec); 1997 BuildMI(MBB, MI, DL, get(WQMOp), Exec).addReg(Exec); 1998 1999 MI.eraseFromParent(); 2000 break; 2001 } 2002 case AMDGPU::EXIT_STRICT_WWM: 2003 case AMDGPU::EXIT_STRICT_WQM: { 2004 // This only gets its own opcode so that SIPreAllocateWWMRegs can tell when 2005 // WWM/STRICT_WQM is exited. 2006 MI.setDesc(get(ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64)); 2007 break; 2008 } 2009 } 2010 return true; 2011 } 2012 2013 std::pair<MachineInstr*, MachineInstr*> 2014 SIInstrInfo::expandMovDPP64(MachineInstr &MI) const { 2015 assert (MI.getOpcode() == AMDGPU::V_MOV_B64_DPP_PSEUDO); 2016 2017 MachineBasicBlock &MBB = *MI.getParent(); 2018 DebugLoc DL = MBB.findDebugLoc(MI); 2019 MachineFunction *MF = MBB.getParent(); 2020 MachineRegisterInfo &MRI = MF->getRegInfo(); 2021 Register Dst = MI.getOperand(0).getReg(); 2022 unsigned Part = 0; 2023 MachineInstr *Split[2]; 2024 2025 for (auto Sub : { AMDGPU::sub0, AMDGPU::sub1 }) { 2026 auto MovDPP = BuildMI(MBB, MI, DL, get(AMDGPU::V_MOV_B32_dpp)); 2027 if (Dst.isPhysical()) { 2028 MovDPP.addDef(RI.getSubReg(Dst, Sub)); 2029 } else { 2030 assert(MRI.isSSA()); 2031 auto Tmp = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 2032 MovDPP.addDef(Tmp); 2033 } 2034 2035 for (unsigned I = 1; I <= 2; ++I) { // old and src operands.
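      // Each half of the 64-bit DPP move takes the matching half of the old
      // and src operands: immediates are split into their low and high 32
      // bits, and register operands are narrowed to the corresponding
      // sub0/sub1 subregister (directly for physical registers, via a subreg
      // index for virtual ones).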
2036 const MachineOperand &SrcOp = MI.getOperand(I); 2037 assert(!SrcOp.isFPImm()); 2038 if (SrcOp.isImm()) { 2039 APInt Imm(64, SrcOp.getImm()); 2040 Imm.ashrInPlace(Part * 32); 2041 MovDPP.addImm(Imm.getLoBits(32).getZExtValue()); 2042 } else { 2043 assert(SrcOp.isReg()); 2044 Register Src = SrcOp.getReg(); 2045 if (Src.isPhysical()) 2046 MovDPP.addReg(RI.getSubReg(Src, Sub)); 2047 else 2048 MovDPP.addReg(Src, SrcOp.isUndef() ? RegState::Undef : 0, Sub); 2049 } 2050 } 2051 2052 for (unsigned I = 3; I < MI.getNumExplicitOperands(); ++I) 2053 MovDPP.addImm(MI.getOperand(I).getImm()); 2054 2055 Split[Part] = MovDPP; 2056 ++Part; 2057 } 2058 2059 if (Dst.isVirtual()) 2060 BuildMI(MBB, MI, DL, get(AMDGPU::REG_SEQUENCE), Dst) 2061 .addReg(Split[0]->getOperand(0).getReg()) 2062 .addImm(AMDGPU::sub0) 2063 .addReg(Split[1]->getOperand(0).getReg()) 2064 .addImm(AMDGPU::sub1); 2065 2066 MI.eraseFromParent(); 2067 return std::make_pair(Split[0], Split[1]); 2068 } 2069 2070 bool SIInstrInfo::swapSourceModifiers(MachineInstr &MI, 2071 MachineOperand &Src0, 2072 unsigned Src0OpName, 2073 MachineOperand &Src1, 2074 unsigned Src1OpName) const { 2075 MachineOperand *Src0Mods = getNamedOperand(MI, Src0OpName); 2076 if (!Src0Mods) 2077 return false; 2078 2079 MachineOperand *Src1Mods = getNamedOperand(MI, Src1OpName); 2080 assert(Src1Mods && 2081 "All commutable instructions have both src0 and src1 modifiers"); 2082 2083 int Src0ModsVal = Src0Mods->getImm(); 2084 int Src1ModsVal = Src1Mods->getImm(); 2085 2086 Src1Mods->setImm(Src0ModsVal); 2087 Src0Mods->setImm(Src1ModsVal); 2088 return true; 2089 } 2090 2091 static MachineInstr *swapRegAndNonRegOperand(MachineInstr &MI, 2092 MachineOperand &RegOp, 2093 MachineOperand &NonRegOp) { 2094 Register Reg = RegOp.getReg(); 2095 unsigned SubReg = RegOp.getSubReg(); 2096 bool IsKill = RegOp.isKill(); 2097 bool IsDead = RegOp.isDead(); 2098 bool IsUndef = RegOp.isUndef(); 2099 bool IsDebug = RegOp.isDebug(); 2100 2101 if (NonRegOp.isImm()) 2102 RegOp.ChangeToImmediate(NonRegOp.getImm()); 2103 else if (NonRegOp.isFI()) 2104 RegOp.ChangeToFrameIndex(NonRegOp.getIndex()); 2105 else if (NonRegOp.isGlobal()) { 2106 RegOp.ChangeToGA(NonRegOp.getGlobal(), NonRegOp.getOffset(), 2107 NonRegOp.getTargetFlags()); 2108 } else 2109 return nullptr; 2110 2111 // Make sure we don't reinterpret a subreg index in the target flags. 2112 RegOp.setTargetFlags(NonRegOp.getTargetFlags()); 2113 2114 NonRegOp.ChangeToRegister(Reg, false, false, IsKill, IsDead, IsUndef, IsDebug); 2115 NonRegOp.setSubReg(SubReg); 2116 2117 return &MI; 2118 } 2119 2120 MachineInstr *SIInstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI, 2121 unsigned Src0Idx, 2122 unsigned Src1Idx) const { 2123 assert(!NewMI && "this should never be used"); 2124 2125 unsigned Opc = MI.getOpcode(); 2126 int CommutedOpcode = commuteOpcode(Opc); 2127 if (CommutedOpcode == -1) 2128 return nullptr; 2129 2130 assert(AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) == 2131 static_cast<int>(Src0Idx) && 2132 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1) == 2133 static_cast<int>(Src1Idx) && 2134 "inconsistency with findCommutedOpIndices"); 2135 2136 MachineOperand &Src0 = MI.getOperand(Src0Idx); 2137 MachineOperand &Src1 = MI.getOperand(Src1Idx); 2138 2139 MachineInstr *CommutedMI = nullptr; 2140 if (Src0.isReg() && Src1.isReg()) { 2141 if (isOperandLegal(MI, Src1Idx, &Src0)) { 2142 // Be sure to copy the source modifiers to the right place. 
2143 CommutedMI 2144 = TargetInstrInfo::commuteInstructionImpl(MI, NewMI, Src0Idx, Src1Idx); 2145 } 2146 2147 } else if (Src0.isReg() && !Src1.isReg()) { 2148 // src0 should always be able to support any operand type, so no need to 2149 // check operand legality. 2150 CommutedMI = swapRegAndNonRegOperand(MI, Src0, Src1); 2151 } else if (!Src0.isReg() && Src1.isReg()) { 2152 if (isOperandLegal(MI, Src1Idx, &Src0)) 2153 CommutedMI = swapRegAndNonRegOperand(MI, Src1, Src0); 2154 } else { 2155 // FIXME: Found two non registers to commute. This does happen. 2156 return nullptr; 2157 } 2158 2159 if (CommutedMI) { 2160 swapSourceModifiers(MI, Src0, AMDGPU::OpName::src0_modifiers, 2161 Src1, AMDGPU::OpName::src1_modifiers); 2162 2163 CommutedMI->setDesc(get(CommutedOpcode)); 2164 } 2165 2166 return CommutedMI; 2167 } 2168 2169 // This needs to be implemented because the source modifiers may be inserted 2170 // between the true commutable operands, and the base 2171 // TargetInstrInfo::commuteInstruction uses it. 2172 bool SIInstrInfo::findCommutedOpIndices(const MachineInstr &MI, 2173 unsigned &SrcOpIdx0, 2174 unsigned &SrcOpIdx1) const { 2175 return findCommutedOpIndices(MI.getDesc(), SrcOpIdx0, SrcOpIdx1); 2176 } 2177 2178 bool SIInstrInfo::findCommutedOpIndices(MCInstrDesc Desc, unsigned &SrcOpIdx0, 2179 unsigned &SrcOpIdx1) const { 2180 if (!Desc.isCommutable()) 2181 return false; 2182 2183 unsigned Opc = Desc.getOpcode(); 2184 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 2185 if (Src0Idx == -1) 2186 return false; 2187 2188 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 2189 if (Src1Idx == -1) 2190 return false; 2191 2192 return fixCommutedOpIndices(SrcOpIdx0, SrcOpIdx1, Src0Idx, Src1Idx); 2193 } 2194 2195 bool SIInstrInfo::isBranchOffsetInRange(unsigned BranchOp, 2196 int64_t BrOffset) const { 2197 // BranchRelaxation should never have to check s_setpc_b64 because its dest 2198 // block is unanalyzable. 2199 assert(BranchOp != AMDGPU::S_SETPC_B64); 2200 2201 // Convert to dwords. 2202 BrOffset /= 4; 2203 2204 // The branch instructions do PC += signext(SIMM16 * 4) + 4, so the offset is 2205 // from the next instruction. 2206 BrOffset -= 1; 2207 2208 return isIntN(BranchOffsetBits, BrOffset); 2209 } 2210 2211 MachineBasicBlock *SIInstrInfo::getBranchDestBlock( 2212 const MachineInstr &MI) const { 2213 if (MI.getOpcode() == AMDGPU::S_SETPC_B64) { 2214 // This would be a difficult analysis to perform, but can always be legal so 2215 // there's no need to analyze it. 2216 return nullptr; 2217 } 2218 2219 return MI.getOperand(0).getMBB(); 2220 } 2221 2222 unsigned SIInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB, 2223 MachineBasicBlock &DestBB, 2224 const DebugLoc &DL, 2225 int64_t BrOffset, 2226 RegScavenger *RS) const { 2227 assert(RS && "RegScavenger required for long branching"); 2228 assert(MBB.empty() && 2229 "new block should be inserted for expanding unconditional branch"); 2230 assert(MBB.pred_size() == 1); 2231 2232 MachineFunction *MF = MBB.getParent(); 2233 MachineRegisterInfo &MRI = MF->getRegInfo(); 2234 2235 // FIXME: Virtual register workaround for RegScavenger not working with empty 2236 // blocks. 2237 Register PCReg = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 2238 2239 auto I = MBB.end(); 2240 2241 // We need to compute the offset relative to the instruction immediately after 2242 // s_getpc_b64. Insert pc arithmetic code before last terminator. 
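  // Roughly, the expansion built below is (register numbers illustrative):
  //   s_getpc_b64 s[N:N+1]            ; post_getpc label attached here
  //   s_add_u32   sN,   sN,   offset_lo
  //   s_addc_u32  sN+1, sN+1, offset_hi
  //   s_setpc_b64 s[N:N+1]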
2243 MachineInstr *GetPC = BuildMI(MBB, I, DL, get(AMDGPU::S_GETPC_B64), PCReg); 2244 2245 auto &MCCtx = MF->getContext(); 2246 MCSymbol *PostGetPCLabel = 2247 MCCtx.createTempSymbol("post_getpc", /*AlwaysAddSuffix=*/true); 2248 GetPC->setPostInstrSymbol(*MF, PostGetPCLabel); 2249 2250 MCSymbol *OffsetLo = 2251 MCCtx.createTempSymbol("offset_lo", /*AlwaysAddSuffix=*/true); 2252 MCSymbol *OffsetHi = 2253 MCCtx.createTempSymbol("offset_hi", /*AlwaysAddSuffix=*/true); 2254 BuildMI(MBB, I, DL, get(AMDGPU::S_ADD_U32)) 2255 .addReg(PCReg, RegState::Define, AMDGPU::sub0) 2256 .addReg(PCReg, 0, AMDGPU::sub0) 2257 .addSym(OffsetLo, MO_FAR_BRANCH_OFFSET); 2258 BuildMI(MBB, I, DL, get(AMDGPU::S_ADDC_U32)) 2259 .addReg(PCReg, RegState::Define, AMDGPU::sub1) 2260 .addReg(PCReg, 0, AMDGPU::sub1) 2261 .addSym(OffsetHi, MO_FAR_BRANCH_OFFSET); 2262 2263 // Insert the indirect branch after the other terminator. 2264 BuildMI(&MBB, DL, get(AMDGPU::S_SETPC_B64)) 2265 .addReg(PCReg); 2266 2267 auto ComputeBlockSize = [](const TargetInstrInfo *TII, 2268 const MachineBasicBlock &MBB) { 2269 unsigned Size = 0; 2270 for (const MachineInstr &MI : MBB) 2271 Size += TII->getInstSizeInBytes(MI); 2272 return Size; 2273 }; 2274 2275 // FIXME: If spilling is necessary, this will fail because this scavenger has 2276 // no emergency stack slots. It is non-trivial to spill in this situation, 2277 // because the restore code needs to be specially placed after the 2278 // jump. BranchRelaxation then needs to be made aware of the newly inserted 2279 // block. 2280 // 2281 // If a spill is needed for the pc register pair, we need to insert a spill 2282 // restore block right before the destination block, and insert a short branch 2283 // into the old destination block's fallthrough predecessor. 2284 // e.g.: 2285 // 2286 // s_cbranch_scc0 skip_long_branch: 2287 // 2288 // long_branch_bb: 2289 // spill s[8:9] 2290 // s_getpc_b64 s[8:9] 2291 // s_add_u32 s8, s8, restore_bb 2292 // s_addc_u32 s9, s9, 0 2293 // s_setpc_b64 s[8:9] 2294 // 2295 // skip_long_branch: 2296 // foo; 2297 // 2298 // ..... 2299 // 2300 // dest_bb_fallthrough_predecessor: 2301 // bar; 2302 // s_branch dest_bb 2303 // 2304 // restore_bb: 2305 // restore s[8:9] 2306 // fallthrough dest_bb 2307 /// 2308 // dest_bb: 2309 // buzz; 2310 2311 RS->enterBasicBlockEnd(MBB); 2312 Register Scav = RS->scavengeRegisterBackwards( 2313 AMDGPU::SReg_64RegClass, 2314 MachineBasicBlock::iterator(GetPC), false, 0); 2315 MRI.replaceRegWith(PCReg, Scav); 2316 MRI.clearVirtRegs(); 2317 RS->setRegUsed(Scav); 2318 2319 // Now, the distance could be defined. 2320 auto *Offset = MCBinaryExpr::createSub( 2321 MCSymbolRefExpr::create(DestBB.getSymbol(), MCCtx), 2322 MCSymbolRefExpr::create(PostGetPCLabel, MCCtx), MCCtx); 2323 // Add offset assignments. 
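  // The 64-bit distance (DestBB - post_getpc) is only known at MC layout
  // time, so it is expressed through the two temp symbols: offset_lo gets the
  // low 32 bits (masked with 0xffffffff) and offset_hi gets the arithmetically
  // shifted high 32 bits, matching the s_add_u32 / s_addc_u32 pair above.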
2324 auto *Mask = MCConstantExpr::create(0xFFFFFFFFULL, MCCtx); 2325 OffsetLo->setVariableValue(MCBinaryExpr::createAnd(Offset, Mask, MCCtx)); 2326 auto *ShAmt = MCConstantExpr::create(32, MCCtx); 2327 OffsetHi->setVariableValue(MCBinaryExpr::createAShr(Offset, ShAmt, MCCtx)); 2328 return ComputeBlockSize(this, MBB); 2329 } 2330 2331 unsigned SIInstrInfo::getBranchOpcode(SIInstrInfo::BranchPredicate Cond) { 2332 switch (Cond) { 2333 case SIInstrInfo::SCC_TRUE: 2334 return AMDGPU::S_CBRANCH_SCC1; 2335 case SIInstrInfo::SCC_FALSE: 2336 return AMDGPU::S_CBRANCH_SCC0; 2337 case SIInstrInfo::VCCNZ: 2338 return AMDGPU::S_CBRANCH_VCCNZ; 2339 case SIInstrInfo::VCCZ: 2340 return AMDGPU::S_CBRANCH_VCCZ; 2341 case SIInstrInfo::EXECNZ: 2342 return AMDGPU::S_CBRANCH_EXECNZ; 2343 case SIInstrInfo::EXECZ: 2344 return AMDGPU::S_CBRANCH_EXECZ; 2345 default: 2346 llvm_unreachable("invalid branch predicate"); 2347 } 2348 } 2349 2350 SIInstrInfo::BranchPredicate SIInstrInfo::getBranchPredicate(unsigned Opcode) { 2351 switch (Opcode) { 2352 case AMDGPU::S_CBRANCH_SCC0: 2353 return SCC_FALSE; 2354 case AMDGPU::S_CBRANCH_SCC1: 2355 return SCC_TRUE; 2356 case AMDGPU::S_CBRANCH_VCCNZ: 2357 return VCCNZ; 2358 case AMDGPU::S_CBRANCH_VCCZ: 2359 return VCCZ; 2360 case AMDGPU::S_CBRANCH_EXECNZ: 2361 return EXECNZ; 2362 case AMDGPU::S_CBRANCH_EXECZ: 2363 return EXECZ; 2364 default: 2365 return INVALID_BR; 2366 } 2367 } 2368 2369 bool SIInstrInfo::analyzeBranchImpl(MachineBasicBlock &MBB, 2370 MachineBasicBlock::iterator I, 2371 MachineBasicBlock *&TBB, 2372 MachineBasicBlock *&FBB, 2373 SmallVectorImpl<MachineOperand> &Cond, 2374 bool AllowModify) const { 2375 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2376 // Unconditional Branch 2377 TBB = I->getOperand(0).getMBB(); 2378 return false; 2379 } 2380 2381 MachineBasicBlock *CondBB = nullptr; 2382 2383 if (I->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 2384 CondBB = I->getOperand(1).getMBB(); 2385 Cond.push_back(I->getOperand(0)); 2386 } else { 2387 BranchPredicate Pred = getBranchPredicate(I->getOpcode()); 2388 if (Pred == INVALID_BR) 2389 return true; 2390 2391 CondBB = I->getOperand(0).getMBB(); 2392 Cond.push_back(MachineOperand::CreateImm(Pred)); 2393 Cond.push_back(I->getOperand(1)); // Save the branch register. 2394 } 2395 ++I; 2396 2397 if (I == MBB.end()) { 2398 // Conditional branch followed by fall-through. 2399 TBB = CondBB; 2400 return false; 2401 } 2402 2403 if (I->getOpcode() == AMDGPU::S_BRANCH) { 2404 TBB = CondBB; 2405 FBB = I->getOperand(0).getMBB(); 2406 return false; 2407 } 2408 2409 return true; 2410 } 2411 2412 bool SIInstrInfo::analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, 2413 MachineBasicBlock *&FBB, 2414 SmallVectorImpl<MachineOperand> &Cond, 2415 bool AllowModify) const { 2416 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2417 auto E = MBB.end(); 2418 if (I == E) 2419 return false; 2420 2421 // Skip over the instructions that are artificially terminators for special 2422 // exec management. 
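  // These are the *_term pseudos (S_MOV/S_XOR/S_OR/S_ANDN2/S_AND in wave32 and
  // wave64 forms) expanded by expandPostRAPseudo; they update exec but carry
  // no branch information, so step past them to reach the real branch, and
  // give up on the control-flow pseudos that cannot be summarized here.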
2423 while (I != E && !I->isBranch() && !I->isReturn()) { 2424 switch (I->getOpcode()) { 2425 case AMDGPU::S_MOV_B64_term: 2426 case AMDGPU::S_XOR_B64_term: 2427 case AMDGPU::S_OR_B64_term: 2428 case AMDGPU::S_ANDN2_B64_term: 2429 case AMDGPU::S_AND_B64_term: 2430 case AMDGPU::S_MOV_B32_term: 2431 case AMDGPU::S_XOR_B32_term: 2432 case AMDGPU::S_OR_B32_term: 2433 case AMDGPU::S_ANDN2_B32_term: 2434 case AMDGPU::S_AND_B32_term: 2435 break; 2436 case AMDGPU::SI_IF: 2437 case AMDGPU::SI_ELSE: 2438 case AMDGPU::SI_KILL_I1_TERMINATOR: 2439 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 2440 // FIXME: It's messy that these need to be considered here at all. 2441 return true; 2442 default: 2443 llvm_unreachable("unexpected non-branch terminator inst"); 2444 } 2445 2446 ++I; 2447 } 2448 2449 if (I == E) 2450 return false; 2451 2452 return analyzeBranchImpl(MBB, I, TBB, FBB, Cond, AllowModify); 2453 } 2454 2455 unsigned SIInstrInfo::removeBranch(MachineBasicBlock &MBB, 2456 int *BytesRemoved) const { 2457 MachineBasicBlock::iterator I = MBB.getFirstTerminator(); 2458 2459 unsigned Count = 0; 2460 unsigned RemovedSize = 0; 2461 while (I != MBB.end()) { 2462 MachineBasicBlock::iterator Next = std::next(I); 2463 // Skip over artificial terminators when removing instructions. 2464 if (I->isBranch() || I->isReturn()) { 2465 RemovedSize += getInstSizeInBytes(*I); 2466 I->eraseFromParent(); 2467 ++Count; 2468 } 2469 I = Next; 2470 } 2471 2472 if (BytesRemoved) 2473 *BytesRemoved = RemovedSize; 2474 2475 return Count; 2476 } 2477 2478 // Copy the flags onto the implicit condition register operand. 2479 static void preserveCondRegFlags(MachineOperand &CondReg, 2480 const MachineOperand &OrigCond) { 2481 CondReg.setIsUndef(OrigCond.isUndef()); 2482 CondReg.setIsKill(OrigCond.isKill()); 2483 } 2484 2485 unsigned SIInstrInfo::insertBranch(MachineBasicBlock &MBB, 2486 MachineBasicBlock *TBB, 2487 MachineBasicBlock *FBB, 2488 ArrayRef<MachineOperand> Cond, 2489 const DebugLoc &DL, 2490 int *BytesAdded) const { 2491 if (!FBB && Cond.empty()) { 2492 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2493 .addMBB(TBB); 2494 if (BytesAdded) 2495 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2496 return 1; 2497 } 2498 2499 if(Cond.size() == 1 && Cond[0].isReg()) { 2500 BuildMI(&MBB, DL, get(AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO)) 2501 .add(Cond[0]) 2502 .addMBB(TBB); 2503 return 1; 2504 } 2505 2506 assert(TBB && Cond[0].isImm()); 2507 2508 unsigned Opcode 2509 = getBranchOpcode(static_cast<BranchPredicate>(Cond[0].getImm())); 2510 2511 if (!FBB) { 2512 Cond[1].isUndef(); 2513 MachineInstr *CondBr = 2514 BuildMI(&MBB, DL, get(Opcode)) 2515 .addMBB(TBB); 2516 2517 // Copy the flags onto the implicit condition register operand. 2518 preserveCondRegFlags(CondBr->getOperand(1), Cond[1]); 2519 fixImplicitOperands(*CondBr); 2520 2521 if (BytesAdded) 2522 *BytesAdded = ST.hasOffset3fBug() ? 8 : 4; 2523 return 1; 2524 } 2525 2526 assert(TBB && FBB); 2527 2528 MachineInstr *CondBr = 2529 BuildMI(&MBB, DL, get(Opcode)) 2530 .addMBB(TBB); 2531 fixImplicitOperands(*CondBr); 2532 BuildMI(&MBB, DL, get(AMDGPU::S_BRANCH)) 2533 .addMBB(FBB); 2534 2535 MachineOperand &CondReg = CondBr->getOperand(1); 2536 CondReg.setIsUndef(Cond[1].isUndef()); 2537 CondReg.setIsKill(Cond[1].isKill()); 2538 2539 if (BytesAdded) 2540 *BytesAdded = ST.hasOffset3fBug() ? 
16 : 8; 2541 2542 return 2; 2543 } 2544 2545 bool SIInstrInfo::reverseBranchCondition( 2546 SmallVectorImpl<MachineOperand> &Cond) const { 2547 if (Cond.size() != 2) { 2548 return true; 2549 } 2550 2551 if (Cond[0].isImm()) { 2552 Cond[0].setImm(-Cond[0].getImm()); 2553 return false; 2554 } 2555 2556 return true; 2557 } 2558 2559 bool SIInstrInfo::canInsertSelect(const MachineBasicBlock &MBB, 2560 ArrayRef<MachineOperand> Cond, 2561 Register DstReg, Register TrueReg, 2562 Register FalseReg, int &CondCycles, 2563 int &TrueCycles, int &FalseCycles) const { 2564 switch (Cond[0].getImm()) { 2565 case VCCNZ: 2566 case VCCZ: { 2567 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2568 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2569 if (MRI.getRegClass(FalseReg) != RC) 2570 return false; 2571 2572 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2573 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2574 2575 // Limit to equal cost for branch vs. N v_cndmask_b32s. 2576 return RI.hasVGPRs(RC) && NumInsts <= 6; 2577 } 2578 case SCC_TRUE: 2579 case SCC_FALSE: { 2580 // FIXME: We could insert for VGPRs if we could replace the original compare 2581 // with a vector one. 2582 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2583 const TargetRegisterClass *RC = MRI.getRegClass(TrueReg); 2584 if (MRI.getRegClass(FalseReg) != RC) 2585 return false; 2586 2587 int NumInsts = AMDGPU::getRegBitWidth(RC->getID()) / 32; 2588 2589 // Multiples of 8 can do s_cselect_b64 2590 if (NumInsts % 2 == 0) 2591 NumInsts /= 2; 2592 2593 CondCycles = TrueCycles = FalseCycles = NumInsts; // ??? 2594 return RI.isSGPRClass(RC); 2595 } 2596 default: 2597 return false; 2598 } 2599 } 2600 2601 void SIInstrInfo::insertSelect(MachineBasicBlock &MBB, 2602 MachineBasicBlock::iterator I, const DebugLoc &DL, 2603 Register DstReg, ArrayRef<MachineOperand> Cond, 2604 Register TrueReg, Register FalseReg) const { 2605 BranchPredicate Pred = static_cast<BranchPredicate>(Cond[0].getImm()); 2606 if (Pred == VCCZ || Pred == SCC_FALSE) { 2607 Pred = static_cast<BranchPredicate>(-Pred); 2608 std::swap(TrueReg, FalseReg); 2609 } 2610 2611 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 2612 const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg); 2613 unsigned DstSize = RI.getRegSizeInBits(*DstRC); 2614 2615 if (DstSize == 32) { 2616 MachineInstr *Select; 2617 if (Pred == SCC_TRUE) { 2618 Select = BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B32), DstReg) 2619 .addReg(TrueReg) 2620 .addReg(FalseReg); 2621 } else { 2622 // Instruction's operands are backwards from what is expected. 
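      // V_CNDMASK_B32 selects src1 for lanes where the condition bit is set,
      // so FalseReg goes in src0 and TrueReg in src1.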
2623 Select = BuildMI(MBB, I, DL, get(AMDGPU::V_CNDMASK_B32_e32), DstReg) 2624 .addReg(FalseReg) 2625 .addReg(TrueReg); 2626 } 2627 2628 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2629 return; 2630 } 2631 2632 if (DstSize == 64 && Pred == SCC_TRUE) { 2633 MachineInstr *Select = 2634 BuildMI(MBB, I, DL, get(AMDGPU::S_CSELECT_B64), DstReg) 2635 .addReg(TrueReg) 2636 .addReg(FalseReg); 2637 2638 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2639 return; 2640 } 2641 2642 static const int16_t Sub0_15[] = { 2643 AMDGPU::sub0, AMDGPU::sub1, AMDGPU::sub2, AMDGPU::sub3, 2644 AMDGPU::sub4, AMDGPU::sub5, AMDGPU::sub6, AMDGPU::sub7, 2645 AMDGPU::sub8, AMDGPU::sub9, AMDGPU::sub10, AMDGPU::sub11, 2646 AMDGPU::sub12, AMDGPU::sub13, AMDGPU::sub14, AMDGPU::sub15, 2647 }; 2648 2649 static const int16_t Sub0_15_64[] = { 2650 AMDGPU::sub0_sub1, AMDGPU::sub2_sub3, 2651 AMDGPU::sub4_sub5, AMDGPU::sub6_sub7, 2652 AMDGPU::sub8_sub9, AMDGPU::sub10_sub11, 2653 AMDGPU::sub12_sub13, AMDGPU::sub14_sub15, 2654 }; 2655 2656 unsigned SelOp = AMDGPU::V_CNDMASK_B32_e32; 2657 const TargetRegisterClass *EltRC = &AMDGPU::VGPR_32RegClass; 2658 const int16_t *SubIndices = Sub0_15; 2659 int NElts = DstSize / 32; 2660 2661 // 64-bit select is only available for SALU. 2662 // TODO: Split 96-bit into 64-bit and 32-bit, not 3x 32-bit. 2663 if (Pred == SCC_TRUE) { 2664 if (NElts % 2) { 2665 SelOp = AMDGPU::S_CSELECT_B32; 2666 EltRC = &AMDGPU::SGPR_32RegClass; 2667 } else { 2668 SelOp = AMDGPU::S_CSELECT_B64; 2669 EltRC = &AMDGPU::SGPR_64RegClass; 2670 SubIndices = Sub0_15_64; 2671 NElts /= 2; 2672 } 2673 } 2674 2675 MachineInstrBuilder MIB = BuildMI( 2676 MBB, I, DL, get(AMDGPU::REG_SEQUENCE), DstReg); 2677 2678 I = MIB->getIterator(); 2679 2680 SmallVector<Register, 8> Regs; 2681 for (int Idx = 0; Idx != NElts; ++Idx) { 2682 Register DstElt = MRI.createVirtualRegister(EltRC); 2683 Regs.push_back(DstElt); 2684 2685 unsigned SubIdx = SubIndices[Idx]; 2686 2687 MachineInstr *Select; 2688 if (SelOp == AMDGPU::V_CNDMASK_B32_e32) { 2689 Select = 2690 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2691 .addReg(FalseReg, 0, SubIdx) 2692 .addReg(TrueReg, 0, SubIdx); 2693 } else { 2694 Select = 2695 BuildMI(MBB, I, DL, get(SelOp), DstElt) 2696 .addReg(TrueReg, 0, SubIdx) 2697 .addReg(FalseReg, 0, SubIdx); 2698 } 2699 2700 preserveCondRegFlags(Select->getOperand(3), Cond[1]); 2701 fixImplicitOperands(*Select); 2702 2703 MIB.addReg(DstElt) 2704 .addImm(SubIdx); 2705 } 2706 } 2707 2708 bool SIInstrInfo::isFoldableCopy(const MachineInstr &MI) { 2709 switch (MI.getOpcode()) { 2710 case AMDGPU::V_MOV_B32_e32: 2711 case AMDGPU::V_MOV_B32_e64: 2712 case AMDGPU::V_MOV_B64_PSEUDO: { 2713 // If there are additional implicit register operands, this may be used for 2714 // register indexing so the source register operand isn't simply copied. 
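    // For example, the V_MOV_B32 emitted for a GPR-indexed indirect access
    // carries an extra implicit M0 use; requiring the operand count to match
    // the static description filters out such moves.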
2715 unsigned NumOps = MI.getDesc().getNumOperands() + 2716 MI.getDesc().getNumImplicitUses(); 2717 2718 return MI.getNumOperands() == NumOps; 2719 } 2720 case AMDGPU::S_MOV_B32: 2721 case AMDGPU::S_MOV_B64: 2722 case AMDGPU::COPY: 2723 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2724 case AMDGPU::V_ACCVGPR_READ_B32_e64: 2725 case AMDGPU::V_ACCVGPR_MOV_B32: 2726 return true; 2727 default: 2728 return false; 2729 } 2730 } 2731 2732 unsigned SIInstrInfo::getAddressSpaceForPseudoSourceKind( 2733 unsigned Kind) const { 2734 switch (Kind) { 2735 case PseudoSourceValue::Stack: 2736 case PseudoSourceValue::FixedStack: 2737 return AMDGPUAS::PRIVATE_ADDRESS; 2738 case PseudoSourceValue::ConstantPool: 2739 case PseudoSourceValue::GOT: 2740 case PseudoSourceValue::JumpTable: 2741 case PseudoSourceValue::GlobalValueCallEntry: 2742 case PseudoSourceValue::ExternalSymbolCallEntry: 2743 case PseudoSourceValue::TargetCustom: 2744 return AMDGPUAS::CONSTANT_ADDRESS; 2745 } 2746 return AMDGPUAS::FLAT_ADDRESS; 2747 } 2748 2749 static void removeModOperands(MachineInstr &MI) { 2750 unsigned Opc = MI.getOpcode(); 2751 int Src0ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2752 AMDGPU::OpName::src0_modifiers); 2753 int Src1ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2754 AMDGPU::OpName::src1_modifiers); 2755 int Src2ModIdx = AMDGPU::getNamedOperandIdx(Opc, 2756 AMDGPU::OpName::src2_modifiers); 2757 2758 MI.RemoveOperand(Src2ModIdx); 2759 MI.RemoveOperand(Src1ModIdx); 2760 MI.RemoveOperand(Src0ModIdx); 2761 } 2762 2763 bool SIInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, 2764 Register Reg, MachineRegisterInfo *MRI) const { 2765 if (!MRI->hasOneNonDBGUse(Reg)) 2766 return false; 2767 2768 switch (DefMI.getOpcode()) { 2769 default: 2770 return false; 2771 case AMDGPU::S_MOV_B64: 2772 // TODO: We could fold 64-bit immediates, but this gets complicated 2773 // when there are sub-registers. 2774 return false; 2775 2776 case AMDGPU::V_MOV_B32_e32: 2777 case AMDGPU::S_MOV_B32: 2778 case AMDGPU::V_ACCVGPR_WRITE_B32_e64: 2779 break; 2780 } 2781 2782 const MachineOperand *ImmOp = getNamedOperand(DefMI, AMDGPU::OpName::src0); 2783 assert(ImmOp); 2784 // FIXME: We could handle FrameIndex values here. 2785 if (!ImmOp->isImm()) 2786 return false; 2787 2788 unsigned Opc = UseMI.getOpcode(); 2789 if (Opc == AMDGPU::COPY) { 2790 Register DstReg = UseMI.getOperand(0).getReg(); 2791 bool Is16Bit = getOpSize(UseMI, 0) == 2; 2792 bool isVGPRCopy = RI.isVGPR(*MRI, DstReg); 2793 unsigned NewOpc = isVGPRCopy ?
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 2794 APInt Imm(32, ImmOp->getImm()); 2795 2796 if (UseMI.getOperand(1).getSubReg() == AMDGPU::hi16) 2797 Imm = Imm.ashr(16); 2798 2799 if (RI.isAGPR(*MRI, DstReg)) { 2800 if (!isInlineConstant(Imm)) 2801 return false; 2802 NewOpc = AMDGPU::V_ACCVGPR_WRITE_B32_e64; 2803 } 2804 2805 if (Is16Bit) { 2806 if (isVGPRCopy) 2807 return false; // Do not clobber vgpr_hi16 2808 2809 if (DstReg.isVirtual() && 2810 UseMI.getOperand(0).getSubReg() != AMDGPU::lo16) 2811 return false; 2812 2813 UseMI.getOperand(0).setSubReg(0); 2814 if (DstReg.isPhysical()) { 2815 DstReg = RI.get32BitRegister(DstReg); 2816 UseMI.getOperand(0).setReg(DstReg); 2817 } 2818 assert(UseMI.getOperand(1).getReg().isVirtual()); 2819 } 2820 2821 UseMI.setDesc(get(NewOpc)); 2822 UseMI.getOperand(1).ChangeToImmediate(Imm.getSExtValue()); 2823 UseMI.addImplicitDefUseOperands(*UseMI.getParent()->getParent()); 2824 return true; 2825 } 2826 2827 if (Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2828 Opc == AMDGPU::V_MAD_F16_e64 || Opc == AMDGPU::V_MAC_F16_e64 || 2829 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2830 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64) { 2831 // Don't fold if we are using source or output modifiers. The new VOP2 2832 // instructions don't have them. 2833 if (hasAnyModifiersSet(UseMI)) 2834 return false; 2835 2836 // If this is a free constant, there's no reason to do this. 2837 // TODO: We could fold this here instead of letting SIFoldOperands do it 2838 // later. 2839 MachineOperand *Src0 = getNamedOperand(UseMI, AMDGPU::OpName::src0); 2840 2841 // Any src operand can be used for the legality check. 2842 if (isInlineConstant(UseMI, *Src0, *ImmOp)) 2843 return false; 2844 2845 bool IsF32 = Opc == AMDGPU::V_MAD_F32_e64 || Opc == AMDGPU::V_MAC_F32_e64 || 2846 Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64; 2847 bool IsFMA = Opc == AMDGPU::V_FMA_F32_e64 || Opc == AMDGPU::V_FMAC_F32_e64 || 2848 Opc == AMDGPU::V_FMA_F16_e64 || Opc == AMDGPU::V_FMAC_F16_e64; 2849 MachineOperand *Src1 = getNamedOperand(UseMI, AMDGPU::OpName::src1); 2850 MachineOperand *Src2 = getNamedOperand(UseMI, AMDGPU::OpName::src2); 2851 2852 // Multiplied part is the constant: Use v_madmk_{f16, f32}. 2853 // We should only expect these to be on src0 due to canonicalizations. 2854 if (Src0->isReg() && Src0->getReg() == Reg) { 2855 if (!Src1->isReg() || RI.isSGPRClass(MRI->getRegClass(Src1->getReg()))) 2856 return false; 2857 2858 if (!Src2->isReg() || RI.isSGPRClass(MRI->getRegClass(Src2->getReg()))) 2859 return false; 2860 2861 unsigned NewOpc = 2862 IsFMA ? (IsF32 ? AMDGPU::V_FMAMK_F32 : AMDGPU::V_FMAMK_F16) 2863 : (IsF32 ? AMDGPU::V_MADMK_F32 : AMDGPU::V_MADMK_F16); 2864 if (pseudoToMCOpcode(NewOpc) == -1) 2865 return false; 2866 2867 // We need to swap operands 0 and 1 since madmk constant is at operand 1. 2868 2869 const int64_t Imm = ImmOp->getImm(); 2870 2871 // FIXME: This would be a lot easier if we could return a new instruction 2872 // instead of having to modify in place. 2873 2874 // Remove these first since they are at the end. 
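      // omod and clamp trail the source operands, so removing them from the
      // back keeps the remaining named-operand indices valid.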
2875 UseMI.RemoveOperand( 2876 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2877 UseMI.RemoveOperand( 2878 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2879 2880 Register Src1Reg = Src1->getReg(); 2881 unsigned Src1SubReg = Src1->getSubReg(); 2882 Src0->setReg(Src1Reg); 2883 Src0->setSubReg(Src1SubReg); 2884 Src0->setIsKill(Src1->isKill()); 2885 2886 if (Opc == AMDGPU::V_MAC_F32_e64 || 2887 Opc == AMDGPU::V_MAC_F16_e64 || 2888 Opc == AMDGPU::V_FMAC_F32_e64 || 2889 Opc == AMDGPU::V_FMAC_F16_e64) 2890 UseMI.untieRegOperand( 2891 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2892 2893 Src1->ChangeToImmediate(Imm); 2894 2895 removeModOperands(UseMI); 2896 UseMI.setDesc(get(NewOpc)); 2897 2898 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2899 if (DeleteDef) 2900 DefMI.eraseFromParent(); 2901 2902 return true; 2903 } 2904 2905 // Added part is the constant: Use v_madak_{f16, f32}. 2906 if (Src2->isReg() && Src2->getReg() == Reg) { 2907 // Not allowed to use constant bus for another operand. 2908 // We can however allow an inline immediate as src0. 2909 bool Src0Inlined = false; 2910 if (Src0->isReg()) { 2911 // Try to inline constant if possible. 2912 // If the Def moves immediate and the use is single 2913 // We are saving VGPR here. 2914 MachineInstr *Def = MRI->getUniqueVRegDef(Src0->getReg()); 2915 if (Def && Def->isMoveImmediate() && 2916 isInlineConstant(Def->getOperand(1)) && 2917 MRI->hasOneUse(Src0->getReg())) { 2918 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2919 Src0Inlined = true; 2920 } else if ((Src0->getReg().isPhysical() && 2921 (ST.getConstantBusLimit(Opc) <= 1 && 2922 RI.isSGPRClass(RI.getPhysRegClass(Src0->getReg())))) || 2923 (Src0->getReg().isVirtual() && 2924 (ST.getConstantBusLimit(Opc) <= 1 && 2925 RI.isSGPRClass(MRI->getRegClass(Src0->getReg()))))) 2926 return false; 2927 // VGPR is okay as Src0 - fallthrough 2928 } 2929 2930 if (Src1->isReg() && !Src0Inlined ) { 2931 // We have one slot for inlinable constant so far - try to fill it 2932 MachineInstr *Def = MRI->getUniqueVRegDef(Src1->getReg()); 2933 if (Def && Def->isMoveImmediate() && 2934 isInlineConstant(Def->getOperand(1)) && 2935 MRI->hasOneUse(Src1->getReg()) && 2936 commuteInstruction(UseMI)) { 2937 Src0->ChangeToImmediate(Def->getOperand(1).getImm()); 2938 } else if ((Src1->getReg().isPhysical() && 2939 RI.isSGPRClass(RI.getPhysRegClass(Src1->getReg()))) || 2940 (Src1->getReg().isVirtual() && 2941 RI.isSGPRClass(MRI->getRegClass(Src1->getReg())))) 2942 return false; 2943 // VGPR is okay as Src1 - fallthrough 2944 } 2945 2946 unsigned NewOpc = 2947 IsFMA ? (IsF32 ? AMDGPU::V_FMAAK_F32 : AMDGPU::V_FMAAK_F16) 2948 : (IsF32 ? AMDGPU::V_MADAK_F32 : AMDGPU::V_MADAK_F16); 2949 if (pseudoToMCOpcode(NewOpc) == -1) 2950 return false; 2951 2952 const int64_t Imm = ImmOp->getImm(); 2953 2954 // FIXME: This would be a lot easier if we could return a new instruction 2955 // instead of having to modify in place. 2956 2957 // Remove these first since they are at the end. 2958 UseMI.RemoveOperand( 2959 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod)); 2960 UseMI.RemoveOperand( 2961 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp)); 2962 2963 if (Opc == AMDGPU::V_MAC_F32_e64 || 2964 Opc == AMDGPU::V_MAC_F16_e64 || 2965 Opc == AMDGPU::V_FMAC_F32_e64 || 2966 Opc == AMDGPU::V_FMAC_F16_e64) 2967 UseMI.untieRegOperand( 2968 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)); 2969 2970 // ChangingToImmediate adds Src2 back to the instruction. 
2971 Src2->ChangeToImmediate(Imm); 2972 2973 // These come before src2. 2974 removeModOperands(UseMI); 2975 UseMI.setDesc(get(NewOpc)); 2976 // It might happen that UseMI was commuted 2977 // and we now have SGPR as SRC1. If so 2 inlined 2978 // constant and SGPR are illegal. 2979 legalizeOperands(UseMI); 2980 2981 bool DeleteDef = MRI->hasOneNonDBGUse(Reg); 2982 if (DeleteDef) 2983 DefMI.eraseFromParent(); 2984 2985 return true; 2986 } 2987 } 2988 2989 return false; 2990 } 2991 2992 static bool 2993 memOpsHaveSameBaseOperands(ArrayRef<const MachineOperand *> BaseOps1, 2994 ArrayRef<const MachineOperand *> BaseOps2) { 2995 if (BaseOps1.size() != BaseOps2.size()) 2996 return false; 2997 for (size_t I = 0, E = BaseOps1.size(); I < E; ++I) { 2998 if (!BaseOps1[I]->isIdenticalTo(*BaseOps2[I])) 2999 return false; 3000 } 3001 return true; 3002 } 3003 3004 static bool offsetsDoNotOverlap(int WidthA, int OffsetA, 3005 int WidthB, int OffsetB) { 3006 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB; 3007 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA; 3008 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB; 3009 return LowOffset + LowWidth <= HighOffset; 3010 } 3011 3012 bool SIInstrInfo::checkInstOffsetsDoNotOverlap(const MachineInstr &MIa, 3013 const MachineInstr &MIb) const { 3014 SmallVector<const MachineOperand *, 4> BaseOps0, BaseOps1; 3015 int64_t Offset0, Offset1; 3016 unsigned Dummy0, Dummy1; 3017 bool Offset0IsScalable, Offset1IsScalable; 3018 if (!getMemOperandsWithOffsetWidth(MIa, BaseOps0, Offset0, Offset0IsScalable, 3019 Dummy0, &RI) || 3020 !getMemOperandsWithOffsetWidth(MIb, BaseOps1, Offset1, Offset1IsScalable, 3021 Dummy1, &RI)) 3022 return false; 3023 3024 if (!memOpsHaveSameBaseOperands(BaseOps0, BaseOps1)) 3025 return false; 3026 3027 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand()) { 3028 // FIXME: Handle ds_read2 / ds_write2. 3029 return false; 3030 } 3031 unsigned Width0 = MIa.memoperands().front()->getSize(); 3032 unsigned Width1 = MIb.memoperands().front()->getSize(); 3033 return offsetsDoNotOverlap(Width0, Offset0, Width1, Offset1); 3034 } 3035 3036 bool SIInstrInfo::areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, 3037 const MachineInstr &MIb) const { 3038 assert(MIa.mayLoadOrStore() && 3039 "MIa must load from or modify a memory location"); 3040 assert(MIb.mayLoadOrStore() && 3041 "MIb must load from or modify a memory location"); 3042 3043 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects()) 3044 return false; 3045 3046 // XXX - Can we relax this between address spaces? 3047 if (MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef()) 3048 return false; 3049 3050 // TODO: Should we check the address space from the MachineMemOperand? That 3051 // would allow us to distinguish objects we know don't alias based on the 3052 // underlying address space, even if it was lowered to a different one, 3053 // e.g. private accesses lowered to use MUBUF instructions on a scratch 3054 // buffer. 
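  // The checks below are pairwise by memory class: two accesses of the same
  // class fall back to an offset-overlap check, DS (LDS/GDS) accesses are
  // disjoint from anything that cannot address LDS, and FLAT may touch any
  // address space, so it only pairs with other FLAT accesses.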
3055 if (isDS(MIa)) { 3056 if (isDS(MIb)) 3057 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3058 3059 return !isFLAT(MIb) || isSegmentSpecificFLAT(MIb); 3060 } 3061 3062 if (isMUBUF(MIa) || isMTBUF(MIa)) { 3063 if (isMUBUF(MIb) || isMTBUF(MIb)) 3064 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3065 3066 return !isFLAT(MIb) && !isSMRD(MIb); 3067 } 3068 3069 if (isSMRD(MIa)) { 3070 if (isSMRD(MIb)) 3071 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3072 3073 return !isFLAT(MIb) && !isMUBUF(MIb) && !isMTBUF(MIb); 3074 } 3075 3076 if (isFLAT(MIa)) { 3077 if (isFLAT(MIb)) 3078 return checkInstOffsetsDoNotOverlap(MIa, MIb); 3079 3080 return false; 3081 } 3082 3083 return false; 3084 } 3085 3086 static bool getFoldableImm(Register Reg, const MachineRegisterInfo &MRI, 3087 int64_t &Imm) { 3088 if (Reg.isPhysical()) 3089 return false; 3090 auto *Def = MRI.getUniqueVRegDef(Reg); 3091 if (Def && SIInstrInfo::isFoldableCopy(*Def) && Def->getOperand(1).isImm()) { 3092 Imm = Def->getOperand(1).getImm(); 3093 return true; 3094 } 3095 return false; 3096 } 3097 3098 static bool getFoldableImm(const MachineOperand *MO, int64_t &Imm) { 3099 if (!MO->isReg()) 3100 return false; 3101 const MachineFunction *MF = MO->getParent()->getParent()->getParent(); 3102 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3103 return getFoldableImm(MO->getReg(), MRI, Imm); 3104 } 3105 3106 static void updateLiveVariables(LiveVariables *LV, MachineInstr &MI, 3107 MachineInstr &NewMI) { 3108 if (LV) { 3109 unsigned NumOps = MI.getNumOperands(); 3110 for (unsigned I = 1; I < NumOps; ++I) { 3111 MachineOperand &Op = MI.getOperand(I); 3112 if (Op.isReg() && Op.isKill()) 3113 LV->replaceKillInstruction(Op.getReg(), MI, NewMI); 3114 } 3115 } 3116 } 3117 3118 MachineInstr *SIInstrInfo::convertToThreeAddress(MachineInstr &MI, 3119 LiveVariables *LV) const { 3120 unsigned Opc = MI.getOpcode(); 3121 bool IsF16 = false; 3122 bool IsFMA = Opc == AMDGPU::V_FMAC_F32_e32 || Opc == AMDGPU::V_FMAC_F32_e64 || 3123 Opc == AMDGPU::V_FMAC_F16_e32 || Opc == AMDGPU::V_FMAC_F16_e64 || 3124 Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3125 bool IsF64 = Opc == AMDGPU::V_FMAC_F64_e32 || Opc == AMDGPU::V_FMAC_F64_e64; 3126 3127 switch (Opc) { 3128 default: 3129 return nullptr; 3130 case AMDGPU::V_MAC_F16_e64: 3131 case AMDGPU::V_FMAC_F16_e64: 3132 IsF16 = true; 3133 LLVM_FALLTHROUGH; 3134 case AMDGPU::V_MAC_F32_e64: 3135 case AMDGPU::V_FMAC_F32_e64: 3136 case AMDGPU::V_FMAC_F64_e64: 3137 break; 3138 case AMDGPU::V_MAC_F16_e32: 3139 case AMDGPU::V_FMAC_F16_e32: 3140 IsF16 = true; 3141 LLVM_FALLTHROUGH; 3142 case AMDGPU::V_MAC_F32_e32: 3143 case AMDGPU::V_FMAC_F32_e32: 3144 case AMDGPU::V_FMAC_F64_e32: { 3145 int Src0Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3146 AMDGPU::OpName::src0); 3147 const MachineOperand *Src0 = &MI.getOperand(Src0Idx); 3148 if (!Src0->isReg() && !Src0->isImm()) 3149 return nullptr; 3150 3151 if (Src0->isImm() && !isInlineConstant(MI, Src0Idx, *Src0)) 3152 return nullptr; 3153 3154 break; 3155 } 3156 } 3157 3158 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 3159 const MachineOperand *Src0 = getNamedOperand(MI, AMDGPU::OpName::src0); 3160 const MachineOperand *Src0Mods = 3161 getNamedOperand(MI, AMDGPU::OpName::src0_modifiers); 3162 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3163 const MachineOperand *Src1Mods = 3164 getNamedOperand(MI, AMDGPU::OpName::src1_modifiers); 3165 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 
3166 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3167 const MachineOperand *Omod = getNamedOperand(MI, AMDGPU::OpName::omod); 3168 MachineInstrBuilder MIB; 3169 MachineBasicBlock &MBB = *MI.getParent(); 3170 3171 if (!Src0Mods && !Src1Mods && !Clamp && !Omod && !IsF64 && 3172 // If we have an SGPR input, we will violate the constant bus restriction. 3173 (ST.getConstantBusLimit(Opc) > 1 || !Src0->isReg() || 3174 !RI.isSGPRReg(MBB.getParent()->getRegInfo(), Src0->getReg()))) { 3175 int64_t Imm; 3176 if (getFoldableImm(Src2, Imm)) { 3177 unsigned NewOpc = 3178 IsFMA ? (IsF16 ? AMDGPU::V_FMAAK_F16 : AMDGPU::V_FMAAK_F32) 3179 : (IsF16 ? AMDGPU::V_MADAK_F16 : AMDGPU::V_MADAK_F32); 3180 if (pseudoToMCOpcode(NewOpc) != -1) { 3181 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3182 .add(*Dst) 3183 .add(*Src0) 3184 .add(*Src1) 3185 .addImm(Imm); 3186 updateLiveVariables(LV, MI, *MIB); 3187 return MIB; 3188 } 3189 } 3190 unsigned NewOpc = IsFMA 3191 ? (IsF16 ? AMDGPU::V_FMAMK_F16 : AMDGPU::V_FMAMK_F32) 3192 : (IsF16 ? AMDGPU::V_MADMK_F16 : AMDGPU::V_MADMK_F32); 3193 if (getFoldableImm(Src1, Imm)) { 3194 if (pseudoToMCOpcode(NewOpc) != -1) { 3195 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3196 .add(*Dst) 3197 .add(*Src0) 3198 .addImm(Imm) 3199 .add(*Src2); 3200 updateLiveVariables(LV, MI, *MIB); 3201 return MIB; 3202 } 3203 } 3204 if (getFoldableImm(Src0, Imm)) { 3205 if (pseudoToMCOpcode(NewOpc) != -1 && 3206 isOperandLegal( 3207 MI, AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::src0), 3208 Src1)) { 3209 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3210 .add(*Dst) 3211 .add(*Src1) 3212 .addImm(Imm) 3213 .add(*Src2); 3214 updateLiveVariables(LV, MI, *MIB); 3215 return MIB; 3216 } 3217 } 3218 } 3219 3220 unsigned NewOpc = IsFMA ? (IsF16 ? AMDGPU::V_FMA_F16_e64 3221 : IsF64 ? AMDGPU::V_FMA_F64_e64 3222 : AMDGPU::V_FMA_F32_e64) 3223 : (IsF16 ? AMDGPU::V_MAD_F16_e64 : AMDGPU::V_MAD_F32_e64); 3224 if (pseudoToMCOpcode(NewOpc) == -1) 3225 return nullptr; 3226 3227 MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc)) 3228 .add(*Dst) 3229 .addImm(Src0Mods ? Src0Mods->getImm() : 0) 3230 .add(*Src0) 3231 .addImm(Src1Mods ? Src1Mods->getImm() : 0) 3232 .add(*Src1) 3233 .addImm(0) // Src mods 3234 .add(*Src2) 3235 .addImm(Clamp ? Clamp->getImm() : 0) 3236 .addImm(Omod ? Omod->getImm() : 0); 3237 updateLiveVariables(LV, MI, *MIB); 3238 return MIB; 3239 } 3240 3241 // It's not generally safe to move VALU instructions across these since it will 3242 // start using the register as a base index rather than directly. 3243 // XXX - Why isn't hasSideEffects sufficient for these? 3244 static bool changesVGPRIndexingMode(const MachineInstr &MI) { 3245 switch (MI.getOpcode()) { 3246 case AMDGPU::S_SET_GPR_IDX_ON: 3247 case AMDGPU::S_SET_GPR_IDX_MODE: 3248 case AMDGPU::S_SET_GPR_IDX_OFF: 3249 return true; 3250 default: 3251 return false; 3252 } 3253 } 3254 3255 bool SIInstrInfo::isSchedulingBoundary(const MachineInstr &MI, 3256 const MachineBasicBlock *MBB, 3257 const MachineFunction &MF) const { 3258 // Skipping the check for SP writes in the base implementation. The reason it 3259 // was added was apparently due to compile time concerns. 3260 // 3261 // TODO: Do we really want this barrier? It triggers unnecessary hazard nops 3262 // but is probably avoidable. 3263 3264 // Copied from base implementation. 3265 // Terminators and labels can't be scheduled around. 
3266 if (MI.isTerminator() || MI.isPosition()) 3267 return true; 3268 3269 // INLINEASM_BR can jump to another block 3270 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR) 3271 return true; 3272 3273 // Target-independent instructions do not have an implicit-use of EXEC, even 3274 // when they operate on VGPRs. Treating EXEC modifications as scheduling 3275 // boundaries prevents incorrect movements of such instructions. 3276 return MI.modifiesRegister(AMDGPU::EXEC, &RI) || 3277 MI.getOpcode() == AMDGPU::S_SETREG_IMM32_B32 || 3278 MI.getOpcode() == AMDGPU::S_SETREG_B32 || 3279 changesVGPRIndexingMode(MI); 3280 } 3281 3282 bool SIInstrInfo::isAlwaysGDS(uint16_t Opcode) const { 3283 return Opcode == AMDGPU::DS_ORDERED_COUNT || 3284 Opcode == AMDGPU::DS_GWS_INIT || 3285 Opcode == AMDGPU::DS_GWS_SEMA_V || 3286 Opcode == AMDGPU::DS_GWS_SEMA_BR || 3287 Opcode == AMDGPU::DS_GWS_SEMA_P || 3288 Opcode == AMDGPU::DS_GWS_SEMA_RELEASE_ALL || 3289 Opcode == AMDGPU::DS_GWS_BARRIER; 3290 } 3291 3292 bool SIInstrInfo::modifiesModeRegister(const MachineInstr &MI) { 3293 // Skip the full operand and register alias search modifiesRegister 3294 // does. There's only a handful of instructions that touch this, it's only an 3295 // implicit def, and doesn't alias any other registers. 3296 if (const MCPhysReg *ImpDef = MI.getDesc().getImplicitDefs()) { 3297 for (; ImpDef && *ImpDef; ++ImpDef) { 3298 if (*ImpDef == AMDGPU::MODE) 3299 return true; 3300 } 3301 } 3302 3303 return false; 3304 } 3305 3306 bool SIInstrInfo::hasUnwantedEffectsWhenEXECEmpty(const MachineInstr &MI) const { 3307 unsigned Opcode = MI.getOpcode(); 3308 3309 if (MI.mayStore() && isSMRD(MI)) 3310 return true; // scalar store or atomic 3311 3312 // This will terminate the function when other lanes may need to continue. 3313 if (MI.isReturn()) 3314 return true; 3315 3316 // These instructions cause shader I/O that may cause hardware lockups 3317 // when executed with an empty EXEC mask. 3318 // 3319 // Note: exp with VM = DONE = 0 is automatically skipped by hardware when 3320 // EXEC = 0, but checking for that case here seems not worth it 3321 // given the typical code patterns. 3322 if (Opcode == AMDGPU::S_SENDMSG || Opcode == AMDGPU::S_SENDMSGHALT || 3323 isEXP(Opcode) || 3324 Opcode == AMDGPU::DS_ORDERED_COUNT || Opcode == AMDGPU::S_TRAP || 3325 Opcode == AMDGPU::DS_GWS_INIT || Opcode == AMDGPU::DS_GWS_BARRIER) 3326 return true; 3327 3328 if (MI.isCall() || MI.isInlineAsm()) 3329 return true; // conservative assumption 3330 3331 // A mode change is a scalar operation that influences vector instructions. 3332 if (modifiesModeRegister(MI)) 3333 return true; 3334 3335 // These are like SALU instructions in terms of effects, so it's questionable 3336 // whether we should return true for those. 3337 // 3338 // However, executing them with EXEC = 0 causes them to operate on undefined 3339 // data, which we avoid by returning true here. 3340 if (Opcode == AMDGPU::V_READFIRSTLANE_B32 || 3341 Opcode == AMDGPU::V_READLANE_B32 || Opcode == AMDGPU::V_WRITELANE_B32) 3342 return true; 3343 3344 return false; 3345 } 3346 3347 bool SIInstrInfo::mayReadEXEC(const MachineRegisterInfo &MRI, 3348 const MachineInstr &MI) const { 3349 if (MI.isMetaInstruction()) 3350 return false; 3351 3352 // This won't read exec if this is an SGPR->SGPR copy. 
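  // A copy that writes a VGPR is implicitly predicated by exec, so it is
  // treated as reading exec even though exec is not an explicit operand.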
3353 if (MI.isCopyLike()) { 3354 if (!RI.isSGPRReg(MRI, MI.getOperand(0).getReg())) 3355 return true; 3356 3357 // Make sure this isn't copying exec as a normal operand 3358 return MI.readsRegister(AMDGPU::EXEC, &RI); 3359 } 3360 3361 // Make a conservative assumption about the callee. 3362 if (MI.isCall()) 3363 return true; 3364 3365 // Be conservative with any unhandled generic opcodes. 3366 if (!isTargetSpecificOpcode(MI.getOpcode())) 3367 return true; 3368 3369 return !isSALU(MI) || MI.readsRegister(AMDGPU::EXEC, &RI); 3370 } 3371 3372 bool SIInstrInfo::isInlineConstant(const APInt &Imm) const { 3373 switch (Imm.getBitWidth()) { 3374 case 1: // This likely will be a condition code mask. 3375 return true; 3376 3377 case 32: 3378 return AMDGPU::isInlinableLiteral32(Imm.getSExtValue(), 3379 ST.hasInv2PiInlineImm()); 3380 case 64: 3381 return AMDGPU::isInlinableLiteral64(Imm.getSExtValue(), 3382 ST.hasInv2PiInlineImm()); 3383 case 16: 3384 return ST.has16BitInsts() && 3385 AMDGPU::isInlinableLiteral16(Imm.getSExtValue(), 3386 ST.hasInv2PiInlineImm()); 3387 default: 3388 llvm_unreachable("invalid bitwidth"); 3389 } 3390 } 3391 3392 bool SIInstrInfo::isInlineConstant(const MachineOperand &MO, 3393 uint8_t OperandType) const { 3394 if (!MO.isImm() || 3395 OperandType < AMDGPU::OPERAND_SRC_FIRST || 3396 OperandType > AMDGPU::OPERAND_SRC_LAST) 3397 return false; 3398 3399 // MachineOperand provides no way to tell the true operand size, since it only 3400 // records a 64-bit value. We need to know the size to determine if a 32-bit 3401 // floating point immediate bit pattern is legal for an integer immediate. It 3402 // would be for any 32-bit integer operand, but would not be for a 64-bit one. 3403 3404 int64_t Imm = MO.getImm(); 3405 switch (OperandType) { 3406 case AMDGPU::OPERAND_REG_IMM_INT32: 3407 case AMDGPU::OPERAND_REG_IMM_FP32: 3408 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3409 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3410 case AMDGPU::OPERAND_REG_IMM_V2FP32: 3411 case AMDGPU::OPERAND_REG_INLINE_C_V2FP32: 3412 case AMDGPU::OPERAND_REG_IMM_V2INT32: 3413 case AMDGPU::OPERAND_REG_INLINE_C_V2INT32: 3414 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3415 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: { 3416 int32_t Trunc = static_cast<int32_t>(Imm); 3417 return AMDGPU::isInlinableLiteral32(Trunc, ST.hasInv2PiInlineImm()); 3418 } 3419 case AMDGPU::OPERAND_REG_IMM_INT64: 3420 case AMDGPU::OPERAND_REG_IMM_FP64: 3421 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3422 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3423 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: 3424 return AMDGPU::isInlinableLiteral64(MO.getImm(), 3425 ST.hasInv2PiInlineImm()); 3426 case AMDGPU::OPERAND_REG_IMM_INT16: 3427 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3428 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3429 // We would expect inline immediates to not be concerned with an integer/fp 3430 // distinction. However, in the case of 16-bit integer operations, the 3431 // "floating point" values appear to not work. It seems read the low 16-bits 3432 // of 32-bit immediates, which happens to always work for the integer 3433 // values. 3434 // 3435 // See llvm bugzilla 46302. 3436 // 3437 // TODO: Theoretically we could use op-sel to use the high bits of the 3438 // 32-bit FP values. 3439 return AMDGPU::isInlinableIntLiteral(Imm); 3440 case AMDGPU::OPERAND_REG_IMM_V2INT16: 3441 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16: 3442 case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16: 3443 // This suffers the same problem as the scalar 16-bit cases. 
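// Only the integer inline range is accepted here; packed FP16 inline values
// such as 1.0 do not encode as inline constants for the V2INT16 operand types.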
3444 return AMDGPU::isInlinableIntLiteralV216(Imm); 3445 case AMDGPU::OPERAND_REG_IMM_FP16: 3446 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3447 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: { 3448 if (isInt<16>(Imm) || isUInt<16>(Imm)) { 3449 // A few special case instructions have 16-bit operands on subtargets 3450 // where 16-bit instructions are not legal. 3451 // TODO: Do the 32-bit immediates work? We shouldn't really need to handle 3452 // constants in these cases 3453 int16_t Trunc = static_cast<int16_t>(Imm); 3454 return ST.has16BitInsts() && 3455 AMDGPU::isInlinableLiteral16(Trunc, ST.hasInv2PiInlineImm()); 3456 } 3457 3458 return false; 3459 } 3460 case AMDGPU::OPERAND_REG_IMM_V2FP16: 3461 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: 3462 case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16: { 3463 uint32_t Trunc = static_cast<uint32_t>(Imm); 3464 return AMDGPU::isInlinableLiteralV216(Trunc, ST.hasInv2PiInlineImm()); 3465 } 3466 default: 3467 llvm_unreachable("invalid bitwidth"); 3468 } 3469 } 3470 3471 bool SIInstrInfo::isLiteralConstantLike(const MachineOperand &MO, 3472 const MCOperandInfo &OpInfo) const { 3473 switch (MO.getType()) { 3474 case MachineOperand::MO_Register: 3475 return false; 3476 case MachineOperand::MO_Immediate: 3477 return !isInlineConstant(MO, OpInfo); 3478 case MachineOperand::MO_FrameIndex: 3479 case MachineOperand::MO_MachineBasicBlock: 3480 case MachineOperand::MO_ExternalSymbol: 3481 case MachineOperand::MO_GlobalAddress: 3482 case MachineOperand::MO_MCSymbol: 3483 return true; 3484 default: 3485 llvm_unreachable("unexpected operand type"); 3486 } 3487 } 3488 3489 static bool compareMachineOp(const MachineOperand &Op0, 3490 const MachineOperand &Op1) { 3491 if (Op0.getType() != Op1.getType()) 3492 return false; 3493 3494 switch (Op0.getType()) { 3495 case MachineOperand::MO_Register: 3496 return Op0.getReg() == Op1.getReg(); 3497 case MachineOperand::MO_Immediate: 3498 return Op0.getImm() == Op1.getImm(); 3499 default: 3500 llvm_unreachable("Didn't expect to be comparing these operand types"); 3501 } 3502 } 3503 3504 bool SIInstrInfo::isImmOperandLegal(const MachineInstr &MI, unsigned OpNo, 3505 const MachineOperand &MO) const { 3506 const MCInstrDesc &InstDesc = MI.getDesc(); 3507 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpNo]; 3508 3509 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 3510 3511 if (OpInfo.OperandType == MCOI::OPERAND_IMMEDIATE) 3512 return true; 3513 3514 if (OpInfo.RegClass < 0) 3515 return false; 3516 3517 if (MO.isImm() && isInlineConstant(MO, OpInfo)) { 3518 if (isMAI(MI) && ST.hasMFMAInlineLiteralBug() && 3519 OpNo ==(unsigned)AMDGPU::getNamedOperandIdx(MI.getOpcode(), 3520 AMDGPU::OpName::src2)) 3521 return false; 3522 return RI.opCanUseInlineConstant(OpInfo.OperandType); 3523 } 3524 3525 if (!RI.opCanUseLiteralConstant(OpInfo.OperandType)) 3526 return false; 3527 3528 if (!isVOP3(MI) || !AMDGPU::isSISrcOperand(InstDesc, OpNo)) 3529 return true; 3530 3531 return ST.hasVOP3Literal(); 3532 } 3533 3534 bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const { 3535 // GFX90A does not have V_MUL_LEGACY_F32_e32. 3536 if (Opcode == AMDGPU::V_MUL_LEGACY_F32_e64 && ST.hasGFX90AInsts()) 3537 return false; 3538 3539 int Op32 = AMDGPU::getVOPe32(Opcode); 3540 if (Op32 == -1) 3541 return false; 3542 3543 return pseudoToMCOpcode(Op32) != -1; 3544 } 3545 3546 bool SIInstrInfo::hasModifiers(unsigned Opcode) const { 3547 // The src0_modifier operand is present on all instructions 3548 // that have modifiers. 
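// For example, the VOP3 form V_ADD_F32_e64 has src0_modifiers/src1_modifiers
// operands, while the VOP2 form V_ADD_F32_e32 does not.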
3549 3550 return AMDGPU::getNamedOperandIdx(Opcode, 3551 AMDGPU::OpName::src0_modifiers) != -1; 3552 } 3553 3554 bool SIInstrInfo::hasModifiersSet(const MachineInstr &MI, 3555 unsigned OpName) const { 3556 const MachineOperand *Mods = getNamedOperand(MI, OpName); 3557 return Mods && Mods->getImm(); 3558 } 3559 3560 bool SIInstrInfo::hasAnyModifiersSet(const MachineInstr &MI) const { 3561 return hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers) || 3562 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers) || 3563 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers) || 3564 hasModifiersSet(MI, AMDGPU::OpName::clamp) || 3565 hasModifiersSet(MI, AMDGPU::OpName::omod); 3566 } 3567 3568 bool SIInstrInfo::canShrink(const MachineInstr &MI, 3569 const MachineRegisterInfo &MRI) const { 3570 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3571 // Can't shrink instruction with three operands. 3572 // FIXME: v_cndmask_b32 has 3 operands and is shrinkable, but we need to add 3573 // a special case for it. It can only be shrunk if the third operand 3574 // is vcc, and src0_modifiers and src1_modifiers are not set. 3575 // We should handle this the same way we handle vopc, by addding 3576 // a register allocation hint pre-regalloc and then do the shrinking 3577 // post-regalloc. 3578 if (Src2) { 3579 switch (MI.getOpcode()) { 3580 default: return false; 3581 3582 case AMDGPU::V_ADDC_U32_e64: 3583 case AMDGPU::V_SUBB_U32_e64: 3584 case AMDGPU::V_SUBBREV_U32_e64: { 3585 const MachineOperand *Src1 3586 = getNamedOperand(MI, AMDGPU::OpName::src1); 3587 if (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg())) 3588 return false; 3589 // Additional verification is needed for sdst/src2. 3590 return true; 3591 } 3592 case AMDGPU::V_MAC_F32_e64: 3593 case AMDGPU::V_MAC_F16_e64: 3594 case AMDGPU::V_FMAC_F32_e64: 3595 case AMDGPU::V_FMAC_F16_e64: 3596 case AMDGPU::V_FMAC_F64_e64: 3597 if (!Src2->isReg() || !RI.isVGPR(MRI, Src2->getReg()) || 3598 hasModifiersSet(MI, AMDGPU::OpName::src2_modifiers)) 3599 return false; 3600 break; 3601 3602 case AMDGPU::V_CNDMASK_B32_e64: 3603 break; 3604 } 3605 } 3606 3607 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3608 if (Src1 && (!Src1->isReg() || !RI.isVGPR(MRI, Src1->getReg()) || 3609 hasModifiersSet(MI, AMDGPU::OpName::src1_modifiers))) 3610 return false; 3611 3612 // We don't need to check src0, all input types are legal, so just make sure 3613 // src0 isn't using any modifiers. 3614 if (hasModifiersSet(MI, AMDGPU::OpName::src0_modifiers)) 3615 return false; 3616 3617 // Can it be shrunk to a valid 32 bit opcode? 3618 if (!hasVALU32BitEncoding(MI.getOpcode())) 3619 return false; 3620 3621 // Check output modifiers 3622 return !hasModifiersSet(MI, AMDGPU::OpName::omod) && 3623 !hasModifiersSet(MI, AMDGPU::OpName::clamp); 3624 } 3625 3626 // Set VCC operand with all flags from \p Orig, except for setting it as 3627 // implicit. 
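// Used by buildShrunkInst below: when e.g. V_CNDMASK_B32_e64 is shrunk to the
// e32 form, the explicit carry operand becomes an implicit vcc/vcc_lo use and
// its undef/kill flags must be carried over.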
3628 static void copyFlagsToImplicitVCC(MachineInstr &MI, 3629 const MachineOperand &Orig) { 3630 3631 for (MachineOperand &Use : MI.implicit_operands()) { 3632 if (Use.isUse() && 3633 (Use.getReg() == AMDGPU::VCC || Use.getReg() == AMDGPU::VCC_LO)) { 3634 Use.setIsUndef(Orig.isUndef()); 3635 Use.setIsKill(Orig.isKill()); 3636 return; 3637 } 3638 } 3639 } 3640 3641 MachineInstr *SIInstrInfo::buildShrunkInst(MachineInstr &MI, 3642 unsigned Op32) const { 3643 MachineBasicBlock *MBB = MI.getParent();; 3644 MachineInstrBuilder Inst32 = 3645 BuildMI(*MBB, MI, MI.getDebugLoc(), get(Op32)) 3646 .setMIFlags(MI.getFlags()); 3647 3648 // Add the dst operand if the 32-bit encoding also has an explicit $vdst. 3649 // For VOPC instructions, this is replaced by an implicit def of vcc. 3650 int Op32DstIdx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::vdst); 3651 if (Op32DstIdx != -1) { 3652 // dst 3653 Inst32.add(MI.getOperand(0)); 3654 } else { 3655 assert(((MI.getOperand(0).getReg() == AMDGPU::VCC) || 3656 (MI.getOperand(0).getReg() == AMDGPU::VCC_LO)) && 3657 "Unexpected case"); 3658 } 3659 3660 Inst32.add(*getNamedOperand(MI, AMDGPU::OpName::src0)); 3661 3662 const MachineOperand *Src1 = getNamedOperand(MI, AMDGPU::OpName::src1); 3663 if (Src1) 3664 Inst32.add(*Src1); 3665 3666 const MachineOperand *Src2 = getNamedOperand(MI, AMDGPU::OpName::src2); 3667 3668 if (Src2) { 3669 int Op32Src2Idx = AMDGPU::getNamedOperandIdx(Op32, AMDGPU::OpName::src2); 3670 if (Op32Src2Idx != -1) { 3671 Inst32.add(*Src2); 3672 } else { 3673 // In the case of V_CNDMASK_B32_e32, the explicit operand src2 is 3674 // replaced with an implicit read of vcc or vcc_lo. The implicit read 3675 // of vcc was already added during the initial BuildMI, but we 3676 // 1) may need to change vcc to vcc_lo to preserve the original register 3677 // 2) have to preserve the original flags. 3678 fixImplicitOperands(*Inst32); 3679 copyFlagsToImplicitVCC(*Inst32, *Src2); 3680 } 3681 } 3682 3683 return Inst32; 3684 } 3685 3686 bool SIInstrInfo::usesConstantBus(const MachineRegisterInfo &MRI, 3687 const MachineOperand &MO, 3688 const MCOperandInfo &OpInfo) const { 3689 // Literal constants use the constant bus. 3690 //if (isLiteralConstantLike(MO, OpInfo)) 3691 // return true; 3692 if (MO.isImm()) 3693 return !isInlineConstant(MO, OpInfo); 3694 3695 if (!MO.isReg()) 3696 return true; // Misc other operands like FrameIndex 3697 3698 if (!MO.isUse()) 3699 return false; 3700 3701 if (MO.getReg().isVirtual()) 3702 return RI.isSGPRClass(MRI.getRegClass(MO.getReg())); 3703 3704 // Null is free 3705 if (MO.getReg() == AMDGPU::SGPR_NULL) 3706 return false; 3707 3708 // SGPRs use the constant bus 3709 if (MO.isImplicit()) { 3710 return MO.getReg() == AMDGPU::M0 || 3711 MO.getReg() == AMDGPU::VCC || 3712 MO.getReg() == AMDGPU::VCC_LO; 3713 } else { 3714 return AMDGPU::SReg_32RegClass.contains(MO.getReg()) || 3715 AMDGPU::SReg_64RegClass.contains(MO.getReg()); 3716 } 3717 } 3718 3719 static Register findImplicitSGPRRead(const MachineInstr &MI) { 3720 for (const MachineOperand &MO : MI.implicit_operands()) { 3721 // We only care about reads. 
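// Implicit reads of VCC/M0/FLAT_SCR matter because they consume the scalar
// constant bus, which is what callers of this helper are accounting for.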
3722 if (MO.isDef()) 3723 continue; 3724 3725 switch (MO.getReg()) { 3726 case AMDGPU::VCC: 3727 case AMDGPU::VCC_LO: 3728 case AMDGPU::VCC_HI: 3729 case AMDGPU::M0: 3730 case AMDGPU::FLAT_SCR: 3731 return MO.getReg(); 3732 3733 default: 3734 break; 3735 } 3736 } 3737 3738 return AMDGPU::NoRegister; 3739 } 3740 3741 static bool shouldReadExec(const MachineInstr &MI) { 3742 if (SIInstrInfo::isVALU(MI)) { 3743 switch (MI.getOpcode()) { 3744 case AMDGPU::V_READLANE_B32: 3745 case AMDGPU::V_WRITELANE_B32: 3746 return false; 3747 } 3748 3749 return true; 3750 } 3751 3752 if (MI.isPreISelOpcode() || 3753 SIInstrInfo::isGenericOpcode(MI.getOpcode()) || 3754 SIInstrInfo::isSALU(MI) || 3755 SIInstrInfo::isSMRD(MI)) 3756 return false; 3757 3758 return true; 3759 } 3760 3761 static bool isSubRegOf(const SIRegisterInfo &TRI, 3762 const MachineOperand &SuperVec, 3763 const MachineOperand &SubReg) { 3764 if (SubReg.getReg().isPhysical()) 3765 return TRI.isSubRegister(SuperVec.getReg(), SubReg.getReg()); 3766 3767 return SubReg.getSubReg() != AMDGPU::NoSubRegister && 3768 SubReg.getReg() == SuperVec.getReg(); 3769 } 3770 3771 bool SIInstrInfo::verifyInstruction(const MachineInstr &MI, 3772 StringRef &ErrInfo) const { 3773 uint16_t Opcode = MI.getOpcode(); 3774 if (SIInstrInfo::isGenericOpcode(MI.getOpcode())) 3775 return true; 3776 3777 const MachineFunction *MF = MI.getParent()->getParent(); 3778 const MachineRegisterInfo &MRI = MF->getRegInfo(); 3779 3780 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 3781 int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1); 3782 int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2); 3783 3784 // Make sure the number of operands is correct. 3785 const MCInstrDesc &Desc = get(Opcode); 3786 if (!Desc.isVariadic() && 3787 Desc.getNumOperands() != MI.getNumExplicitOperands()) { 3788 ErrInfo = "Instruction has wrong number of operands."; 3789 return false; 3790 } 3791 3792 if (MI.isInlineAsm()) { 3793 // Verify register classes for inlineasm constraints. 3794 for (unsigned I = InlineAsm::MIOp_FirstOperand, E = MI.getNumOperands(); 3795 I != E; ++I) { 3796 const TargetRegisterClass *RC = MI.getRegClassConstraint(I, this, &RI); 3797 if (!RC) 3798 continue; 3799 3800 const MachineOperand &Op = MI.getOperand(I); 3801 if (!Op.isReg()) 3802 continue; 3803 3804 Register Reg = Op.getReg(); 3805 if (!Reg.isVirtual() && !RC->contains(Reg)) { 3806 ErrInfo = "inlineasm operand has incorrect register class."; 3807 return false; 3808 } 3809 } 3810 3811 return true; 3812 } 3813 3814 if (isMIMG(MI) && MI.memoperands_empty() && MI.mayLoadOrStore()) { 3815 ErrInfo = "missing memory operand from MIMG instruction."; 3816 return false; 3817 } 3818 3819 // Make sure the register classes are correct. 3820 for (int i = 0, e = Desc.getNumOperands(); i != e; ++i) { 3821 const MachineOperand &MO = MI.getOperand(i); 3822 if (MO.isFPImm()) { 3823 ErrInfo = "FPImm Machine Operands are not supported. 
ISel should bitcast " 3824 "all fp values to integers."; 3825 return false; 3826 } 3827 3828 int RegClass = Desc.OpInfo[i].RegClass; 3829 3830 switch (Desc.OpInfo[i].OperandType) { 3831 case MCOI::OPERAND_REGISTER: 3832 if (MI.getOperand(i).isImm() || MI.getOperand(i).isGlobal()) { 3833 ErrInfo = "Illegal immediate value for operand."; 3834 return false; 3835 } 3836 break; 3837 case AMDGPU::OPERAND_REG_IMM_INT32: 3838 case AMDGPU::OPERAND_REG_IMM_FP32: 3839 break; 3840 case AMDGPU::OPERAND_REG_INLINE_C_INT32: 3841 case AMDGPU::OPERAND_REG_INLINE_C_FP32: 3842 case AMDGPU::OPERAND_REG_INLINE_C_INT64: 3843 case AMDGPU::OPERAND_REG_INLINE_C_FP64: 3844 case AMDGPU::OPERAND_REG_INLINE_C_INT16: 3845 case AMDGPU::OPERAND_REG_INLINE_C_FP16: 3846 case AMDGPU::OPERAND_REG_INLINE_AC_INT32: 3847 case AMDGPU::OPERAND_REG_INLINE_AC_FP32: 3848 case AMDGPU::OPERAND_REG_INLINE_AC_INT16: 3849 case AMDGPU::OPERAND_REG_INLINE_AC_FP16: 3850 case AMDGPU::OPERAND_REG_INLINE_AC_FP64: { 3851 if (!MO.isReg() && (!MO.isImm() || !isInlineConstant(MI, i))) { 3852 ErrInfo = "Illegal immediate value for operand."; 3853 return false; 3854 } 3855 break; 3856 } 3857 case MCOI::OPERAND_IMMEDIATE: 3858 case AMDGPU::OPERAND_KIMM32: 3859 // Check if this operand is an immediate. 3860 // FrameIndex operands will be replaced by immediates, so they are 3861 // allowed. 3862 if (!MI.getOperand(i).isImm() && !MI.getOperand(i).isFI()) { 3863 ErrInfo = "Expected immediate, but got non-immediate"; 3864 return false; 3865 } 3866 LLVM_FALLTHROUGH; 3867 default: 3868 continue; 3869 } 3870 3871 if (!MO.isReg()) 3872 continue; 3873 Register Reg = MO.getReg(); 3874 if (!Reg) 3875 continue; 3876 3877 // FIXME: Ideally we would have separate instruction definitions with the 3878 // aligned register constraint. 3879 // FIXME: We do not verify inline asm operands, but custom inline asm 3880 // verification is broken anyway 3881 if (ST.needsAlignedVGPRs()) { 3882 const TargetRegisterClass *RC = RI.getRegClassForReg(MRI, Reg); 3883 const bool IsVGPR = RI.hasVGPRs(RC); 3884 const bool IsAGPR = !IsVGPR && RI.hasAGPRs(RC); 3885 if ((IsVGPR || IsAGPR) && MO.getSubReg()) { 3886 const TargetRegisterClass *SubRC = 3887 RI.getSubRegClass(RC, MO.getSubReg()); 3888 RC = RI.getCompatibleSubRegClass(RC, SubRC, MO.getSubReg()); 3889 if (RC) 3890 RC = SubRC; 3891 } 3892 3893 // Check that this is the aligned version of the class. 
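// On subtargets with this requirement (e.g. gfx90a), 64-bit and wider
// VGPR/AGPR tuples must start at an even register; VReg_64_Align2 is the
// even-aligned variant of VReg_64.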
3894 if (!RC || !RI.isProperlyAlignedRC(*RC)) { 3895 ErrInfo = "Subtarget requires even aligned vector registers"; 3896 return false; 3897 } 3898 } 3899 3900 if (RegClass != -1) { 3901 if (Reg.isVirtual()) 3902 continue; 3903 3904 const TargetRegisterClass *RC = RI.getRegClass(RegClass); 3905 if (!RC->contains(Reg)) { 3906 ErrInfo = "Operand has incorrect register class."; 3907 return false; 3908 } 3909 } 3910 } 3911 3912 // Verify SDWA 3913 if (isSDWA(MI)) { 3914 if (!ST.hasSDWA()) { 3915 ErrInfo = "SDWA is not supported on this target"; 3916 return false; 3917 } 3918 3919 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 3920 3921 const int OpIndicies[] = { DstIdx, Src0Idx, Src1Idx, Src2Idx }; 3922 3923 for (int OpIdx: OpIndicies) { 3924 if (OpIdx == -1) 3925 continue; 3926 const MachineOperand &MO = MI.getOperand(OpIdx); 3927 3928 if (!ST.hasSDWAScalar()) { 3929 // Only VGPRS on VI 3930 if (!MO.isReg() || !RI.hasVGPRs(RI.getRegClassForReg(MRI, MO.getReg()))) { 3931 ErrInfo = "Only VGPRs allowed as operands in SDWA instructions on VI"; 3932 return false; 3933 } 3934 } else { 3935 // No immediates on GFX9 3936 if (!MO.isReg()) { 3937 ErrInfo = 3938 "Only reg allowed as operands in SDWA instructions on GFX9+"; 3939 return false; 3940 } 3941 } 3942 } 3943 3944 if (!ST.hasSDWAOmod()) { 3945 // No omod allowed on VI 3946 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3947 if (OMod != nullptr && 3948 (!OMod->isImm() || OMod->getImm() != 0)) { 3949 ErrInfo = "OMod not allowed in SDWA instructions on VI"; 3950 return false; 3951 } 3952 } 3953 3954 uint16_t BasicOpcode = AMDGPU::getBasicFromSDWAOp(Opcode); 3955 if (isVOPC(BasicOpcode)) { 3956 if (!ST.hasSDWASdst() && DstIdx != -1) { 3957 // Only vcc allowed as dst on VI for VOPC 3958 const MachineOperand &Dst = MI.getOperand(DstIdx); 3959 if (!Dst.isReg() || Dst.getReg() != AMDGPU::VCC) { 3960 ErrInfo = "Only VCC allowed as dst in SDWA instructions on VI"; 3961 return false; 3962 } 3963 } else if (!ST.hasSDWAOutModsVOPC()) { 3964 // No clamp allowed on GFX9 for VOPC 3965 const MachineOperand *Clamp = getNamedOperand(MI, AMDGPU::OpName::clamp); 3966 if (Clamp && (!Clamp->isImm() || Clamp->getImm() != 0)) { 3967 ErrInfo = "Clamp not allowed in VOPC SDWA instructions on VI"; 3968 return false; 3969 } 3970 3971 // No omod allowed on GFX9 for VOPC 3972 const MachineOperand *OMod = getNamedOperand(MI, AMDGPU::OpName::omod); 3973 if (OMod && (!OMod->isImm() || OMod->getImm() != 0)) { 3974 ErrInfo = "OMod not allowed in VOPC SDWA instructions on VI"; 3975 return false; 3976 } 3977 } 3978 } 3979 3980 const MachineOperand *DstUnused = getNamedOperand(MI, AMDGPU::OpName::dst_unused); 3981 if (DstUnused && DstUnused->isImm() && 3982 DstUnused->getImm() == AMDGPU::SDWA::UNUSED_PRESERVE) { 3983 const MachineOperand &Dst = MI.getOperand(DstIdx); 3984 if (!Dst.isReg() || !Dst.isTied()) { 3985 ErrInfo = "Dst register should have tied register"; 3986 return false; 3987 } 3988 3989 const MachineOperand &TiedMO = 3990 MI.getOperand(MI.findTiedOperandIdx(DstIdx)); 3991 if (!TiedMO.isReg() || !TiedMO.isImplicit() || !TiedMO.isUse()) { 3992 ErrInfo = 3993 "Dst register should be tied to implicit use of preserved register"; 3994 return false; 3995 } else if (TiedMO.getReg().isPhysical() && 3996 Dst.getReg() != TiedMO.getReg()) { 3997 ErrInfo = "Dst register should use same physical register as preserved"; 3998 return false; 3999 } 4000 } 4001 } 4002 4003 // Verify MIMG 4004 if (isMIMG(MI.getOpcode()) && !MI.mayStore()) { 
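// The dmask operand selects which channels are returned; together with
// TFE/LWE and D16 packing it determines how many VGPRs the image op writes.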
4005 // Ensure that the return type used is large enough for all the options 4006 // being used TFE/LWE require an extra result register. 4007 const MachineOperand *DMask = getNamedOperand(MI, AMDGPU::OpName::dmask); 4008 if (DMask) { 4009 uint64_t DMaskImm = DMask->getImm(); 4010 uint32_t RegCount = 4011 isGather4(MI.getOpcode()) ? 4 : countPopulation(DMaskImm); 4012 const MachineOperand *TFE = getNamedOperand(MI, AMDGPU::OpName::tfe); 4013 const MachineOperand *LWE = getNamedOperand(MI, AMDGPU::OpName::lwe); 4014 const MachineOperand *D16 = getNamedOperand(MI, AMDGPU::OpName::d16); 4015 4016 // Adjust for packed 16 bit values 4017 if (D16 && D16->getImm() && !ST.hasUnpackedD16VMem()) 4018 RegCount >>= 1; 4019 4020 // Adjust if using LWE or TFE 4021 if ((LWE && LWE->getImm()) || (TFE && TFE->getImm())) 4022 RegCount += 1; 4023 4024 const uint32_t DstIdx = 4025 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::vdata); 4026 const MachineOperand &Dst = MI.getOperand(DstIdx); 4027 if (Dst.isReg()) { 4028 const TargetRegisterClass *DstRC = getOpRegClass(MI, DstIdx); 4029 uint32_t DstSize = RI.getRegSizeInBits(*DstRC) / 32; 4030 if (RegCount > DstSize) { 4031 ErrInfo = "MIMG instruction returns too many registers for dst " 4032 "register class"; 4033 return false; 4034 } 4035 } 4036 } 4037 } 4038 4039 // Verify VOP*. Ignore multiple sgpr operands on writelane. 4040 if (Desc.getOpcode() != AMDGPU::V_WRITELANE_B32 4041 && (isVOP1(MI) || isVOP2(MI) || isVOP3(MI) || isVOPC(MI) || isSDWA(MI))) { 4042 // Only look at the true operands. Only a real operand can use the constant 4043 // bus, and we don't want to check pseudo-operands like the source modifier 4044 // flags. 4045 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx }; 4046 4047 unsigned ConstantBusCount = 0; 4048 bool UsesLiteral = false; 4049 const MachineOperand *LiteralVal = nullptr; 4050 4051 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) 4052 ++ConstantBusCount; 4053 4054 SmallVector<Register, 2> SGPRsUsed; 4055 Register SGPRUsed; 4056 4057 for (int OpIdx : OpIndices) { 4058 if (OpIdx == -1) 4059 break; 4060 const MachineOperand &MO = MI.getOperand(OpIdx); 4061 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4062 if (MO.isReg()) { 4063 SGPRUsed = MO.getReg(); 4064 if (llvm::all_of(SGPRsUsed, [SGPRUsed](unsigned SGPR) { 4065 return SGPRUsed != SGPR; 4066 })) { 4067 ++ConstantBusCount; 4068 SGPRsUsed.push_back(SGPRUsed); 4069 } 4070 } else { 4071 if (!UsesLiteral) { 4072 ++ConstantBusCount; 4073 UsesLiteral = true; 4074 LiteralVal = &MO; 4075 } else if (!MO.isIdenticalTo(*LiteralVal)) { 4076 assert(isVOP3(MI)); 4077 ErrInfo = "VOP3 instruction uses more than one literal"; 4078 return false; 4079 } 4080 } 4081 } 4082 } 4083 4084 SGPRUsed = findImplicitSGPRRead(MI); 4085 if (SGPRUsed != AMDGPU::NoRegister) { 4086 // Implicit uses may safely overlap true overands 4087 if (llvm::all_of(SGPRsUsed, [this, SGPRUsed](unsigned SGPR) { 4088 return !RI.regsOverlap(SGPRUsed, SGPR); 4089 })) { 4090 ++ConstantBusCount; 4091 SGPRsUsed.push_back(SGPRUsed); 4092 } 4093 } 4094 4095 // v_writelane_b32 is an exception from constant bus restriction: 4096 // vsrc0 can be sgpr, const or m0 and lane select sgpr, m0 or inline-const 4097 if (ConstantBusCount > ST.getConstantBusLimit(Opcode) && 4098 Opcode != AMDGPU::V_WRITELANE_B32) { 4099 ErrInfo = "VOP* instruction violates constant bus restriction"; 4100 return false; 4101 } 4102 4103 if (isVOP3(MI) && UsesLiteral && !ST.hasVOP3Literal()) { 4104 ErrInfo = "VOP3 
instruction uses literal"; 4105 return false; 4106 } 4107 } 4108 4109 // Special case for writelane - this can break the multiple constant bus rule, 4110 // but still can't use more than one SGPR register 4111 if (Desc.getOpcode() == AMDGPU::V_WRITELANE_B32) { 4112 unsigned SGPRCount = 0; 4113 Register SGPRUsed = AMDGPU::NoRegister; 4114 4115 for (int OpIdx : {Src0Idx, Src1Idx, Src2Idx}) { 4116 if (OpIdx == -1) 4117 break; 4118 4119 const MachineOperand &MO = MI.getOperand(OpIdx); 4120 4121 if (usesConstantBus(MRI, MO, MI.getDesc().OpInfo[OpIdx])) { 4122 if (MO.isReg() && MO.getReg() != AMDGPU::M0) { 4123 if (MO.getReg() != SGPRUsed) 4124 ++SGPRCount; 4125 SGPRUsed = MO.getReg(); 4126 } 4127 } 4128 if (SGPRCount > ST.getConstantBusLimit(Opcode)) { 4129 ErrInfo = "WRITELANE instruction violates constant bus restriction"; 4130 return false; 4131 } 4132 } 4133 } 4134 4135 // Verify misc. restrictions on specific instructions. 4136 if (Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F32_e64 || 4137 Desc.getOpcode() == AMDGPU::V_DIV_SCALE_F64_e64) { 4138 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4139 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4140 const MachineOperand &Src2 = MI.getOperand(Src2Idx); 4141 if (Src0.isReg() && Src1.isReg() && Src2.isReg()) { 4142 if (!compareMachineOp(Src0, Src1) && 4143 !compareMachineOp(Src0, Src2)) { 4144 ErrInfo = "v_div_scale_{f32|f64} require src0 = src1 or src2"; 4145 return false; 4146 } 4147 } 4148 if ((getNamedOperand(MI, AMDGPU::OpName::src0_modifiers)->getImm() & 4149 SISrcMods::ABS) || 4150 (getNamedOperand(MI, AMDGPU::OpName::src1_modifiers)->getImm() & 4151 SISrcMods::ABS) || 4152 (getNamedOperand(MI, AMDGPU::OpName::src2_modifiers)->getImm() & 4153 SISrcMods::ABS)) { 4154 ErrInfo = "ABS not allowed in VOP3B instructions"; 4155 return false; 4156 } 4157 } 4158 4159 if (isSOP2(MI) || isSOPC(MI)) { 4160 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4161 const MachineOperand &Src1 = MI.getOperand(Src1Idx); 4162 unsigned Immediates = 0; 4163 4164 if (!Src0.isReg() && 4165 !isInlineConstant(Src0, Desc.OpInfo[Src0Idx].OperandType)) 4166 Immediates++; 4167 if (!Src1.isReg() && 4168 !isInlineConstant(Src1, Desc.OpInfo[Src1Idx].OperandType)) 4169 Immediates++; 4170 4171 if (Immediates > 1) { 4172 ErrInfo = "SOP2/SOPC instruction requires too many immediate constants"; 4173 return false; 4174 } 4175 } 4176 4177 if (isSOPK(MI)) { 4178 auto Op = getNamedOperand(MI, AMDGPU::OpName::simm16); 4179 if (Desc.isBranch()) { 4180 if (!Op->isMBB()) { 4181 ErrInfo = "invalid branch target for SOPK instruction"; 4182 return false; 4183 } 4184 } else { 4185 uint64_t Imm = Op->getImm(); 4186 if (sopkIsZext(MI)) { 4187 if (!isUInt<16>(Imm)) { 4188 ErrInfo = "invalid immediate for SOPK instruction"; 4189 return false; 4190 } 4191 } else { 4192 if (!isInt<16>(Imm)) { 4193 ErrInfo = "invalid immediate for SOPK instruction"; 4194 return false; 4195 } 4196 } 4197 } 4198 } 4199 4200 if (Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e32 || 4201 Desc.getOpcode() == AMDGPU::V_MOVRELS_B32_e64 || 4202 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4203 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64) { 4204 const bool IsDst = Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e32 || 4205 Desc.getOpcode() == AMDGPU::V_MOVRELD_B32_e64; 4206 4207 const unsigned StaticNumOps = Desc.getNumOperands() + 4208 Desc.getNumImplicitUses(); 4209 const unsigned NumImplicitOps = IsDst ? 2 : 1; 4210 4211 // Allow additional implicit operands. 
This allows a fixup done by the post 4212 // RA scheduler where the main implicit operand is killed and implicit-defs 4213 // are added for sub-registers that remain live after this instruction. 4214 if (MI.getNumOperands() < StaticNumOps + NumImplicitOps) { 4215 ErrInfo = "missing implicit register operands"; 4216 return false; 4217 } 4218 4219 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4220 if (IsDst) { 4221 if (!Dst->isUse()) { 4222 ErrInfo = "v_movreld_b32 vdst should be a use operand"; 4223 return false; 4224 } 4225 4226 unsigned UseOpIdx; 4227 if (!MI.isRegTiedToUseOperand(StaticNumOps, &UseOpIdx) || 4228 UseOpIdx != StaticNumOps + 1) { 4229 ErrInfo = "movrel implicit operands should be tied"; 4230 return false; 4231 } 4232 } 4233 4234 const MachineOperand &Src0 = MI.getOperand(Src0Idx); 4235 const MachineOperand &ImpUse 4236 = MI.getOperand(StaticNumOps + NumImplicitOps - 1); 4237 if (!ImpUse.isReg() || !ImpUse.isUse() || 4238 !isSubRegOf(RI, ImpUse, IsDst ? *Dst : Src0)) { 4239 ErrInfo = "src0 should be subreg of implicit vector use"; 4240 return false; 4241 } 4242 } 4243 4244 // Make sure we aren't losing exec uses in the td files. This mostly requires 4245 // being careful when using let Uses to try to add other use registers. 4246 if (shouldReadExec(MI)) { 4247 if (!MI.hasRegisterImplicitUseOperand(AMDGPU::EXEC)) { 4248 ErrInfo = "VALU instruction does not implicitly read exec mask"; 4249 return false; 4250 } 4251 } 4252 4253 if (isSMRD(MI)) { 4254 if (MI.mayStore()) { 4255 // The register offset form of scalar stores may only use m0 as the 4256 // soffset register. 4257 const MachineOperand *Soff = getNamedOperand(MI, AMDGPU::OpName::soff); 4258 if (Soff && Soff->getReg() != AMDGPU::M0) { 4259 ErrInfo = "scalar stores must use m0 as offset register"; 4260 return false; 4261 } 4262 } 4263 } 4264 4265 if (isFLAT(MI) && !ST.hasFlatInstOffsets()) { 4266 const MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 4267 if (Offset->getImm() != 0) { 4268 ErrInfo = "subtarget does not support offsets in flat instructions"; 4269 return false; 4270 } 4271 } 4272 4273 if (isMIMG(MI)) { 4274 const MachineOperand *DimOp = getNamedOperand(MI, AMDGPU::OpName::dim); 4275 if (DimOp) { 4276 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opcode, 4277 AMDGPU::OpName::vaddr0); 4278 int SRsrcIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::srsrc); 4279 const AMDGPU::MIMGInfo *Info = AMDGPU::getMIMGInfo(Opcode); 4280 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 4281 AMDGPU::getMIMGBaseOpcodeInfo(Info->BaseOpcode); 4282 const AMDGPU::MIMGDimInfo *Dim = 4283 AMDGPU::getMIMGDimInfoByEncoding(DimOp->getImm()); 4284 4285 if (!Dim) { 4286 ErrInfo = "dim is out of range"; 4287 return false; 4288 } 4289 4290 bool IsA16 = false; 4291 if (ST.hasR128A16()) { 4292 const MachineOperand *R128A16 = getNamedOperand(MI, AMDGPU::OpName::r128); 4293 IsA16 = R128A16->getImm() != 0; 4294 } else if (ST.hasGFX10A16()) { 4295 const MachineOperand *A16 = getNamedOperand(MI, AMDGPU::OpName::a16); 4296 IsA16 = A16->getImm() != 0; 4297 } 4298 4299 bool IsNSA = SRsrcIdx - VAddr0Idx > 1; 4300 4301 unsigned AddrWords = 4302 AMDGPU::getAddrSizeMIMGOp(BaseOpcode, Dim, IsA16, ST.hasG16()); 4303 4304 unsigned VAddrWords; 4305 if (IsNSA) { 4306 VAddrWords = SRsrcIdx - VAddr0Idx; 4307 } else { 4308 const TargetRegisterClass *RC = getOpRegClass(MI, VAddr0Idx); 4309 VAddrWords = MRI.getTargetRegisterInfo()->getRegSizeInBits(*RC) / 32; 4310 if (AddrWords > 8) 4311 AddrWords = 16; 4312 } 4313 
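// In NSA form each address component occupies its own VGPR operand, so the
// operand count gives the address size directly; otherwise it is derived from
// the width of the packed vaddr register class.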
4314 if (VAddrWords != AddrWords) { 4315 LLVM_DEBUG(dbgs() << "bad vaddr size, expected " << AddrWords 4316 << " but got " << VAddrWords << "\n"); 4317 ErrInfo = "bad vaddr size"; 4318 return false; 4319 } 4320 } 4321 } 4322 4323 const MachineOperand *DppCt = getNamedOperand(MI, AMDGPU::OpName::dpp_ctrl); 4324 if (DppCt) { 4325 using namespace AMDGPU::DPP; 4326 4327 unsigned DC = DppCt->getImm(); 4328 if (DC == DppCtrl::DPP_UNUSED1 || DC == DppCtrl::DPP_UNUSED2 || 4329 DC == DppCtrl::DPP_UNUSED3 || DC > DppCtrl::DPP_LAST || 4330 (DC >= DppCtrl::DPP_UNUSED4_FIRST && DC <= DppCtrl::DPP_UNUSED4_LAST) || 4331 (DC >= DppCtrl::DPP_UNUSED5_FIRST && DC <= DppCtrl::DPP_UNUSED5_LAST) || 4332 (DC >= DppCtrl::DPP_UNUSED6_FIRST && DC <= DppCtrl::DPP_UNUSED6_LAST) || 4333 (DC >= DppCtrl::DPP_UNUSED7_FIRST && DC <= DppCtrl::DPP_UNUSED7_LAST) || 4334 (DC >= DppCtrl::DPP_UNUSED8_FIRST && DC <= DppCtrl::DPP_UNUSED8_LAST)) { 4335 ErrInfo = "Invalid dpp_ctrl value"; 4336 return false; 4337 } 4338 if (DC >= DppCtrl::WAVE_SHL1 && DC <= DppCtrl::WAVE_ROR1 && 4339 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4340 ErrInfo = "Invalid dpp_ctrl value: " 4341 "wavefront shifts are not supported on GFX10+"; 4342 return false; 4343 } 4344 if (DC >= DppCtrl::BCAST15 && DC <= DppCtrl::BCAST31 && 4345 ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 4346 ErrInfo = "Invalid dpp_ctrl value: " 4347 "broadcasts are not supported on GFX10+"; 4348 return false; 4349 } 4350 if (DC >= DppCtrl::ROW_SHARE_FIRST && DC <= DppCtrl::ROW_XMASK_LAST && 4351 ST.getGeneration() < AMDGPUSubtarget::GFX10) { 4352 if (DC >= DppCtrl::ROW_NEWBCAST_FIRST && 4353 DC <= DppCtrl::ROW_NEWBCAST_LAST && 4354 !ST.hasGFX90AInsts()) { 4355 ErrInfo = "Invalid dpp_ctrl value: " 4356 "row_newbroadcast/row_share is not supported before " 4357 "GFX90A/GFX10"; 4358 return false; 4359 } else if (DC > DppCtrl::ROW_NEWBCAST_LAST || !ST.hasGFX90AInsts()) { 4360 ErrInfo = "Invalid dpp_ctrl value: " 4361 "row_share and row_xmask are not supported before GFX10"; 4362 return false; 4363 } 4364 } 4365 4366 int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst); 4367 int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); 4368 4369 if (Opcode != AMDGPU::V_MOV_B64_DPP_PSEUDO && 4370 ((DstIdx >= 0 && 4371 (Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64RegClassID || 4372 Desc.OpInfo[DstIdx].RegClass == AMDGPU::VReg_64_Align2RegClassID)) || 4373 ((Src0Idx >= 0 && 4374 (Desc.OpInfo[Src0Idx].RegClass == AMDGPU::VReg_64RegClassID || 4375 Desc.OpInfo[Src0Idx].RegClass == 4376 AMDGPU::VReg_64_Align2RegClassID)))) && 4377 !AMDGPU::isLegal64BitDPPControl(DC)) { 4378 ErrInfo = "Invalid dpp_ctrl value: " 4379 "64 bit dpp only support row_newbcast"; 4380 return false; 4381 } 4382 } 4383 4384 if ((MI.mayStore() || MI.mayLoad()) && !isVGPRSpill(MI)) { 4385 const MachineOperand *Dst = getNamedOperand(MI, AMDGPU::OpName::vdst); 4386 uint16_t DataNameIdx = isDS(Opcode) ? 
AMDGPU::OpName::data0 4387 : AMDGPU::OpName::vdata; 4388 const MachineOperand *Data = getNamedOperand(MI, DataNameIdx); 4389 const MachineOperand *Data2 = getNamedOperand(MI, AMDGPU::OpName::data1); 4390 if (Data && !Data->isReg()) 4391 Data = nullptr; 4392 4393 if (ST.hasGFX90AInsts()) { 4394 if (Dst && Data && 4395 (RI.isAGPR(MRI, Dst->getReg()) != RI.isAGPR(MRI, Data->getReg()))) { 4396 ErrInfo = "Invalid register class: " 4397 "vdata and vdst should be both VGPR or AGPR"; 4398 return false; 4399 } 4400 if (Data && Data2 && 4401 (RI.isAGPR(MRI, Data->getReg()) != RI.isAGPR(MRI, Data2->getReg()))) { 4402 ErrInfo = "Invalid register class: " 4403 "both data operands should be VGPR or AGPR"; 4404 return false; 4405 } 4406 } else { 4407 if ((Dst && RI.isAGPR(MRI, Dst->getReg())) || 4408 (Data && RI.isAGPR(MRI, Data->getReg())) || 4409 (Data2 && RI.isAGPR(MRI, Data2->getReg()))) { 4410 ErrInfo = "Invalid register class: " 4411 "agpr loads and stores not supported on this GPU"; 4412 return false; 4413 } 4414 } 4415 } 4416 4417 if (ST.needsAlignedVGPRs() && 4418 (MI.getOpcode() == AMDGPU::DS_GWS_INIT || 4419 MI.getOpcode() == AMDGPU::DS_GWS_SEMA_BR || 4420 MI.getOpcode() == AMDGPU::DS_GWS_BARRIER)) { 4421 const MachineOperand *Op = getNamedOperand(MI, AMDGPU::OpName::data0); 4422 Register Reg = Op->getReg(); 4423 bool Aligned = true; 4424 if (Reg.isPhysical()) { 4425 Aligned = !(RI.getHWRegIndex(Reg) & 1); 4426 } else { 4427 const TargetRegisterClass &RC = *MRI.getRegClass(Reg); 4428 Aligned = RI.getRegSizeInBits(RC) > 32 && RI.isProperlyAlignedRC(RC) && 4429 !(RI.getChannelFromSubReg(Op->getSubReg()) & 1); 4430 } 4431 4432 if (!Aligned) { 4433 ErrInfo = "Subtarget requires even aligned vector registers " 4434 "for DS_GWS instructions"; 4435 return false; 4436 } 4437 } 4438 4439 return true; 4440 } 4441 4442 unsigned SIInstrInfo::getVALUOp(const MachineInstr &MI) const { 4443 switch (MI.getOpcode()) { 4444 default: return AMDGPU::INSTRUCTION_LIST_END; 4445 case AMDGPU::REG_SEQUENCE: return AMDGPU::REG_SEQUENCE; 4446 case AMDGPU::COPY: return AMDGPU::COPY; 4447 case AMDGPU::PHI: return AMDGPU::PHI; 4448 case AMDGPU::INSERT_SUBREG: return AMDGPU::INSERT_SUBREG; 4449 case AMDGPU::WQM: return AMDGPU::WQM; 4450 case AMDGPU::SOFT_WQM: return AMDGPU::SOFT_WQM; 4451 case AMDGPU::STRICT_WWM: return AMDGPU::STRICT_WWM; 4452 case AMDGPU::STRICT_WQM: return AMDGPU::STRICT_WQM; 4453 case AMDGPU::S_MOV_B32: { 4454 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4455 return MI.getOperand(1).isReg() || 4456 RI.isAGPR(MRI, MI.getOperand(0).getReg()) ? 4457 AMDGPU::COPY : AMDGPU::V_MOV_B32_e32; 4458 } 4459 case AMDGPU::S_ADD_I32: 4460 return ST.hasAddNoCarry() ? AMDGPU::V_ADD_U32_e64 : AMDGPU::V_ADD_CO_U32_e32; 4461 case AMDGPU::S_ADDC_U32: 4462 return AMDGPU::V_ADDC_U32_e32; 4463 case AMDGPU::S_SUB_I32: 4464 return ST.hasAddNoCarry() ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_SUB_CO_U32_e32; 4465 // FIXME: These are not consistently handled, and selected when the carry is 4466 // used. 
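// S_ADD_U32/S_SUB_U32 define SCC as a carry-out, so they must map to the
// carry-out (_CO_) VALU forms even on subtargets with carryless V_ADD_U32.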
4467 case AMDGPU::S_ADD_U32: 4468 return AMDGPU::V_ADD_CO_U32_e32; 4469 case AMDGPU::S_SUB_U32: 4470 return AMDGPU::V_SUB_CO_U32_e32; 4471 case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32; 4472 case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_U32_e64; 4473 case AMDGPU::S_MUL_HI_U32: return AMDGPU::V_MUL_HI_U32_e64; 4474 case AMDGPU::S_MUL_HI_I32: return AMDGPU::V_MUL_HI_I32_e64; 4475 case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e64; 4476 case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e64; 4477 case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e64; 4478 case AMDGPU::S_XNOR_B32: 4479 return ST.hasDLInsts() ? AMDGPU::V_XNOR_B32_e64 : AMDGPU::INSTRUCTION_LIST_END; 4480 case AMDGPU::S_MIN_I32: return AMDGPU::V_MIN_I32_e64; 4481 case AMDGPU::S_MIN_U32: return AMDGPU::V_MIN_U32_e64; 4482 case AMDGPU::S_MAX_I32: return AMDGPU::V_MAX_I32_e64; 4483 case AMDGPU::S_MAX_U32: return AMDGPU::V_MAX_U32_e64; 4484 case AMDGPU::S_ASHR_I32: return AMDGPU::V_ASHR_I32_e32; 4485 case AMDGPU::S_ASHR_I64: return AMDGPU::V_ASHR_I64_e64; 4486 case AMDGPU::S_LSHL_B32: return AMDGPU::V_LSHL_B32_e32; 4487 case AMDGPU::S_LSHL_B64: return AMDGPU::V_LSHL_B64_e64; 4488 case AMDGPU::S_LSHR_B32: return AMDGPU::V_LSHR_B32_e32; 4489 case AMDGPU::S_LSHR_B64: return AMDGPU::V_LSHR_B64_e64; 4490 case AMDGPU::S_SEXT_I32_I8: return AMDGPU::V_BFE_I32_e64; 4491 case AMDGPU::S_SEXT_I32_I16: return AMDGPU::V_BFE_I32_e64; 4492 case AMDGPU::S_BFE_U32: return AMDGPU::V_BFE_U32_e64; 4493 case AMDGPU::S_BFE_I32: return AMDGPU::V_BFE_I32_e64; 4494 case AMDGPU::S_BFM_B32: return AMDGPU::V_BFM_B32_e64; 4495 case AMDGPU::S_BREV_B32: return AMDGPU::V_BFREV_B32_e32; 4496 case AMDGPU::S_NOT_B32: return AMDGPU::V_NOT_B32_e32; 4497 case AMDGPU::S_NOT_B64: return AMDGPU::V_NOT_B32_e32; 4498 case AMDGPU::S_CMP_EQ_I32: return AMDGPU::V_CMP_EQ_I32_e64; 4499 case AMDGPU::S_CMP_LG_I32: return AMDGPU::V_CMP_NE_I32_e64; 4500 case AMDGPU::S_CMP_GT_I32: return AMDGPU::V_CMP_GT_I32_e64; 4501 case AMDGPU::S_CMP_GE_I32: return AMDGPU::V_CMP_GE_I32_e64; 4502 case AMDGPU::S_CMP_LT_I32: return AMDGPU::V_CMP_LT_I32_e64; 4503 case AMDGPU::S_CMP_LE_I32: return AMDGPU::V_CMP_LE_I32_e64; 4504 case AMDGPU::S_CMP_EQ_U32: return AMDGPU::V_CMP_EQ_U32_e64; 4505 case AMDGPU::S_CMP_LG_U32: return AMDGPU::V_CMP_NE_U32_e64; 4506 case AMDGPU::S_CMP_GT_U32: return AMDGPU::V_CMP_GT_U32_e64; 4507 case AMDGPU::S_CMP_GE_U32: return AMDGPU::V_CMP_GE_U32_e64; 4508 case AMDGPU::S_CMP_LT_U32: return AMDGPU::V_CMP_LT_U32_e64; 4509 case AMDGPU::S_CMP_LE_U32: return AMDGPU::V_CMP_LE_U32_e64; 4510 case AMDGPU::S_CMP_EQ_U64: return AMDGPU::V_CMP_EQ_U64_e64; 4511 case AMDGPU::S_CMP_LG_U64: return AMDGPU::V_CMP_NE_U64_e64; 4512 case AMDGPU::S_BCNT1_I32_B32: return AMDGPU::V_BCNT_U32_B32_e64; 4513 case AMDGPU::S_FF1_I32_B32: return AMDGPU::V_FFBL_B32_e32; 4514 case AMDGPU::S_FLBIT_I32_B32: return AMDGPU::V_FFBH_U32_e32; 4515 case AMDGPU::S_FLBIT_I32: return AMDGPU::V_FFBH_I32_e64; 4516 case AMDGPU::S_CBRANCH_SCC0: return AMDGPU::S_CBRANCH_VCCZ; 4517 case AMDGPU::S_CBRANCH_SCC1: return AMDGPU::S_CBRANCH_VCCNZ; 4518 } 4519 llvm_unreachable( 4520 "Unexpected scalar opcode without corresponding vector one!"); 4521 } 4522 4523 static unsigned adjustAllocatableRegClass(const GCNSubtarget &ST, 4524 const MachineRegisterInfo &MRI, 4525 const MCInstrDesc &TID, 4526 unsigned RCID, 4527 bool IsAllocatable) { 4528 if ((IsAllocatable || !ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4529 (TID.mayLoad() || TID.mayStore() || 4530 (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::MIMG)))) { 4531 
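// Map the combined VGPR/AGPR (AV_*) classes down to the equivalent VGPR-only
// classes.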
switch (RCID) { 4532 case AMDGPU::AV_32RegClassID: return AMDGPU::VGPR_32RegClassID; 4533 case AMDGPU::AV_64RegClassID: return AMDGPU::VReg_64RegClassID; 4534 case AMDGPU::AV_96RegClassID: return AMDGPU::VReg_96RegClassID; 4535 case AMDGPU::AV_128RegClassID: return AMDGPU::VReg_128RegClassID; 4536 case AMDGPU::AV_160RegClassID: return AMDGPU::VReg_160RegClassID; 4537 default: 4538 break; 4539 } 4540 } 4541 return RCID; 4542 } 4543 4544 const TargetRegisterClass *SIInstrInfo::getRegClass(const MCInstrDesc &TID, 4545 unsigned OpNum, const TargetRegisterInfo *TRI, 4546 const MachineFunction &MF) 4547 const { 4548 if (OpNum >= TID.getNumOperands()) 4549 return nullptr; 4550 auto RegClass = TID.OpInfo[OpNum].RegClass; 4551 bool IsAllocatable = false; 4552 if (TID.TSFlags & (SIInstrFlags::DS | SIInstrFlags::FLAT)) { 4553 // vdst and vdata should both be VGPR or AGPR, same for the DS instructions 4554 // with two data operands. Request a register class constrained to VGPR only 4555 // if both operands are present, as Machine Copy Propagation cannot check this 4556 // constraint, and possibly other passes cannot either. 4557 // 4558 // The check is limited to FLAT and DS because atomics in non-flat encoding 4559 // have their vdst and vdata tied to be the same register. 4560 const int VDstIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4561 AMDGPU::OpName::vdst); 4562 const int DataIdx = AMDGPU::getNamedOperandIdx(TID.Opcode, 4563 (TID.TSFlags & SIInstrFlags::DS) ? AMDGPU::OpName::data0 4564 : AMDGPU::OpName::vdata); 4565 if (DataIdx != -1) { 4566 IsAllocatable = VDstIdx != -1 || 4567 AMDGPU::getNamedOperandIdx(TID.Opcode, 4568 AMDGPU::OpName::data1) != -1; 4569 } 4570 } 4571 RegClass = adjustAllocatableRegClass(ST, MF.getRegInfo(), TID, RegClass, 4572 IsAllocatable); 4573 return RI.getRegClass(RegClass); 4574 } 4575 4576 const TargetRegisterClass *SIInstrInfo::getOpRegClass(const MachineInstr &MI, 4577 unsigned OpNo) const { 4578 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 4579 const MCInstrDesc &Desc = get(MI.getOpcode()); 4580 if (MI.isVariadic() || OpNo >= Desc.getNumOperands() || 4581 Desc.OpInfo[OpNo].RegClass == -1) { 4582 Register Reg = MI.getOperand(OpNo).getReg(); 4583 4584 if (Reg.isVirtual()) 4585 return MRI.getRegClass(Reg); 4586 return RI.getPhysRegClass(Reg); 4587 } 4588 4589 unsigned RCID = Desc.OpInfo[OpNo].RegClass; 4590 RCID = adjustAllocatableRegClass(ST, MRI, Desc, RCID, true); 4591 return RI.getRegClass(RCID); 4592 } 4593 4594 void SIInstrInfo::legalizeOpWithMove(MachineInstr &MI, unsigned OpIdx) const { 4595 MachineBasicBlock::iterator I = MI; 4596 MachineBasicBlock *MBB = MI.getParent(); 4597 MachineOperand &MO = MI.getOperand(OpIdx); 4598 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 4599 unsigned RCID = get(MI.getOpcode()).OpInfo[OpIdx].RegClass; 4600 const TargetRegisterClass *RC = RI.getRegClass(RCID); 4601 unsigned Size = RI.getRegSizeInBits(*RC); 4602 unsigned Opcode = (Size == 64) ? AMDGPU::V_MOV_B64_PSEUDO : AMDGPU::V_MOV_B32_e32; 4603 if (MO.isReg()) 4604 Opcode = AMDGPU::COPY; 4605 else if (RI.isSGPRClass(RC)) 4606 Opcode = (Size == 64) ?
AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 4607 4608 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC); 4609 const TargetRegisterClass *VRC64 = RI.getVGPR64Class(); 4610 if (RI.getCommonSubClass(VRC64, VRC)) 4611 VRC = VRC64; 4612 else 4613 VRC = &AMDGPU::VGPR_32RegClass; 4614 4615 Register Reg = MRI.createVirtualRegister(VRC); 4616 DebugLoc DL = MBB->findDebugLoc(I); 4617 BuildMI(*MI.getParent(), I, DL, get(Opcode), Reg).add(MO); 4618 MO.ChangeToRegister(Reg, false); 4619 } 4620 4621 unsigned SIInstrInfo::buildExtractSubReg(MachineBasicBlock::iterator MI, 4622 MachineRegisterInfo &MRI, 4623 MachineOperand &SuperReg, 4624 const TargetRegisterClass *SuperRC, 4625 unsigned SubIdx, 4626 const TargetRegisterClass *SubRC) 4627 const { 4628 MachineBasicBlock *MBB = MI->getParent(); 4629 DebugLoc DL = MI->getDebugLoc(); 4630 Register SubReg = MRI.createVirtualRegister(SubRC); 4631 4632 if (SuperReg.getSubReg() == AMDGPU::NoSubRegister) { 4633 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4634 .addReg(SuperReg.getReg(), 0, SubIdx); 4635 return SubReg; 4636 } 4637 4638 // Just in case the super register is itself a sub-register, copy it to a new 4639 // value so we don't need to worry about merging its subreg index with the 4640 // SubIdx passed to this function. The register coalescer should be able to 4641 // eliminate this extra copy. 4642 Register NewSuperReg = MRI.createVirtualRegister(SuperRC); 4643 4644 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), NewSuperReg) 4645 .addReg(SuperReg.getReg(), 0, SuperReg.getSubReg()); 4646 4647 BuildMI(*MBB, MI, DL, get(TargetOpcode::COPY), SubReg) 4648 .addReg(NewSuperReg, 0, SubIdx); 4649 4650 return SubReg; 4651 } 4652 4653 MachineOperand SIInstrInfo::buildExtractSubRegOrImm( 4654 MachineBasicBlock::iterator MII, 4655 MachineRegisterInfo &MRI, 4656 MachineOperand &Op, 4657 const TargetRegisterClass *SuperRC, 4658 unsigned SubIdx, 4659 const TargetRegisterClass *SubRC) const { 4660 if (Op.isImm()) { 4661 if (SubIdx == AMDGPU::sub0) 4662 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm())); 4663 if (SubIdx == AMDGPU::sub1) 4664 return MachineOperand::CreateImm(static_cast<int32_t>(Op.getImm() >> 32)); 4665 4666 llvm_unreachable("Unhandled register index for immediate"); 4667 } 4668 4669 unsigned SubReg = buildExtractSubReg(MII, MRI, Op, SuperRC, 4670 SubIdx, SubRC); 4671 return MachineOperand::CreateReg(SubReg, false); 4672 } 4673 4674 // Change the order of operands from (0, 1, 2) to (0, 2, 1) 4675 void SIInstrInfo::swapOperands(MachineInstr &Inst) const { 4676 assert(Inst.getNumExplicitOperands() == 3); 4677 MachineOperand Op1 = Inst.getOperand(1); 4678 Inst.RemoveOperand(1); 4679 Inst.addOperand(Op1); 4680 } 4681 4682 bool SIInstrInfo::isLegalRegOperand(const MachineRegisterInfo &MRI, 4683 const MCOperandInfo &OpInfo, 4684 const MachineOperand &MO) const { 4685 if (!MO.isReg()) 4686 return false; 4687 4688 Register Reg = MO.getReg(); 4689 4690 const TargetRegisterClass *DRC = RI.getRegClass(OpInfo.RegClass); 4691 if (Reg.isPhysical()) 4692 return DRC->contains(Reg); 4693 4694 const TargetRegisterClass *RC = MRI.getRegClass(Reg); 4695 4696 if (MO.getSubReg()) { 4697 const MachineFunction *MF = MO.getParent()->getParent()->getParent(); 4698 const TargetRegisterClass *SuperRC = RI.getLargestLegalSuperClass(RC, *MF); 4699 if (!SuperRC) 4700 return false; 4701 4702 DRC = RI.getMatchingSuperRegClass(SuperRC, DRC, MO.getSubReg()); 4703 if (!DRC) 4704 return false; 4705 } 4706 return RC->hasSuperClassEq(DRC); 4707 } 4708 4709 
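// Illustrative example: for a VOP2 src1 constrained to VGPR_32,
// isLegalRegOperand rejects a virtual register of class SReg_32 (not a
// subclass of VGPR_32) but accepts a VGPR_32 value.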
bool SIInstrInfo::isLegalVSrcOperand(const MachineRegisterInfo &MRI, 4710 const MCOperandInfo &OpInfo, 4711 const MachineOperand &MO) const { 4712 if (MO.isReg()) 4713 return isLegalRegOperand(MRI, OpInfo, MO); 4714 4715 // Handle non-register types that are treated like immediates. 4716 assert(MO.isImm() || MO.isTargetIndex() || MO.isFI() || MO.isGlobal()); 4717 return true; 4718 } 4719 4720 bool SIInstrInfo::isOperandLegal(const MachineInstr &MI, unsigned OpIdx, 4721 const MachineOperand *MO) const { 4722 const MachineFunction &MF = *MI.getParent()->getParent(); 4723 const MachineRegisterInfo &MRI = MF.getRegInfo(); 4724 const MCInstrDesc &InstDesc = MI.getDesc(); 4725 const MCOperandInfo &OpInfo = InstDesc.OpInfo[OpIdx]; 4726 const TargetRegisterClass *DefinedRC = 4727 OpInfo.RegClass != -1 ? RI.getRegClass(OpInfo.RegClass) : nullptr; 4728 if (!MO) 4729 MO = &MI.getOperand(OpIdx); 4730 4731 int ConstantBusLimit = ST.getConstantBusLimit(MI.getOpcode()); 4732 int VOP3LiteralLimit = ST.hasVOP3Literal() ? 1 : 0; 4733 if (isVALU(MI) && usesConstantBus(MRI, *MO, OpInfo)) { 4734 if (isVOP3(MI) && isLiteralConstantLike(*MO, OpInfo) && !VOP3LiteralLimit--) 4735 return false; 4736 4737 SmallDenseSet<RegSubRegPair> SGPRsUsed; 4738 if (MO->isReg()) 4739 SGPRsUsed.insert(RegSubRegPair(MO->getReg(), MO->getSubReg())); 4740 4741 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 4742 if (i == OpIdx) 4743 continue; 4744 const MachineOperand &Op = MI.getOperand(i); 4745 if (Op.isReg()) { 4746 RegSubRegPair SGPR(Op.getReg(), Op.getSubReg()); 4747 if (!SGPRsUsed.count(SGPR) && 4748 usesConstantBus(MRI, Op, InstDesc.OpInfo[i])) { 4749 if (--ConstantBusLimit <= 0) 4750 return false; 4751 SGPRsUsed.insert(SGPR); 4752 } 4753 } else if (InstDesc.OpInfo[i].OperandType == AMDGPU::OPERAND_KIMM32) { 4754 if (--ConstantBusLimit <= 0) 4755 return false; 4756 } else if (isVOP3(MI) && AMDGPU::isSISrcOperand(InstDesc, i) && 4757 isLiteralConstantLike(Op, InstDesc.OpInfo[i])) { 4758 if (!VOP3LiteralLimit--) 4759 return false; 4760 if (--ConstantBusLimit <= 0) 4761 return false; 4762 } 4763 } 4764 } 4765 4766 if (MO->isReg()) { 4767 assert(DefinedRC); 4768 if (!isLegalRegOperand(MRI, OpInfo, *MO)) 4769 return false; 4770 bool IsAGPR = RI.isAGPR(MRI, MO->getReg()); 4771 if (IsAGPR && !ST.hasMAIInsts()) 4772 return false; 4773 unsigned Opc = MI.getOpcode(); 4774 if (IsAGPR && 4775 (!ST.hasGFX90AInsts() || !MRI.reservedRegsFrozen()) && 4776 (MI.mayLoad() || MI.mayStore() || isDS(Opc) || isMIMG(Opc))) 4777 return false; 4778 // Atomics should have both vdst and vdata either vgpr or agpr. 4779 const int VDstIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst); 4780 const int DataIdx = AMDGPU::getNamedOperandIdx(Opc, 4781 isDS(Opc) ? AMDGPU::OpName::data0 : AMDGPU::OpName::vdata); 4782 if ((int)OpIdx == VDstIdx && DataIdx != -1 && 4783 MI.getOperand(DataIdx).isReg() && 4784 RI.isAGPR(MRI, MI.getOperand(DataIdx).getReg()) != IsAGPR) 4785 return false; 4786 if ((int)OpIdx == DataIdx) { 4787 if (VDstIdx != -1 && 4788 RI.isAGPR(MRI, MI.getOperand(VDstIdx).getReg()) != IsAGPR) 4789 return false; 4790 // DS instructions with 2 src operands also must have tied RC. 
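// For example, a DS_WRITE2_B32 must not mix a VGPR data0 with an AGPR data1.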
4791 const int Data1Idx = AMDGPU::getNamedOperandIdx(Opc, 4792 AMDGPU::OpName::data1); 4793 if (Data1Idx != -1 && MI.getOperand(Data1Idx).isReg() && 4794 RI.isAGPR(MRI, MI.getOperand(Data1Idx).getReg()) != IsAGPR) 4795 return false; 4796 } 4797 if (Opc == AMDGPU::V_ACCVGPR_WRITE_B32_e64 && 4798 (int)OpIdx == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0) && 4799 RI.isSGPRReg(MRI, MO->getReg())) 4800 return false; 4801 return true; 4802 } 4803 4804 // Handle non-register types that are treated like immediates. 4805 assert(MO->isImm() || MO->isTargetIndex() || MO->isFI() || MO->isGlobal()); 4806 4807 if (!DefinedRC) { 4808 // This operand expects an immediate. 4809 return true; 4810 } 4811 4812 return isImmOperandLegal(MI, OpIdx, *MO); 4813 } 4814 4815 void SIInstrInfo::legalizeOperandsVOP2(MachineRegisterInfo &MRI, 4816 MachineInstr &MI) const { 4817 unsigned Opc = MI.getOpcode(); 4818 const MCInstrDesc &InstrDesc = get(Opc); 4819 4820 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0); 4821 MachineOperand &Src0 = MI.getOperand(Src0Idx); 4822 4823 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1); 4824 MachineOperand &Src1 = MI.getOperand(Src1Idx); 4825 4826 // If there is an implicit SGPR use such as VCC use for v_addc_u32/v_subb_u32 4827 // we need to only have one constant bus use before GFX10. 4828 bool HasImplicitSGPR = findImplicitSGPRRead(MI) != AMDGPU::NoRegister; 4829 if (HasImplicitSGPR && ST.getConstantBusLimit(Opc) <= 1 && 4830 Src0.isReg() && (RI.isSGPRReg(MRI, Src0.getReg()) || 4831 isLiteralConstantLike(Src0, InstrDesc.OpInfo[Src0Idx]))) 4832 legalizeOpWithMove(MI, Src0Idx); 4833 4834 // Special case: V_WRITELANE_B32 accepts only immediate or SGPR operands for 4835 // both the value to write (src0) and lane select (src1). Fix up non-SGPR 4836 // src0/src1 with V_READFIRSTLANE. 4837 if (Opc == AMDGPU::V_WRITELANE_B32) { 4838 const DebugLoc &DL = MI.getDebugLoc(); 4839 if (Src0.isReg() && RI.isVGPR(MRI, Src0.getReg())) { 4840 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4841 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4842 .add(Src0); 4843 Src0.ChangeToRegister(Reg, false); 4844 } 4845 if (Src1.isReg() && RI.isVGPR(MRI, Src1.getReg())) { 4846 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4847 const DebugLoc &DL = MI.getDebugLoc(); 4848 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4849 .add(Src1); 4850 Src1.ChangeToRegister(Reg, false); 4851 } 4852 return; 4853 } 4854 4855 // No VOP2 instructions support AGPRs. 4856 if (Src0.isReg() && RI.isAGPR(MRI, Src0.getReg())) 4857 legalizeOpWithMove(MI, Src0Idx); 4858 4859 if (Src1.isReg() && RI.isAGPR(MRI, Src1.getReg())) 4860 legalizeOpWithMove(MI, Src1Idx); 4861 4862 // VOP2 src0 instructions support all operand types, so we don't need to check 4863 // their legality. If src1 is already legal, we don't need to do anything. 4864 if (isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src1)) 4865 return; 4866 4867 // Special case: V_READLANE_B32 accepts only immediate or SGPR operands for 4868 // lane select. Fix up using V_READFIRSTLANE, since we assume that the lane 4869 // select is uniform. 
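// Illustrative MIR (shape assumed, names hypothetical):
//   %sel:sreg_32_xm0 = V_READFIRSTLANE_B32 %lane:vgpr_32
//   %dst:sreg_32 = V_READLANE_B32 %src:vgpr_32, %sel
// replaces an illegal direct use of the VGPR %lane as the lane select.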
4870 if (Opc == AMDGPU::V_READLANE_B32 && Src1.isReg() && 4871 RI.isVGPR(MRI, Src1.getReg())) { 4872 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4873 const DebugLoc &DL = MI.getDebugLoc(); 4874 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4875 .add(Src1); 4876 Src1.ChangeToRegister(Reg, false); 4877 return; 4878 } 4879 4880 // We do not use commuteInstruction here because it is too aggressive and will 4881 // commute if it is possible. We only want to commute here if it improves 4882 // legality. This can be called a fairly large number of times so don't waste 4883 // compile time pointlessly swapping and checking legality again. 4884 if (HasImplicitSGPR || !MI.isCommutable()) { 4885 legalizeOpWithMove(MI, Src1Idx); 4886 return; 4887 } 4888 4889 // If src0 can be used as src1, commuting will make the operands legal. 4890 // Otherwise we have to give up and insert a move. 4891 // 4892 // TODO: Other immediate-like operand kinds could be commuted if there was a 4893 // MachineOperand::ChangeTo* for them. 4894 if ((!Src1.isImm() && !Src1.isReg()) || 4895 !isLegalRegOperand(MRI, InstrDesc.OpInfo[Src1Idx], Src0)) { 4896 legalizeOpWithMove(MI, Src1Idx); 4897 return; 4898 } 4899 4900 int CommutedOpc = commuteOpcode(MI); 4901 if (CommutedOpc == -1) { 4902 legalizeOpWithMove(MI, Src1Idx); 4903 return; 4904 } 4905 4906 MI.setDesc(get(CommutedOpc)); 4907 4908 Register Src0Reg = Src0.getReg(); 4909 unsigned Src0SubReg = Src0.getSubReg(); 4910 bool Src0Kill = Src0.isKill(); 4911 4912 if (Src1.isImm()) 4913 Src0.ChangeToImmediate(Src1.getImm()); 4914 else if (Src1.isReg()) { 4915 Src0.ChangeToRegister(Src1.getReg(), false, false, Src1.isKill()); 4916 Src0.setSubReg(Src1.getSubReg()); 4917 } else 4918 llvm_unreachable("Should only have register or immediate operands"); 4919 4920 Src1.ChangeToRegister(Src0Reg, false, false, Src0Kill); 4921 Src1.setSubReg(Src0SubReg); 4922 fixImplicitOperands(MI); 4923 } 4924 4925 // Legalize VOP3 operands. All operand types are supported for any operand 4926 // but only one literal constant and only starting from GFX10. 4927 void SIInstrInfo::legalizeOperandsVOP3(MachineRegisterInfo &MRI, 4928 MachineInstr &MI) const { 4929 unsigned Opc = MI.getOpcode(); 4930 4931 int VOP3Idx[3] = { 4932 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0), 4933 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1), 4934 AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2) 4935 }; 4936 4937 if (Opc == AMDGPU::V_PERMLANE16_B32_e64 || 4938 Opc == AMDGPU::V_PERMLANEX16_B32_e64) { 4939 // src1 and src2 must be scalar 4940 MachineOperand &Src1 = MI.getOperand(VOP3Idx[1]); 4941 MachineOperand &Src2 = MI.getOperand(VOP3Idx[2]); 4942 const DebugLoc &DL = MI.getDebugLoc(); 4943 if (Src1.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src1.getReg()))) { 4944 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4945 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4946 .add(Src1); 4947 Src1.ChangeToRegister(Reg, false); 4948 } 4949 if (Src2.isReg() && !RI.isSGPRClass(MRI.getRegClass(Src2.getReg()))) { 4950 Register Reg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 4951 BuildMI(*MI.getParent(), MI, DL, get(AMDGPU::V_READFIRSTLANE_B32), Reg) 4952 .add(Src2); 4953 Src2.ChangeToRegister(Reg, false); 4954 } 4955 } 4956 4957 // Find the one SGPR operand we are allowed to use. 4958 int ConstantBusLimit = ST.getConstantBusLimit(Opc); 4959 int LiteralLimit = ST.hasVOP3Literal() ? 
1 : 0; 4960 SmallDenseSet<unsigned> SGPRsUsed; 4961 Register SGPRReg = findUsedSGPR(MI, VOP3Idx); 4962 if (SGPRReg != AMDGPU::NoRegister) { 4963 SGPRsUsed.insert(SGPRReg); 4964 --ConstantBusLimit; 4965 } 4966 4967 for (unsigned i = 0; i < 3; ++i) { 4968 int Idx = VOP3Idx[i]; 4969 if (Idx == -1) 4970 break; 4971 MachineOperand &MO = MI.getOperand(Idx); 4972 4973 if (!MO.isReg()) { 4974 if (!isLiteralConstantLike(MO, get(Opc).OpInfo[Idx])) 4975 continue; 4976 4977 if (LiteralLimit > 0 && ConstantBusLimit > 0) { 4978 --LiteralLimit; 4979 --ConstantBusLimit; 4980 continue; 4981 } 4982 4983 --LiteralLimit; 4984 --ConstantBusLimit; 4985 legalizeOpWithMove(MI, Idx); 4986 continue; 4987 } 4988 4989 if (RI.hasAGPRs(RI.getRegClassForReg(MRI, MO.getReg())) && 4990 !isOperandLegal(MI, Idx, &MO)) { 4991 legalizeOpWithMove(MI, Idx); 4992 continue; 4993 } 4994 4995 if (!RI.isSGPRClass(RI.getRegClassForReg(MRI, MO.getReg()))) 4996 continue; // VGPRs are legal 4997 4998 // We can use one SGPR in each VOP3 instruction prior to GFX10 4999 // and two starting from GFX10. 5000 if (SGPRsUsed.count(MO.getReg())) 5001 continue; 5002 if (ConstantBusLimit > 0) { 5003 SGPRsUsed.insert(MO.getReg()); 5004 --ConstantBusLimit; 5005 continue; 5006 } 5007 5008 // If we make it this far, then the operand is not legal and we must 5009 // legalize it. 5010 legalizeOpWithMove(MI, Idx); 5011 } 5012 } 5013 5014 Register SIInstrInfo::readlaneVGPRToSGPR(Register SrcReg, MachineInstr &UseMI, 5015 MachineRegisterInfo &MRI) const { 5016 const TargetRegisterClass *VRC = MRI.getRegClass(SrcReg); 5017 const TargetRegisterClass *SRC = RI.getEquivalentSGPRClass(VRC); 5018 Register DstReg = MRI.createVirtualRegister(SRC); 5019 unsigned SubRegs = RI.getRegSizeInBits(*VRC) / 32; 5020 5021 if (RI.hasAGPRs(VRC)) { 5022 VRC = RI.getEquivalentVGPRClass(VRC); 5023 Register NewSrcReg = MRI.createVirtualRegister(VRC); 5024 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5025 get(TargetOpcode::COPY), NewSrcReg) 5026 .addReg(SrcReg); 5027 SrcReg = NewSrcReg; 5028 } 5029 5030 if (SubRegs == 1) { 5031 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5032 get(AMDGPU::V_READFIRSTLANE_B32), DstReg) 5033 .addReg(SrcReg); 5034 return DstReg; 5035 } 5036 5037 SmallVector<unsigned, 8> SRegs; 5038 for (unsigned i = 0; i < SubRegs; ++i) { 5039 Register SGPR = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5040 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5041 get(AMDGPU::V_READFIRSTLANE_B32), SGPR) 5042 .addReg(SrcReg, 0, RI.getSubRegFromChannel(i)); 5043 SRegs.push_back(SGPR); 5044 } 5045 5046 MachineInstrBuilder MIB = 5047 BuildMI(*UseMI.getParent(), UseMI, UseMI.getDebugLoc(), 5048 get(AMDGPU::REG_SEQUENCE), DstReg); 5049 for (unsigned i = 0; i < SubRegs; ++i) { 5050 MIB.addReg(SRegs[i]); 5051 MIB.addImm(RI.getSubRegFromChannel(i)); 5052 } 5053 return DstReg; 5054 } 5055 5056 void SIInstrInfo::legalizeOperandsSMRD(MachineRegisterInfo &MRI, 5057 MachineInstr &MI) const { 5058 5059 // If the pointer is store in VGPRs, then we need to move them to 5060 // SGPRs using v_readfirstlane. This is safe because we only select 5061 // loads with uniform pointers to SMRD instruction so we know the 5062 // pointer value is uniform. 
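// Illustrative sketch (virtual register names are made up): for a 64-bit
// VGPR base %vbase, readlaneVGPRToSGPR() above expands roughly to
//   %lo:sgpr_32 = V_READFIRSTLANE_B32 %vbase.sub0
//   %hi:sgpr_32 = V_READFIRSTLANE_B32 %vbase.sub1
//   %sbase:sreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1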
5063 MachineOperand *SBase = getNamedOperand(MI, AMDGPU::OpName::sbase);
5064 if (SBase && !RI.isSGPRClass(MRI.getRegClass(SBase->getReg()))) {
5065 Register SGPR = readlaneVGPRToSGPR(SBase->getReg(), MI, MRI);
5066 SBase->setReg(SGPR);
5067 }
5068 MachineOperand *SOff = getNamedOperand(MI, AMDGPU::OpName::soff);
5069 if (SOff && !RI.isSGPRClass(MRI.getRegClass(SOff->getReg()))) {
5070 Register SGPR = readlaneVGPRToSGPR(SOff->getReg(), MI, MRI);
5071 SOff->setReg(SGPR);
5072 }
5073 }
5074
5075 bool SIInstrInfo::moveFlatAddrToVGPR(MachineInstr &Inst) const {
5076 unsigned Opc = Inst.getOpcode();
5077 int OldSAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::saddr);
5078 if (OldSAddrIdx < 0)
5079 return false;
5080
5081 assert(isSegmentSpecificFLAT(Inst));
5082
5083 int NewOpc = AMDGPU::getGlobalVaddrOp(Opc);
5084 if (NewOpc < 0)
5085 NewOpc = AMDGPU::getFlatScratchInstSVfromSS(Opc);
5086 if (NewOpc < 0)
5087 return false;
5088
5089 MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
5090 MachineOperand &SAddr = Inst.getOperand(OldSAddrIdx);
5091 if (RI.isSGPRReg(MRI, SAddr.getReg()))
5092 return false;
5093
5094 int NewVAddrIdx = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vaddr);
5095 if (NewVAddrIdx < 0)
5096 return false;
5097
5098 int OldVAddrIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr);
5099
5100 // Check vaddr, it shall be zero or absent.
5101 MachineInstr *VAddrDef = nullptr;
5102 if (OldVAddrIdx >= 0) {
5103 MachineOperand &VAddr = Inst.getOperand(OldVAddrIdx);
5104 VAddrDef = MRI.getUniqueVRegDef(VAddr.getReg());
5105 if (!VAddrDef || VAddrDef->getOpcode() != AMDGPU::V_MOV_B32_e32 ||
5106 !VAddrDef->getOperand(1).isImm() ||
5107 VAddrDef->getOperand(1).getImm() != 0)
5108 return false;
5109 }
5110
5111 const MCInstrDesc &NewDesc = get(NewOpc);
5112 Inst.setDesc(NewDesc);
5113
5114 // Callers expect the iterator to be valid after this call, so modify the
5115 // instruction in place.
5116 if (OldVAddrIdx == NewVAddrIdx) {
5117 MachineOperand &NewVAddr = Inst.getOperand(NewVAddrIdx);
5118 // Clear use list from the old vaddr holding a zero register.
5119 MRI.removeRegOperandFromUseList(&NewVAddr);
5120 MRI.moveOperands(&NewVAddr, &SAddr, 1);
5121 Inst.RemoveOperand(OldSAddrIdx);
5122 // Update the use list with the pointer we have just moved from vaddr to
5123 // saddr position. Otherwise the new vaddr will be missing from the use list.
5124 MRI.removeRegOperandFromUseList(&NewVAddr);
5125 MRI.addRegOperandToUseList(&NewVAddr);
5126 } else {
5127 assert(OldSAddrIdx == NewVAddrIdx);
5128
5129 if (OldVAddrIdx >= 0) {
5130 int NewVDstIn = AMDGPU::getNamedOperandIdx(NewOpc,
5131 AMDGPU::OpName::vdst_in);
5132
5133 // RemoveOperand doesn't try to fixup tied operand indexes as it goes, so
5134 // it asserts. Untie the operands for now and retie them afterwards.
5135 if (NewVDstIn != -1) {
5136 int OldVDstIn = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in);
5137 Inst.untieRegOperand(OldVDstIn);
5138 }
5139
5140 Inst.RemoveOperand(OldVAddrIdx);
5141
5142 if (NewVDstIn != -1) {
5143 int NewVDst = AMDGPU::getNamedOperandIdx(NewOpc, AMDGPU::OpName::vdst);
5144 Inst.tieOperands(NewVDst, NewVDstIn);
5145 }
5146 }
5147 }
5148
5149 if (VAddrDef && MRI.use_nodbg_empty(VAddrDef->getOperand(0).getReg()))
5150 VAddrDef->eraseFromParent();
5151
5152 return true;
5153 }
5154
5155 // FIXME: Remove this when SelectionDAG is obsoleted.
5156 void SIInstrInfo::legalizeOperandsFLAT(MachineRegisterInfo &MRI, 5157 MachineInstr &MI) const { 5158 if (!isSegmentSpecificFLAT(MI)) 5159 return; 5160 5161 // Fixup SGPR operands in VGPRs. We only select these when the DAG divergence 5162 // thinks they are uniform, so a readfirstlane should be valid. 5163 MachineOperand *SAddr = getNamedOperand(MI, AMDGPU::OpName::saddr); 5164 if (!SAddr || RI.isSGPRClass(MRI.getRegClass(SAddr->getReg()))) 5165 return; 5166 5167 if (moveFlatAddrToVGPR(MI)) 5168 return; 5169 5170 Register ToSGPR = readlaneVGPRToSGPR(SAddr->getReg(), MI, MRI); 5171 SAddr->setReg(ToSGPR); 5172 } 5173 5174 void SIInstrInfo::legalizeGenericOperand(MachineBasicBlock &InsertMBB, 5175 MachineBasicBlock::iterator I, 5176 const TargetRegisterClass *DstRC, 5177 MachineOperand &Op, 5178 MachineRegisterInfo &MRI, 5179 const DebugLoc &DL) const { 5180 Register OpReg = Op.getReg(); 5181 unsigned OpSubReg = Op.getSubReg(); 5182 5183 const TargetRegisterClass *OpRC = RI.getSubClassWithSubReg( 5184 RI.getRegClassForReg(MRI, OpReg), OpSubReg); 5185 5186 // Check if operand is already the correct register class. 5187 if (DstRC == OpRC) 5188 return; 5189 5190 Register DstReg = MRI.createVirtualRegister(DstRC); 5191 MachineInstr *Copy = 5192 BuildMI(InsertMBB, I, DL, get(AMDGPU::COPY), DstReg).add(Op); 5193 5194 Op.setReg(DstReg); 5195 Op.setSubReg(0); 5196 5197 MachineInstr *Def = MRI.getVRegDef(OpReg); 5198 if (!Def) 5199 return; 5200 5201 // Try to eliminate the copy if it is copying an immediate value. 5202 if (Def->isMoveImmediate() && DstRC != &AMDGPU::VReg_1RegClass) 5203 FoldImmediate(*Copy, *Def, OpReg, &MRI); 5204 5205 bool ImpDef = Def->isImplicitDef(); 5206 while (!ImpDef && Def && Def->isCopy()) { 5207 if (Def->getOperand(1).getReg().isPhysical()) 5208 break; 5209 Def = MRI.getUniqueVRegDef(Def->getOperand(1).getReg()); 5210 ImpDef = Def && Def->isImplicitDef(); 5211 } 5212 if (!RI.isSGPRClass(DstRC) && !Copy->readsRegister(AMDGPU::EXEC, &RI) && 5213 !ImpDef) 5214 Copy->addOperand(MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 5215 } 5216 5217 // Emit the actual waterfall loop, executing the wrapped instruction for each 5218 // unique value of \p Rsrc across all lanes. In the best case we execute 1 5219 // iteration, in the worst case we execute 64 (once per lane). 5220 static void 5221 emitLoadSRsrcFromVGPRLoop(const SIInstrInfo &TII, MachineRegisterInfo &MRI, 5222 MachineBasicBlock &OrigBB, MachineBasicBlock &LoopBB, 5223 const DebugLoc &DL, MachineOperand &Rsrc) { 5224 MachineFunction &MF = *OrigBB.getParent(); 5225 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5226 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5227 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5228 unsigned SaveExecOpc = 5229 ST.isWave32() ? AMDGPU::S_AND_SAVEEXEC_B32 : AMDGPU::S_AND_SAVEEXEC_B64; 5230 unsigned XorTermOpc = 5231 ST.isWave32() ? AMDGPU::S_XOR_B32_term : AMDGPU::S_XOR_B64_term; 5232 unsigned AndOpc = 5233 ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5234 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5235 5236 MachineBasicBlock::iterator I = LoopBB.begin(); 5237 5238 SmallVector<Register, 8> ReadlanePieces; 5239 Register CondReg = AMDGPU::NoRegister; 5240 5241 Register VRsrc = Rsrc.getReg(); 5242 unsigned VRsrcUndef = getUndefRegState(Rsrc.isUndef()); 5243 5244 unsigned RegSize = TRI->getRegSizeInBits(Rsrc.getReg(), MRI); 5245 unsigned NumSubRegs = RegSize / 32; 5246 assert(NumSubRegs % 2 == 0 && NumSubRegs <= 32 && "Unhandled register size"); 5247 5248 for (unsigned Idx = 0; Idx < NumSubRegs; Idx += 2) { 5249 5250 Register CurRegLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5251 Register CurRegHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5252 5253 // Read the next variant <- also loop target. 5254 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegLo) 5255 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx)); 5256 5257 // Read the next variant <- also loop target. 5258 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_READFIRSTLANE_B32), CurRegHi) 5259 .addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx + 1)); 5260 5261 ReadlanePieces.push_back(CurRegLo); 5262 ReadlanePieces.push_back(CurRegHi); 5263 5264 // Comparison is to be done as 64-bit. 5265 Register CurReg = MRI.createVirtualRegister(&AMDGPU::SGPR_64RegClass); 5266 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), CurReg) 5267 .addReg(CurRegLo) 5268 .addImm(AMDGPU::sub0) 5269 .addReg(CurRegHi) 5270 .addImm(AMDGPU::sub1); 5271 5272 Register NewCondReg = MRI.createVirtualRegister(BoolXExecRC); 5273 auto Cmp = 5274 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::V_CMP_EQ_U64_e64), NewCondReg) 5275 .addReg(CurReg); 5276 if (NumSubRegs <= 2) 5277 Cmp.addReg(VRsrc); 5278 else 5279 Cmp.addReg(VRsrc, VRsrcUndef, TRI->getSubRegFromChannel(Idx, 2)); 5280 5281 // Combine the comparision results with AND. 5282 if (CondReg == AMDGPU::NoRegister) // First. 5283 CondReg = NewCondReg; 5284 else { // If not the first, we create an AND. 5285 Register AndReg = MRI.createVirtualRegister(BoolXExecRC); 5286 BuildMI(LoopBB, I, DL, TII.get(AndOpc), AndReg) 5287 .addReg(CondReg) 5288 .addReg(NewCondReg); 5289 CondReg = AndReg; 5290 } 5291 } // End for loop. 5292 5293 auto SRsrcRC = TRI->getEquivalentSGPRClass(MRI.getRegClass(VRsrc)); 5294 Register SRsrc = MRI.createVirtualRegister(SRsrcRC); 5295 5296 // Build scalar Rsrc. 5297 auto Merge = BuildMI(LoopBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), SRsrc); 5298 unsigned Channel = 0; 5299 for (Register Piece : ReadlanePieces) { 5300 Merge.addReg(Piece) 5301 .addImm(TRI->getSubRegFromChannel(Channel++)); 5302 } 5303 5304 // Update Rsrc operand to use the SGPR Rsrc. 5305 Rsrc.setReg(SRsrc); 5306 Rsrc.setIsKill(true); 5307 5308 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5309 MRI.setSimpleHint(SaveExec, CondReg); 5310 5311 // Update EXEC to matching lanes, saving original to SaveExec. 5312 BuildMI(LoopBB, I, DL, TII.get(SaveExecOpc), SaveExec) 5313 .addReg(CondReg, RegState::Kill); 5314 5315 // The original instruction is here; we insert the terminators after it. 5316 I = LoopBB.end(); 5317 5318 // Update EXEC, switch all done bits to 0 and all todo bits to 1. 
5319 BuildMI(LoopBB, I, DL, TII.get(XorTermOpc), Exec) 5320 .addReg(Exec) 5321 .addReg(SaveExec); 5322 5323 BuildMI(LoopBB, I, DL, TII.get(AMDGPU::SI_WATERFALL_LOOP)).addMBB(&LoopBB); 5324 } 5325 5326 // Build a waterfall loop around \p MI, replacing the VGPR \p Rsrc register 5327 // with SGPRs by iterating over all unique values across all lanes. 5328 // Returns the loop basic block that now contains \p MI. 5329 static MachineBasicBlock * 5330 loadSRsrcFromVGPR(const SIInstrInfo &TII, MachineInstr &MI, 5331 MachineOperand &Rsrc, MachineDominatorTree *MDT, 5332 MachineBasicBlock::iterator Begin = nullptr, 5333 MachineBasicBlock::iterator End = nullptr) { 5334 MachineBasicBlock &MBB = *MI.getParent(); 5335 MachineFunction &MF = *MBB.getParent(); 5336 const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>(); 5337 const SIRegisterInfo *TRI = ST.getRegisterInfo(); 5338 MachineRegisterInfo &MRI = MF.getRegInfo(); 5339 if (!Begin.isValid()) 5340 Begin = &MI; 5341 if (!End.isValid()) { 5342 End = &MI; 5343 ++End; 5344 } 5345 const DebugLoc &DL = MI.getDebugLoc(); 5346 unsigned Exec = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5347 unsigned MovExecOpc = ST.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 5348 const auto *BoolXExecRC = TRI->getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5349 5350 Register SaveExec = MRI.createVirtualRegister(BoolXExecRC); 5351 5352 // Save the EXEC mask 5353 BuildMI(MBB, Begin, DL, TII.get(MovExecOpc), SaveExec).addReg(Exec); 5354 5355 // Killed uses in the instruction we are waterfalling around will be 5356 // incorrect due to the added control-flow. 5357 MachineBasicBlock::iterator AfterMI = MI; 5358 ++AfterMI; 5359 for (auto I = Begin; I != AfterMI; I++) { 5360 for (auto &MO : I->uses()) { 5361 if (MO.isReg() && MO.isUse()) { 5362 MRI.clearKillFlags(MO.getReg()); 5363 } 5364 } 5365 } 5366 5367 // To insert the loop we need to split the block. Move everything after this 5368 // point to a new block, and insert a new empty block between the two. 5369 MachineBasicBlock *LoopBB = MF.CreateMachineBasicBlock(); 5370 MachineBasicBlock *RemainderBB = MF.CreateMachineBasicBlock(); 5371 MachineFunction::iterator MBBI(MBB); 5372 ++MBBI; 5373 5374 MF.insert(MBBI, LoopBB); 5375 MF.insert(MBBI, RemainderBB); 5376 5377 LoopBB->addSuccessor(LoopBB); 5378 LoopBB->addSuccessor(RemainderBB); 5379 5380 // Move Begin to MI to the LoopBB, and the remainder of the block to 5381 // RemainderBB. 5382 RemainderBB->transferSuccessorsAndUpdatePHIs(&MBB); 5383 RemainderBB->splice(RemainderBB->begin(), &MBB, End, MBB.end()); 5384 LoopBB->splice(LoopBB->begin(), &MBB, Begin, MBB.end()); 5385 5386 MBB.addSuccessor(LoopBB); 5387 5388 // Update dominators. We know that MBB immediately dominates LoopBB, that 5389 // LoopBB immediately dominates RemainderBB, and that RemainderBB immediately 5390 // dominates all of the successors transferred to it from MBB that MBB used 5391 // to properly dominate. 
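// Shape of the resulting control flow (sketch):
//   MBB -> LoopBB -> RemainderBB -> (MBB's original successors)
//            ^__|
// where LoopBB branches back to itself until every active lane's value has
// been handled.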
5392 if (MDT) { 5393 MDT->addNewBlock(LoopBB, &MBB); 5394 MDT->addNewBlock(RemainderBB, LoopBB); 5395 for (auto &Succ : RemainderBB->successors()) { 5396 if (MDT->properlyDominates(&MBB, Succ)) { 5397 MDT->changeImmediateDominator(Succ, RemainderBB); 5398 } 5399 } 5400 } 5401 5402 emitLoadSRsrcFromVGPRLoop(TII, MRI, MBB, *LoopBB, DL, Rsrc); 5403 5404 // Restore the EXEC mask 5405 MachineBasicBlock::iterator First = RemainderBB->begin(); 5406 BuildMI(*RemainderBB, First, DL, TII.get(MovExecOpc), Exec).addReg(SaveExec); 5407 return LoopBB; 5408 } 5409 5410 // Extract pointer from Rsrc and return a zero-value Rsrc replacement. 5411 static std::tuple<unsigned, unsigned> 5412 extractRsrcPtr(const SIInstrInfo &TII, MachineInstr &MI, MachineOperand &Rsrc) { 5413 MachineBasicBlock &MBB = *MI.getParent(); 5414 MachineFunction &MF = *MBB.getParent(); 5415 MachineRegisterInfo &MRI = MF.getRegInfo(); 5416 5417 // Extract the ptr from the resource descriptor. 5418 unsigned RsrcPtr = 5419 TII.buildExtractSubReg(MI, MRI, Rsrc, &AMDGPU::VReg_128RegClass, 5420 AMDGPU::sub0_sub1, &AMDGPU::VReg_64RegClass); 5421 5422 // Create an empty resource descriptor 5423 Register Zero64 = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 5424 Register SRsrcFormatLo = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5425 Register SRsrcFormatHi = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass); 5426 Register NewSRsrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 5427 uint64_t RsrcDataFormat = TII.getDefaultRsrcDataFormat(); 5428 5429 // Zero64 = 0 5430 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B64), Zero64) 5431 .addImm(0); 5432 5433 // SRsrcFormatLo = RSRC_DATA_FORMAT{31-0} 5434 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatLo) 5435 .addImm(RsrcDataFormat & 0xFFFFFFFF); 5436 5437 // SRsrcFormatHi = RSRC_DATA_FORMAT{63-32} 5438 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), SRsrcFormatHi) 5439 .addImm(RsrcDataFormat >> 32); 5440 5441 // NewSRsrc = {Zero64, SRsrcFormat} 5442 BuildMI(MBB, MI, MI.getDebugLoc(), TII.get(AMDGPU::REG_SEQUENCE), NewSRsrc) 5443 .addReg(Zero64) 5444 .addImm(AMDGPU::sub0_sub1) 5445 .addReg(SRsrcFormatLo) 5446 .addImm(AMDGPU::sub2) 5447 .addReg(SRsrcFormatHi) 5448 .addImm(AMDGPU::sub3); 5449 5450 return std::make_tuple(RsrcPtr, NewSRsrc); 5451 } 5452 5453 MachineBasicBlock * 5454 SIInstrInfo::legalizeOperands(MachineInstr &MI, 5455 MachineDominatorTree *MDT) const { 5456 MachineFunction &MF = *MI.getParent()->getParent(); 5457 MachineRegisterInfo &MRI = MF.getRegInfo(); 5458 MachineBasicBlock *CreatedBB = nullptr; 5459 5460 // Legalize VOP2 5461 if (isVOP2(MI) || isVOPC(MI)) { 5462 legalizeOperandsVOP2(MRI, MI); 5463 return CreatedBB; 5464 } 5465 5466 // Legalize VOP3 5467 if (isVOP3(MI)) { 5468 legalizeOperandsVOP3(MRI, MI); 5469 return CreatedBB; 5470 } 5471 5472 // Legalize SMRD 5473 if (isSMRD(MI)) { 5474 legalizeOperandsSMRD(MRI, MI); 5475 return CreatedBB; 5476 } 5477 5478 // Legalize FLAT 5479 if (isFLAT(MI)) { 5480 legalizeOperandsFLAT(MRI, MI); 5481 return CreatedBB; 5482 } 5483 5484 // Legalize REG_SEQUENCE and PHI 5485 // The register class of the operands much be the same type as the register 5486 // class of the output. 
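// E.g. (illustrative operands): for
//   %r = PHI %a:vgpr_32, %bb.1, %b:sgpr_32, %bb.2
// a VGPR class is chosen and %b is copied into a VGPR in %bb.2, so that no
// illegal VGPR->SGPR copy has to be materialized later.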
5487 if (MI.getOpcode() == AMDGPU::PHI) {
5488 const TargetRegisterClass *RC = nullptr, *SRC = nullptr, *VRC = nullptr;
5489 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
5490 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).getReg().isVirtual())
5491 continue;
5492 const TargetRegisterClass *OpRC =
5493 MRI.getRegClass(MI.getOperand(i).getReg());
5494 if (RI.hasVectorRegisters(OpRC)) {
5495 VRC = OpRC;
5496 } else {
5497 SRC = OpRC;
5498 }
5499 }
5500
5501 // If any of the operands are VGPR registers, then they must all be VGPRs;
5502 // otherwise we will create illegal VGPR->SGPR copies when legalizing
5503 // them.
5504 if (VRC || !RI.isSGPRClass(getOpRegClass(MI, 0))) {
5505 if (!VRC) {
5506 assert(SRC);
5507 if (getOpRegClass(MI, 0) == &AMDGPU::VReg_1RegClass) {
5508 VRC = &AMDGPU::VReg_1RegClass;
5509 } else
5510 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5511 ? RI.getEquivalentAGPRClass(SRC)
5512 : RI.getEquivalentVGPRClass(SRC);
5513 } else {
5514 VRC = RI.hasAGPRs(getOpRegClass(MI, 0))
5515 ? RI.getEquivalentAGPRClass(VRC)
5516 : RI.getEquivalentVGPRClass(VRC);
5517 }
5518 RC = VRC;
5519 } else {
5520 RC = SRC;
5521 }
5522
5523 // Update all the operands so they have the same type.
5524 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) {
5525 MachineOperand &Op = MI.getOperand(I);
5526 if (!Op.isReg() || !Op.getReg().isVirtual())
5527 continue;
5528
5529 // MI is a PHI instruction.
5530 MachineBasicBlock *InsertBB = MI.getOperand(I + 1).getMBB();
5531 MachineBasicBlock::iterator Insert = InsertBB->getFirstTerminator();
5532
5533 // Avoid creating no-op copies with the same src and dst reg class. These
5534 // confuse some of the machine passes.
5535 legalizeGenericOperand(*InsertBB, Insert, RC, Op, MRI, MI.getDebugLoc());
5536 }
5537 }
5538
5539 // REG_SEQUENCE doesn't really require operand legalization, but if one has a
5540 // VGPR dest type and SGPR sources, insert copies so all operands are
5541 // VGPRs. This seems to help operand folding / the register coalescer.
5542 if (MI.getOpcode() == AMDGPU::REG_SEQUENCE) {
5543 MachineBasicBlock *MBB = MI.getParent();
5544 const TargetRegisterClass *DstRC = getOpRegClass(MI, 0);
5545 if (RI.hasVGPRs(DstRC)) {
5546 // Update all the operands so they are VGPR register classes. These may
5547 // not be the same register class because REG_SEQUENCE supports mixing
5548 // subregister index types e.g.
sub0_sub1 + sub2 + sub3 5549 for (unsigned I = 1, E = MI.getNumOperands(); I != E; I += 2) { 5550 MachineOperand &Op = MI.getOperand(I); 5551 if (!Op.isReg() || !Op.getReg().isVirtual()) 5552 continue; 5553 5554 const TargetRegisterClass *OpRC = MRI.getRegClass(Op.getReg()); 5555 const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(OpRC); 5556 if (VRC == OpRC) 5557 continue; 5558 5559 legalizeGenericOperand(*MBB, MI, VRC, Op, MRI, MI.getDebugLoc()); 5560 Op.setIsKill(); 5561 } 5562 } 5563 5564 return CreatedBB; 5565 } 5566 5567 // Legalize INSERT_SUBREG 5568 // src0 must have the same register class as dst 5569 if (MI.getOpcode() == AMDGPU::INSERT_SUBREG) { 5570 Register Dst = MI.getOperand(0).getReg(); 5571 Register Src0 = MI.getOperand(1).getReg(); 5572 const TargetRegisterClass *DstRC = MRI.getRegClass(Dst); 5573 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0); 5574 if (DstRC != Src0RC) { 5575 MachineBasicBlock *MBB = MI.getParent(); 5576 MachineOperand &Op = MI.getOperand(1); 5577 legalizeGenericOperand(*MBB, MI, DstRC, Op, MRI, MI.getDebugLoc()); 5578 } 5579 return CreatedBB; 5580 } 5581 5582 // Legalize SI_INIT_M0 5583 if (MI.getOpcode() == AMDGPU::SI_INIT_M0) { 5584 MachineOperand &Src = MI.getOperand(0); 5585 if (Src.isReg() && RI.hasVectorRegisters(MRI.getRegClass(Src.getReg()))) 5586 Src.setReg(readlaneVGPRToSGPR(Src.getReg(), MI, MRI)); 5587 return CreatedBB; 5588 } 5589 5590 // Legalize MIMG and MUBUF/MTBUF for shaders. 5591 // 5592 // Shaders only generate MUBUF/MTBUF instructions via intrinsics or via 5593 // scratch memory access. In both cases, the legalization never involves 5594 // conversion to the addr64 form. 5595 if (isMIMG(MI) || (AMDGPU::isGraphics(MF.getFunction().getCallingConv()) && 5596 (isMUBUF(MI) || isMTBUF(MI)))) { 5597 MachineOperand *SRsrc = getNamedOperand(MI, AMDGPU::OpName::srsrc); 5598 if (SRsrc && !RI.isSGPRClass(MRI.getRegClass(SRsrc->getReg()))) 5599 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SRsrc, MDT); 5600 5601 MachineOperand *SSamp = getNamedOperand(MI, AMDGPU::OpName::ssamp); 5602 if (SSamp && !RI.isSGPRClass(MRI.getRegClass(SSamp->getReg()))) 5603 CreatedBB = loadSRsrcFromVGPR(*this, MI, *SSamp, MDT); 5604 5605 return CreatedBB; 5606 } 5607 5608 // Legalize SI_CALL 5609 if (MI.getOpcode() == AMDGPU::SI_CALL_ISEL) { 5610 MachineOperand *Dest = &MI.getOperand(0); 5611 if (!RI.isSGPRClass(MRI.getRegClass(Dest->getReg()))) { 5612 // Move everything between ADJCALLSTACKUP and ADJCALLSTACKDOWN and 5613 // following copies, we also need to move copies from and to physical 5614 // registers into the loop block. 5615 unsigned FrameSetupOpcode = getCallFrameSetupOpcode(); 5616 unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode(); 5617 5618 // Also move the copies to physical registers into the loop block 5619 MachineBasicBlock &MBB = *MI.getParent(); 5620 MachineBasicBlock::iterator Start(&MI); 5621 while (Start->getOpcode() != FrameSetupOpcode) 5622 --Start; 5623 MachineBasicBlock::iterator End(&MI); 5624 while (End->getOpcode() != FrameDestroyOpcode) 5625 ++End; 5626 // Also include following copies of the return value 5627 ++End; 5628 while (End != MBB.end() && End->isCopy() && End->getOperand(1).isReg() && 5629 MI.definesRegister(End->getOperand(1).getReg())) 5630 ++End; 5631 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Dest, MDT, Start, End); 5632 } 5633 } 5634 5635 // Legalize MUBUF* instructions. 
5636 int RsrcIdx =
5637 AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::srsrc);
5638 if (RsrcIdx != -1) {
5639 // We have a MUBUF instruction
5640 MachineOperand *Rsrc = &MI.getOperand(RsrcIdx);
5641 unsigned RsrcRC = get(MI.getOpcode()).OpInfo[RsrcIdx].RegClass;
5642 if (RI.getCommonSubClass(MRI.getRegClass(Rsrc->getReg()),
5643 RI.getRegClass(RsrcRC))) {
5644 // The operands are legal.
5645 // FIXME: We may need to legalize operands besides srsrc.
5646 return CreatedBB;
5647 }
5648
5649 // Legalize a VGPR Rsrc.
5650 //
5651 // If the instruction is _ADDR64, we can avoid a waterfall by extracting
5652 // the base pointer from the VGPR Rsrc, adding it to the VAddr, then using
5653 // a zero-value SRsrc.
5654 //
5655 // If the instruction is _OFFSET (both idxen and offen disabled), and we
5656 // support ADDR64 instructions, we can convert to ADDR64 and do the same as
5657 // above.
5658 //
5659 // Otherwise we are on non-ADDR64 hardware, and/or we have
5660 // idxen/offen/bothen and we fall back to a waterfall loop.
5661
5662 MachineBasicBlock &MBB = *MI.getParent();
5663
5664 MachineOperand *VAddr = getNamedOperand(MI, AMDGPU::OpName::vaddr);
5665 if (VAddr && AMDGPU::getIfAddr64Inst(MI.getOpcode()) != -1) {
5666 // This is already an ADDR64 instruction so we need to add the pointer
5667 // extracted from the resource descriptor to the current value of VAddr.
5668 Register NewVAddrLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5669 Register NewVAddrHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
5670 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
5671
5672 const auto *BoolXExecRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID);
5673 Register CondReg0 = MRI.createVirtualRegister(BoolXExecRC);
5674 Register CondReg1 = MRI.createVirtualRegister(BoolXExecRC);
5675
5676 unsigned RsrcPtr, NewSRsrc;
5677 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc);
5678
5679 // NewVaddrLo = RsrcPtr:sub0 + VAddr:sub0
5680 const DebugLoc &DL = MI.getDebugLoc();
5681 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADD_CO_U32_e64), NewVAddrLo)
5682 .addDef(CondReg0)
5683 .addReg(RsrcPtr, 0, AMDGPU::sub0)
5684 .addReg(VAddr->getReg(), 0, AMDGPU::sub0)
5685 .addImm(0);
5686
5687 // NewVaddrHi = RsrcPtr:sub1 + VAddr:sub1
5688 BuildMI(MBB, MI, DL, get(AMDGPU::V_ADDC_U32_e64), NewVAddrHi)
5689 .addDef(CondReg1, RegState::Dead)
5690 .addReg(RsrcPtr, 0, AMDGPU::sub1)
5691 .addReg(VAddr->getReg(), 0, AMDGPU::sub1)
5692 .addReg(CondReg0, RegState::Kill)
5693 .addImm(0);
5694
5695 // NewVaddr = {NewVaddrHi, NewVaddrLo}
5696 BuildMI(MBB, MI, MI.getDebugLoc(), get(AMDGPU::REG_SEQUENCE), NewVAddr)
5697 .addReg(NewVAddrLo)
5698 .addImm(AMDGPU::sub0)
5699 .addReg(NewVAddrHi)
5700 .addImm(AMDGPU::sub1);
5701
5702 VAddr->setReg(NewVAddr);
5703 Rsrc->setReg(NewSRsrc);
5704 } else if (!VAddr && ST.hasAddr64()) {
5705 // This instruction is the _OFFSET variant, so we need to convert it to
5706 // ADDR64.
5707 assert(ST.getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS && 5708 "FIXME: Need to emit flat atomics here"); 5709 5710 unsigned RsrcPtr, NewSRsrc; 5711 std::tie(RsrcPtr, NewSRsrc) = extractRsrcPtr(*this, MI, *Rsrc); 5712 5713 Register NewVAddr = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 5714 MachineOperand *VData = getNamedOperand(MI, AMDGPU::OpName::vdata); 5715 MachineOperand *Offset = getNamedOperand(MI, AMDGPU::OpName::offset); 5716 MachineOperand *SOffset = getNamedOperand(MI, AMDGPU::OpName::soffset); 5717 unsigned Addr64Opcode = AMDGPU::getAddr64Inst(MI.getOpcode()); 5718 5719 // Atomics rith return have have an additional tied operand and are 5720 // missing some of the special bits. 5721 MachineOperand *VDataIn = getNamedOperand(MI, AMDGPU::OpName::vdata_in); 5722 MachineInstr *Addr64; 5723 5724 if (!VDataIn) { 5725 // Regular buffer load / store. 5726 MachineInstrBuilder MIB = 5727 BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5728 .add(*VData) 5729 .addReg(NewVAddr) 5730 .addReg(NewSRsrc) 5731 .add(*SOffset) 5732 .add(*Offset); 5733 5734 if (const MachineOperand *CPol = 5735 getNamedOperand(MI, AMDGPU::OpName::cpol)) { 5736 MIB.addImm(CPol->getImm()); 5737 } 5738 5739 if (const MachineOperand *TFE = 5740 getNamedOperand(MI, AMDGPU::OpName::tfe)) { 5741 MIB.addImm(TFE->getImm()); 5742 } 5743 5744 MIB.addImm(getNamedImmOperand(MI, AMDGPU::OpName::swz)); 5745 5746 MIB.cloneMemRefs(MI); 5747 Addr64 = MIB; 5748 } else { 5749 // Atomics with return. 5750 Addr64 = BuildMI(MBB, MI, MI.getDebugLoc(), get(Addr64Opcode)) 5751 .add(*VData) 5752 .add(*VDataIn) 5753 .addReg(NewVAddr) 5754 .addReg(NewSRsrc) 5755 .add(*SOffset) 5756 .add(*Offset) 5757 .addImm(getNamedImmOperand(MI, AMDGPU::OpName::cpol)) 5758 .cloneMemRefs(MI); 5759 } 5760 5761 MI.removeFromParent(); 5762 5763 // NewVaddr = {NewVaddrHi, NewVaddrLo} 5764 BuildMI(MBB, Addr64, Addr64->getDebugLoc(), get(AMDGPU::REG_SEQUENCE), 5765 NewVAddr) 5766 .addReg(RsrcPtr, 0, AMDGPU::sub0) 5767 .addImm(AMDGPU::sub0) 5768 .addReg(RsrcPtr, 0, AMDGPU::sub1) 5769 .addImm(AMDGPU::sub1); 5770 } else { 5771 // This is another variant; legalize Rsrc with waterfall loop from VGPRs 5772 // to SGPRs. 5773 CreatedBB = loadSRsrcFromVGPR(*this, MI, *Rsrc, MDT); 5774 return CreatedBB; 5775 } 5776 } 5777 return CreatedBB; 5778 } 5779 5780 MachineBasicBlock *SIInstrInfo::moveToVALU(MachineInstr &TopInst, 5781 MachineDominatorTree *MDT) const { 5782 SetVectorType Worklist; 5783 Worklist.insert(&TopInst); 5784 MachineBasicBlock *CreatedBB = nullptr; 5785 MachineBasicBlock *CreatedBBTmp = nullptr; 5786 5787 while (!Worklist.empty()) { 5788 MachineInstr &Inst = *Worklist.pop_back_val(); 5789 MachineBasicBlock *MBB = Inst.getParent(); 5790 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); 5791 5792 unsigned Opcode = Inst.getOpcode(); 5793 unsigned NewOpcode = getVALUOp(Inst); 5794 5795 // Handle some special cases 5796 switch (Opcode) { 5797 default: 5798 break; 5799 case AMDGPU::S_ADD_U64_PSEUDO: 5800 case AMDGPU::S_SUB_U64_PSEUDO: 5801 splitScalar64BitAddSub(Worklist, Inst, MDT); 5802 Inst.eraseFromParent(); 5803 continue; 5804 case AMDGPU::S_ADD_I32: 5805 case AMDGPU::S_SUB_I32: { 5806 // FIXME: The u32 versions currently selected use the carry. 
5807 bool Changed; 5808 std::tie(Changed, CreatedBBTmp) = moveScalarAddSub(Worklist, Inst, MDT); 5809 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 5810 CreatedBB = CreatedBBTmp; 5811 if (Changed) 5812 continue; 5813 5814 // Default handling 5815 break; 5816 } 5817 case AMDGPU::S_AND_B64: 5818 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_AND_B32, MDT); 5819 Inst.eraseFromParent(); 5820 continue; 5821 5822 case AMDGPU::S_OR_B64: 5823 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_OR_B32, MDT); 5824 Inst.eraseFromParent(); 5825 continue; 5826 5827 case AMDGPU::S_XOR_B64: 5828 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XOR_B32, MDT); 5829 Inst.eraseFromParent(); 5830 continue; 5831 5832 case AMDGPU::S_NAND_B64: 5833 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NAND_B32, MDT); 5834 Inst.eraseFromParent(); 5835 continue; 5836 5837 case AMDGPU::S_NOR_B64: 5838 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_NOR_B32, MDT); 5839 Inst.eraseFromParent(); 5840 continue; 5841 5842 case AMDGPU::S_XNOR_B64: 5843 if (ST.hasDLInsts()) 5844 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_XNOR_B32, MDT); 5845 else 5846 splitScalar64BitXnor(Worklist, Inst, MDT); 5847 Inst.eraseFromParent(); 5848 continue; 5849 5850 case AMDGPU::S_ANDN2_B64: 5851 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ANDN2_B32, MDT); 5852 Inst.eraseFromParent(); 5853 continue; 5854 5855 case AMDGPU::S_ORN2_B64: 5856 splitScalar64BitBinaryOp(Worklist, Inst, AMDGPU::S_ORN2_B32, MDT); 5857 Inst.eraseFromParent(); 5858 continue; 5859 5860 case AMDGPU::S_BREV_B64: 5861 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_BREV_B32, true); 5862 Inst.eraseFromParent(); 5863 continue; 5864 5865 case AMDGPU::S_NOT_B64: 5866 splitScalar64BitUnaryOp(Worklist, Inst, AMDGPU::S_NOT_B32); 5867 Inst.eraseFromParent(); 5868 continue; 5869 5870 case AMDGPU::S_BCNT1_I32_B64: 5871 splitScalar64BitBCNT(Worklist, Inst); 5872 Inst.eraseFromParent(); 5873 continue; 5874 5875 case AMDGPU::S_BFE_I64: 5876 splitScalar64BitBFE(Worklist, Inst); 5877 Inst.eraseFromParent(); 5878 continue; 5879 5880 case AMDGPU::S_LSHL_B32: 5881 if (ST.hasOnlyRevVALUShifts()) { 5882 NewOpcode = AMDGPU::V_LSHLREV_B32_e64; 5883 swapOperands(Inst); 5884 } 5885 break; 5886 case AMDGPU::S_ASHR_I32: 5887 if (ST.hasOnlyRevVALUShifts()) { 5888 NewOpcode = AMDGPU::V_ASHRREV_I32_e64; 5889 swapOperands(Inst); 5890 } 5891 break; 5892 case AMDGPU::S_LSHR_B32: 5893 if (ST.hasOnlyRevVALUShifts()) { 5894 NewOpcode = AMDGPU::V_LSHRREV_B32_e64; 5895 swapOperands(Inst); 5896 } 5897 break; 5898 case AMDGPU::S_LSHL_B64: 5899 if (ST.hasOnlyRevVALUShifts()) { 5900 NewOpcode = AMDGPU::V_LSHLREV_B64_e64; 5901 swapOperands(Inst); 5902 } 5903 break; 5904 case AMDGPU::S_ASHR_I64: 5905 if (ST.hasOnlyRevVALUShifts()) { 5906 NewOpcode = AMDGPU::V_ASHRREV_I64_e64; 5907 swapOperands(Inst); 5908 } 5909 break; 5910 case AMDGPU::S_LSHR_B64: 5911 if (ST.hasOnlyRevVALUShifts()) { 5912 NewOpcode = AMDGPU::V_LSHRREV_B64_e64; 5913 swapOperands(Inst); 5914 } 5915 break; 5916 5917 case AMDGPU::S_ABS_I32: 5918 lowerScalarAbs(Worklist, Inst); 5919 Inst.eraseFromParent(); 5920 continue; 5921 5922 case AMDGPU::S_CBRANCH_SCC0: 5923 case AMDGPU::S_CBRANCH_SCC1: { 5924 // Clear unused bits of vcc 5925 Register CondReg = Inst.getOperand(1).getReg(); 5926 bool IsSCC = CondReg == AMDGPU::SCC; 5927 Register VCC = RI.getVCC(); 5928 Register EXEC = ST.isWave32() ? AMDGPU::EXEC_LO : AMDGPU::EXEC; 5929 unsigned Opc = ST.isWave32() ? 
AMDGPU::S_AND_B32 : AMDGPU::S_AND_B64; 5930 BuildMI(*MBB, Inst, Inst.getDebugLoc(), get(Opc), VCC) 5931 .addReg(EXEC) 5932 .addReg(IsSCC ? VCC : CondReg); 5933 Inst.RemoveOperand(1); 5934 } 5935 break; 5936 5937 case AMDGPU::S_BFE_U64: 5938 case AMDGPU::S_BFM_B64: 5939 llvm_unreachable("Moving this op to VALU not implemented"); 5940 5941 case AMDGPU::S_PACK_LL_B32_B16: 5942 case AMDGPU::S_PACK_LH_B32_B16: 5943 case AMDGPU::S_PACK_HH_B32_B16: 5944 movePackToVALU(Worklist, MRI, Inst); 5945 Inst.eraseFromParent(); 5946 continue; 5947 5948 case AMDGPU::S_XNOR_B32: 5949 lowerScalarXnor(Worklist, Inst); 5950 Inst.eraseFromParent(); 5951 continue; 5952 5953 case AMDGPU::S_NAND_B32: 5954 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_AND_B32); 5955 Inst.eraseFromParent(); 5956 continue; 5957 5958 case AMDGPU::S_NOR_B32: 5959 splitScalarNotBinop(Worklist, Inst, AMDGPU::S_OR_B32); 5960 Inst.eraseFromParent(); 5961 continue; 5962 5963 case AMDGPU::S_ANDN2_B32: 5964 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_AND_B32); 5965 Inst.eraseFromParent(); 5966 continue; 5967 5968 case AMDGPU::S_ORN2_B32: 5969 splitScalarBinOpN2(Worklist, Inst, AMDGPU::S_OR_B32); 5970 Inst.eraseFromParent(); 5971 continue; 5972 5973 // TODO: remove as soon as everything is ready 5974 // to replace VGPR to SGPR copy with V_READFIRSTLANEs. 5975 // S_ADD/SUB_CO_PSEUDO as well as S_UADDO/USUBO_PSEUDO 5976 // can only be selected from the uniform SDNode. 5977 case AMDGPU::S_ADD_CO_PSEUDO: 5978 case AMDGPU::S_SUB_CO_PSEUDO: { 5979 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_ADD_CO_PSEUDO) 5980 ? AMDGPU::V_ADDC_U32_e64 5981 : AMDGPU::V_SUBB_U32_e64; 5982 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 5983 5984 Register CarryInReg = Inst.getOperand(4).getReg(); 5985 if (!MRI.constrainRegClass(CarryInReg, CarryRC)) { 5986 Register NewCarryReg = MRI.createVirtualRegister(CarryRC); 5987 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(AMDGPU::COPY), NewCarryReg) 5988 .addReg(CarryInReg); 5989 } 5990 5991 Register CarryOutReg = Inst.getOperand(1).getReg(); 5992 5993 Register DestReg = MRI.createVirtualRegister(RI.getEquivalentVGPRClass( 5994 MRI.getRegClass(Inst.getOperand(0).getReg()))); 5995 MachineInstr *CarryOp = 5996 BuildMI(*MBB, &Inst, Inst.getDebugLoc(), get(Opc), DestReg) 5997 .addReg(CarryOutReg, RegState::Define) 5998 .add(Inst.getOperand(2)) 5999 .add(Inst.getOperand(3)) 6000 .addReg(CarryInReg) 6001 .addImm(0); 6002 CreatedBBTmp = legalizeOperands(*CarryOp); 6003 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6004 CreatedBB = CreatedBBTmp; 6005 MRI.replaceRegWith(Inst.getOperand(0).getReg(), DestReg); 6006 addUsersToMoveToVALUWorklist(DestReg, MRI, Worklist); 6007 Inst.eraseFromParent(); 6008 } 6009 continue; 6010 case AMDGPU::S_UADDO_PSEUDO: 6011 case AMDGPU::S_USUBO_PSEUDO: { 6012 const DebugLoc &DL = Inst.getDebugLoc(); 6013 MachineOperand &Dest0 = Inst.getOperand(0); 6014 MachineOperand &Dest1 = Inst.getOperand(1); 6015 MachineOperand &Src0 = Inst.getOperand(2); 6016 MachineOperand &Src1 = Inst.getOperand(3); 6017 6018 unsigned Opc = (Inst.getOpcode() == AMDGPU::S_UADDO_PSEUDO) 6019 ? 
AMDGPU::V_ADD_CO_U32_e64 6020 : AMDGPU::V_SUB_CO_U32_e64; 6021 const TargetRegisterClass *NewRC = 6022 RI.getEquivalentVGPRClass(MRI.getRegClass(Dest0.getReg())); 6023 Register DestReg = MRI.createVirtualRegister(NewRC); 6024 MachineInstr *NewInstr = BuildMI(*MBB, &Inst, DL, get(Opc), DestReg) 6025 .addReg(Dest1.getReg(), RegState::Define) 6026 .add(Src0) 6027 .add(Src1) 6028 .addImm(0); // clamp bit 6029 6030 CreatedBBTmp = legalizeOperands(*NewInstr, MDT); 6031 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6032 CreatedBB = CreatedBBTmp; 6033 6034 MRI.replaceRegWith(Dest0.getReg(), DestReg); 6035 addUsersToMoveToVALUWorklist(NewInstr->getOperand(0).getReg(), MRI, 6036 Worklist); 6037 Inst.eraseFromParent(); 6038 } 6039 continue; 6040 6041 case AMDGPU::S_CSELECT_B32: 6042 lowerSelect32(Worklist, Inst, MDT); 6043 Inst.eraseFromParent(); 6044 continue; 6045 case AMDGPU::S_CSELECT_B64: 6046 splitSelect64(Worklist, Inst, MDT); 6047 Inst.eraseFromParent(); 6048 continue; 6049 case AMDGPU::S_CMP_EQ_I32: 6050 case AMDGPU::S_CMP_LG_I32: 6051 case AMDGPU::S_CMP_GT_I32: 6052 case AMDGPU::S_CMP_GE_I32: 6053 case AMDGPU::S_CMP_LT_I32: 6054 case AMDGPU::S_CMP_LE_I32: 6055 case AMDGPU::S_CMP_EQ_U32: 6056 case AMDGPU::S_CMP_LG_U32: 6057 case AMDGPU::S_CMP_GT_U32: 6058 case AMDGPU::S_CMP_GE_U32: 6059 case AMDGPU::S_CMP_LT_U32: 6060 case AMDGPU::S_CMP_LE_U32: 6061 case AMDGPU::S_CMP_EQ_U64: 6062 case AMDGPU::S_CMP_LG_U64: { 6063 const MCInstrDesc &NewDesc = get(NewOpcode); 6064 Register CondReg = MRI.createVirtualRegister(RI.getWaveMaskRegClass()); 6065 MachineInstr *NewInstr = 6066 BuildMI(*MBB, Inst, Inst.getDebugLoc(), NewDesc, CondReg) 6067 .add(Inst.getOperand(0)) 6068 .add(Inst.getOperand(1)); 6069 legalizeOperands(*NewInstr, MDT); 6070 int SCCIdx = Inst.findRegisterDefOperandIdx(AMDGPU::SCC); 6071 MachineOperand SCCOp = Inst.getOperand(SCCIdx); 6072 addSCCDefUsersToVALUWorklist(SCCOp, Inst, Worklist, CondReg); 6073 Inst.eraseFromParent(); 6074 } 6075 continue; 6076 } 6077 6078 6079 if (NewOpcode == AMDGPU::INSTRUCTION_LIST_END) { 6080 // We cannot move this instruction to the VALU, so we should try to 6081 // legalize its operands instead. 6082 CreatedBBTmp = legalizeOperands(Inst, MDT); 6083 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6084 CreatedBB = CreatedBBTmp; 6085 continue; 6086 } 6087 6088 // Use the new VALU Opcode. 6089 const MCInstrDesc &NewDesc = get(NewOpcode); 6090 Inst.setDesc(NewDesc); 6091 6092 // Remove any references to SCC. Vector instructions can't read from it, and 6093 // We're just about to add the implicit use / defs of VCC, and we don't want 6094 // both. 6095 for (unsigned i = Inst.getNumOperands() - 1; i > 0; --i) { 6096 MachineOperand &Op = Inst.getOperand(i); 6097 if (Op.isReg() && Op.getReg() == AMDGPU::SCC) { 6098 // Only propagate through live-def of SCC. 6099 if (Op.isDef() && !Op.isDead()) 6100 addSCCDefUsersToVALUWorklist(Op, Inst, Worklist); 6101 if (Op.isUse()) 6102 addSCCDefsToVALUWorklist(Op, Worklist); 6103 Inst.RemoveOperand(i); 6104 } 6105 } 6106 6107 if (Opcode == AMDGPU::S_SEXT_I32_I8 || Opcode == AMDGPU::S_SEXT_I32_I16) { 6108 // We are converting these to a BFE, so we need to add the missing 6109 // operands for the size and offset. 6110 unsigned Size = (Opcode == AMDGPU::S_SEXT_I32_I8) ? 
8 : 16; 6111 Inst.addOperand(MachineOperand::CreateImm(0)); 6112 Inst.addOperand(MachineOperand::CreateImm(Size)); 6113 6114 } else if (Opcode == AMDGPU::S_BCNT1_I32_B32) { 6115 // The VALU version adds the second operand to the result, so insert an 6116 // extra 0 operand. 6117 Inst.addOperand(MachineOperand::CreateImm(0)); 6118 } 6119 6120 Inst.addImplicitDefUseOperands(*Inst.getParent()->getParent()); 6121 fixImplicitOperands(Inst); 6122 6123 if (Opcode == AMDGPU::S_BFE_I32 || Opcode == AMDGPU::S_BFE_U32) { 6124 const MachineOperand &OffsetWidthOp = Inst.getOperand(2); 6125 // If we need to move this to VGPRs, we need to unpack the second operand 6126 // back into the 2 separate ones for bit offset and width. 6127 assert(OffsetWidthOp.isImm() && 6128 "Scalar BFE is only implemented for constant width and offset"); 6129 uint32_t Imm = OffsetWidthOp.getImm(); 6130 6131 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0]. 6132 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16]. 6133 Inst.RemoveOperand(2); // Remove old immediate. 6134 Inst.addOperand(MachineOperand::CreateImm(Offset)); 6135 Inst.addOperand(MachineOperand::CreateImm(BitWidth)); 6136 } 6137 6138 bool HasDst = Inst.getOperand(0).isReg() && Inst.getOperand(0).isDef(); 6139 unsigned NewDstReg = AMDGPU::NoRegister; 6140 if (HasDst) { 6141 Register DstReg = Inst.getOperand(0).getReg(); 6142 if (DstReg.isPhysical()) 6143 continue; 6144 6145 // Update the destination register class. 6146 const TargetRegisterClass *NewDstRC = getDestEquivalentVGPRClass(Inst); 6147 if (!NewDstRC) 6148 continue; 6149 6150 if (Inst.isCopy() && Inst.getOperand(1).getReg().isVirtual() && 6151 NewDstRC == RI.getRegClassForReg(MRI, Inst.getOperand(1).getReg())) { 6152 // Instead of creating a copy where src and dst are the same register 6153 // class, we just replace all uses of dst with src. These kinds of 6154 // copies interfere with the heuristics MachineSink uses to decide 6155 // whether or not to split a critical edge. Since the pass assumes 6156 // that copies will end up as machine instructions and not be 6157 // eliminated. 6158 addUsersToMoveToVALUWorklist(DstReg, MRI, Worklist); 6159 MRI.replaceRegWith(DstReg, Inst.getOperand(1).getReg()); 6160 MRI.clearKillFlags(Inst.getOperand(1).getReg()); 6161 Inst.getOperand(0).setReg(DstReg); 6162 6163 // Make sure we don't leave around a dead VGPR->SGPR copy. Normally 6164 // these are deleted later, but at -O0 it would leave a suspicious 6165 // looking illegal copy of an undef register. 6166 for (unsigned I = Inst.getNumOperands() - 1; I != 0; --I) 6167 Inst.RemoveOperand(I); 6168 Inst.setDesc(get(AMDGPU::IMPLICIT_DEF)); 6169 continue; 6170 } 6171 6172 NewDstReg = MRI.createVirtualRegister(NewDstRC); 6173 MRI.replaceRegWith(DstReg, NewDstReg); 6174 } 6175 6176 // Legalize the operands 6177 CreatedBBTmp = legalizeOperands(Inst, MDT); 6178 if (CreatedBBTmp && TopInst.getParent() == CreatedBBTmp) 6179 CreatedBB = CreatedBBTmp; 6180 6181 if (HasDst) 6182 addUsersToMoveToVALUWorklist(NewDstReg, MRI, Worklist); 6183 } 6184 return CreatedBB; 6185 } 6186 6187 // Add/sub require special handling to deal with carry outs. 6188 std::pair<bool, MachineBasicBlock *> 6189 SIInstrInfo::moveScalarAddSub(SetVectorType &Worklist, MachineInstr &Inst, 6190 MachineDominatorTree *MDT) const { 6191 if (ST.hasAddNoCarry()) { 6192 // Assume there is no user of scc since we don't select this in that case. 6193 // Since scc isn't used, it doesn't really matter if the i32 or u32 variant 6194 // is used. 
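// Illustrative rewrite (names invented, operand details elided): when SCC is
// unused,
//   %d:sgpr_32 = S_ADD_I32 %a, %b    (plus the scc def)
// is rewritten in place to
//   %d:vgpr_32 = V_ADD_U32_e64 %a, %b, 0   (trailing 0 is the clamp bit)
// and the users of %d are queued for VALU conversion.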
6195 6196 MachineBasicBlock &MBB = *Inst.getParent(); 6197 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6198 6199 Register OldDstReg = Inst.getOperand(0).getReg(); 6200 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6201 6202 unsigned Opc = Inst.getOpcode(); 6203 assert(Opc == AMDGPU::S_ADD_I32 || Opc == AMDGPU::S_SUB_I32); 6204 6205 unsigned NewOpc = Opc == AMDGPU::S_ADD_I32 ? 6206 AMDGPU::V_ADD_U32_e64 : AMDGPU::V_SUB_U32_e64; 6207 6208 assert(Inst.getOperand(3).getReg() == AMDGPU::SCC); 6209 Inst.RemoveOperand(3); 6210 6211 Inst.setDesc(get(NewOpc)); 6212 Inst.addOperand(MachineOperand::CreateImm(0)); // clamp bit 6213 Inst.addImplicitDefUseOperands(*MBB.getParent()); 6214 MRI.replaceRegWith(OldDstReg, ResultReg); 6215 MachineBasicBlock *NewBB = legalizeOperands(Inst, MDT); 6216 6217 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6218 return std::make_pair(true, NewBB); 6219 } 6220 6221 return std::make_pair(false, nullptr); 6222 } 6223 6224 void SIInstrInfo::lowerSelect32(SetVectorType &Worklist, MachineInstr &Inst, 6225 MachineDominatorTree *MDT) const { 6226 6227 MachineBasicBlock &MBB = *Inst.getParent(); 6228 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6229 MachineBasicBlock::iterator MII = Inst; 6230 DebugLoc DL = Inst.getDebugLoc(); 6231 6232 MachineOperand &Dest = Inst.getOperand(0); 6233 MachineOperand &Src0 = Inst.getOperand(1); 6234 MachineOperand &Src1 = Inst.getOperand(2); 6235 MachineOperand &Cond = Inst.getOperand(3); 6236 6237 Register SCCSource = Cond.getReg(); 6238 bool IsSCC = (SCCSource == AMDGPU::SCC); 6239 6240 // If this is a trivial select where the condition is effectively not SCC 6241 // (SCCSource is a source of copy to SCC), then the select is semantically 6242 // equivalent to copying SCCSource. Hence, there is no need to create 6243 // V_CNDMASK, we can just use that and bail out. 6244 if (!IsSCC && Src0.isImm() && (Src0.getImm() == -1) && Src1.isImm() && 6245 (Src1.getImm() == 0)) { 6246 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6247 return; 6248 } 6249 6250 const TargetRegisterClass *TC = 6251 RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6252 6253 Register CopySCC = MRI.createVirtualRegister(TC); 6254 6255 if (IsSCC) { 6256 // Now look for the closest SCC def if it is a copy 6257 // replacing the SCCSource with the COPY source register 6258 bool CopyFound = false; 6259 for (MachineInstr &CandI : 6260 make_range(std::next(MachineBasicBlock::reverse_iterator(Inst)), 6261 Inst.getParent()->rend())) { 6262 if (CandI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != 6263 -1) { 6264 if (CandI.isCopy() && CandI.getOperand(0).getReg() == AMDGPU::SCC) { 6265 BuildMI(MBB, MII, DL, get(AMDGPU::COPY), CopySCC) 6266 .addReg(CandI.getOperand(1).getReg()); 6267 CopyFound = true; 6268 } 6269 break; 6270 } 6271 } 6272 if (!CopyFound) { 6273 // SCC def is not a copy 6274 // Insert a trivial select instead of creating a copy, because a copy from 6275 // SCC would semantically mean just copying a single bit, but we may need 6276 // the result to be a vector condition mask that needs preserving. 6277 unsigned Opcode = (ST.getWavefrontSize() == 64) ? 
AMDGPU::S_CSELECT_B64 6278 : AMDGPU::S_CSELECT_B32; 6279 auto NewSelect = 6280 BuildMI(MBB, MII, DL, get(Opcode), CopySCC).addImm(-1).addImm(0); 6281 NewSelect->getOperand(3).setIsUndef(Cond.isUndef()); 6282 } 6283 } 6284 6285 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6286 6287 auto UpdatedInst = 6288 BuildMI(MBB, MII, DL, get(AMDGPU::V_CNDMASK_B32_e64), ResultReg) 6289 .addImm(0) 6290 .add(Src1) // False 6291 .addImm(0) 6292 .add(Src0) // True 6293 .addReg(IsSCC ? CopySCC : SCCSource); 6294 6295 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6296 legalizeOperands(*UpdatedInst, MDT); 6297 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6298 } 6299 6300 void SIInstrInfo::splitSelect64(SetVectorType &Worklist, MachineInstr &Inst, 6301 MachineDominatorTree *MDT) const { 6302 // Split S_CSELECT_B64 into a pair of S_CSELECT_B32 and lower them 6303 // further. 6304 const DebugLoc &DL = Inst.getDebugLoc(); 6305 MachineBasicBlock::iterator MII = Inst; 6306 MachineBasicBlock &MBB = *Inst.getParent(); 6307 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6308 6309 // Get the original operands. 6310 MachineOperand &Dest = Inst.getOperand(0); 6311 MachineOperand &Src0 = Inst.getOperand(1); 6312 MachineOperand &Src1 = Inst.getOperand(2); 6313 MachineOperand &Cond = Inst.getOperand(3); 6314 6315 Register SCCSource = Cond.getReg(); 6316 bool IsSCC = (SCCSource == AMDGPU::SCC); 6317 6318 // If this is a trivial select where the condition is effectively not SCC 6319 // (SCCSource is a source of copy to SCC), then the select is semantically 6320 // equivalent to copying SCCSource. Hence, there is no need to create 6321 // V_CNDMASK, we can just use that and bail out. 6322 if (!IsSCC && (Src0.isImm() && Src0.getImm() == -1) && 6323 (Src1.isImm() && Src1.getImm() == 0)) { 6324 MRI.replaceRegWith(Dest.getReg(), SCCSource); 6325 return; 6326 } 6327 6328 // Prepare the split destination. 6329 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6330 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6331 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6332 6333 // Split the source operands. 6334 const TargetRegisterClass *Src0RC = nullptr; 6335 const TargetRegisterClass *Src0SubRC = nullptr; 6336 if (Src0.isReg()) { 6337 Src0RC = MRI.getRegClass(Src0.getReg()); 6338 Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6339 } 6340 const TargetRegisterClass *Src1RC = nullptr; 6341 const TargetRegisterClass *Src1SubRC = nullptr; 6342 if (Src1.isReg()) { 6343 Src1RC = MRI.getRegClass(Src1.getReg()); 6344 Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6345 } 6346 // Split lo. 6347 MachineOperand SrcReg0Sub0 = 6348 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub0, Src0SubRC); 6349 MachineOperand SrcReg1Sub0 = 6350 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub0, Src1SubRC); 6351 // Split hi. 6352 MachineOperand SrcReg0Sub1 = 6353 buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, AMDGPU::sub1, Src0SubRC); 6354 MachineOperand SrcReg1Sub1 = 6355 buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, AMDGPU::sub1, Src1SubRC); 6356 // Select the lo part. 6357 MachineInstr *LoHalf = 6358 BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub0) 6359 .add(SrcReg0Sub0) 6360 .add(SrcReg1Sub0); 6361 // Replace the condition operand with the original one. 6362 LoHalf->getOperand(3).setReg(SCCSource); 6363 Worklist.insert(LoHalf); 6364 // Select the hi part. 
6365 MachineInstr *HiHalf =
6366 BuildMI(MBB, MII, DL, get(AMDGPU::S_CSELECT_B32), DestSub1)
6367 .add(SrcReg0Sub1)
6368 .add(SrcReg1Sub1);
6369 // Replace the condition operand with the original one.
6370 HiHalf->getOperand(3).setReg(SCCSource);
6371 Worklist.insert(HiHalf);
6372 // Merge them back to the original 64-bit one.
6373 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6374 .addReg(DestSub0)
6375 .addImm(AMDGPU::sub0)
6376 .addReg(DestSub1)
6377 .addImm(AMDGPU::sub1);
6378 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6379
6380 // Try to legalize the operands in case we need to swap the order to keep
6381 // it valid.
6382 legalizeOperands(*LoHalf, MDT);
6383 legalizeOperands(*HiHalf, MDT);
6384
6385 // Move all users of this moved value.
6386 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6387 }
6388
6389 void SIInstrInfo::lowerScalarAbs(SetVectorType &Worklist,
6390 MachineInstr &Inst) const {
6391 MachineBasicBlock &MBB = *Inst.getParent();
6392 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6393 MachineBasicBlock::iterator MII = Inst;
6394 DebugLoc DL = Inst.getDebugLoc();
6395
6396 MachineOperand &Dest = Inst.getOperand(0);
6397 MachineOperand &Src = Inst.getOperand(1);
6398 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6399 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6400
6401 unsigned SubOp = ST.hasAddNoCarry() ?
6402 AMDGPU::V_SUB_U32_e32 : AMDGPU::V_SUB_CO_U32_e32;
6403
6404 BuildMI(MBB, MII, DL, get(SubOp), TmpReg)
6405 .addImm(0)
6406 .addReg(Src.getReg());
6407
6408 BuildMI(MBB, MII, DL, get(AMDGPU::V_MAX_I32_e64), ResultReg)
6409 .addReg(Src.getReg())
6410 .addReg(TmpReg);
6411
6412 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6413 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6414 }
6415
6416 void SIInstrInfo::lowerScalarXnor(SetVectorType &Worklist,
6417 MachineInstr &Inst) const {
6418 MachineBasicBlock &MBB = *Inst.getParent();
6419 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6420 MachineBasicBlock::iterator MII = Inst;
6421 const DebugLoc &DL = Inst.getDebugLoc();
6422
6423 MachineOperand &Dest = Inst.getOperand(0);
6424 MachineOperand &Src0 = Inst.getOperand(1);
6425 MachineOperand &Src1 = Inst.getOperand(2);
6426
6427 if (ST.hasDLInsts()) {
6428 Register NewDest = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6429 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src0, MRI, DL);
6430 legalizeGenericOperand(MBB, MII, &AMDGPU::VGPR_32RegClass, Src1, MRI, DL);
6431
6432 BuildMI(MBB, MII, DL, get(AMDGPU::V_XNOR_B32_e64), NewDest)
6433 .add(Src0)
6434 .add(Src1);
6435
6436 MRI.replaceRegWith(Dest.getReg(), NewDest);
6437 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist);
6438 } else {
6439 // Using the identity !(x ^ y) == (!x ^ y) == (x ^ !y), we can
6440 // invert either source and then perform the XOR. If either source is a
6441 // scalar register, then we can leave the inversion on the scalar unit to
6442 // achieve a better distribution of scalar and vector instructions.
6443 bool Src0IsSGPR = Src0.isReg() &&
6444 RI.isSGPRClass(MRI.getRegClass(Src0.getReg()));
6445 bool Src1IsSGPR = Src1.isReg() &&
6446 RI.isSGPRClass(MRI.getRegClass(Src1.getReg()));
6447 MachineInstr *Xor;
6448 Register Temp = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6449 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
6450
6451 // Build a pair of scalar instructions and add them to the work list.
6452 // The next iteration over the work list will lower these to the vector 6453 // unit as necessary. 6454 if (Src0IsSGPR) { 6455 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src0); 6456 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6457 .addReg(Temp) 6458 .add(Src1); 6459 } else if (Src1IsSGPR) { 6460 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Temp).add(Src1); 6461 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), NewDest) 6462 .add(Src0) 6463 .addReg(Temp); 6464 } else { 6465 Xor = BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B32), Temp) 6466 .add(Src0) 6467 .add(Src1); 6468 MachineInstr *Not = 6469 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest).addReg(Temp); 6470 Worklist.insert(Not); 6471 } 6472 6473 MRI.replaceRegWith(Dest.getReg(), NewDest); 6474 6475 Worklist.insert(Xor); 6476 6477 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6478 } 6479 } 6480 6481 void SIInstrInfo::splitScalarNotBinop(SetVectorType &Worklist, 6482 MachineInstr &Inst, 6483 unsigned Opcode) const { 6484 MachineBasicBlock &MBB = *Inst.getParent(); 6485 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6486 MachineBasicBlock::iterator MII = Inst; 6487 const DebugLoc &DL = Inst.getDebugLoc(); 6488 6489 MachineOperand &Dest = Inst.getOperand(0); 6490 MachineOperand &Src0 = Inst.getOperand(1); 6491 MachineOperand &Src1 = Inst.getOperand(2); 6492 6493 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6494 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 6495 6496 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), Interm) 6497 .add(Src0) 6498 .add(Src1); 6499 6500 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), NewDest) 6501 .addReg(Interm); 6502 6503 Worklist.insert(&Op); 6504 Worklist.insert(&Not); 6505 6506 MRI.replaceRegWith(Dest.getReg(), NewDest); 6507 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6508 } 6509 6510 void SIInstrInfo::splitScalarBinOpN2(SetVectorType& Worklist, 6511 MachineInstr &Inst, 6512 unsigned Opcode) const { 6513 MachineBasicBlock &MBB = *Inst.getParent(); 6514 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6515 MachineBasicBlock::iterator MII = Inst; 6516 const DebugLoc &DL = Inst.getDebugLoc(); 6517 6518 MachineOperand &Dest = Inst.getOperand(0); 6519 MachineOperand &Src0 = Inst.getOperand(1); 6520 MachineOperand &Src1 = Inst.getOperand(2); 6521 6522 Register NewDest = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6523 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass); 6524 6525 MachineInstr &Not = *BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B32), Interm) 6526 .add(Src1); 6527 6528 MachineInstr &Op = *BuildMI(MBB, MII, DL, get(Opcode), NewDest) 6529 .add(Src0) 6530 .addReg(Interm); 6531 6532 Worklist.insert(&Not); 6533 Worklist.insert(&Op); 6534 6535 MRI.replaceRegWith(Dest.getReg(), NewDest); 6536 addUsersToMoveToVALUWorklist(NewDest, MRI, Worklist); 6537 } 6538 6539 void SIInstrInfo::splitScalar64BitUnaryOp( 6540 SetVectorType &Worklist, MachineInstr &Inst, 6541 unsigned Opcode, bool Swap) const { 6542 MachineBasicBlock &MBB = *Inst.getParent(); 6543 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6544 6545 MachineOperand &Dest = Inst.getOperand(0); 6546 MachineOperand &Src0 = Inst.getOperand(1); 6547 DebugLoc DL = Inst.getDebugLoc(); 6548 6549 MachineBasicBlock::iterator MII = Inst; 6550 6551 const MCInstrDesc &InstDesc = get(Opcode); 6552 const TargetRegisterClass *Src0RC = Src0.isReg() ? 
6553 MRI.getRegClass(Src0.getReg()) : 6554 &AMDGPU::SGPR_32RegClass; 6555 6556 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6557 6558 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6559 AMDGPU::sub0, Src0SubRC); 6560 6561 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg()); 6562 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC); 6563 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0); 6564 6565 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC); 6566 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0).add(SrcReg0Sub0); 6567 6568 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6569 AMDGPU::sub1, Src0SubRC); 6570 6571 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC); 6572 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1).add(SrcReg0Sub1); 6573 6574 if (Swap) 6575 std::swap(DestSub0, DestSub1); 6576 6577 Register FullDestReg = MRI.createVirtualRegister(NewDestRC); 6578 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg) 6579 .addReg(DestSub0) 6580 .addImm(AMDGPU::sub0) 6581 .addReg(DestSub1) 6582 .addImm(AMDGPU::sub1); 6583 6584 MRI.replaceRegWith(Dest.getReg(), FullDestReg); 6585 6586 Worklist.insert(&LoHalf); 6587 Worklist.insert(&HiHalf); 6588 6589 // We don't need to legalizeOperands here because for a single operand, src0 6590 // will support any kind of input. 6591 6592 // Move all users of this moved value. 6593 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist); 6594 } 6595 6596 void SIInstrInfo::splitScalar64BitAddSub(SetVectorType &Worklist, 6597 MachineInstr &Inst, 6598 MachineDominatorTree *MDT) const { 6599 bool IsAdd = (Inst.getOpcode() == AMDGPU::S_ADD_U64_PSEUDO); 6600 6601 MachineBasicBlock &MBB = *Inst.getParent(); 6602 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 6603 const auto *CarryRC = RI.getRegClass(AMDGPU::SReg_1_XEXECRegClassID); 6604 6605 Register FullDestReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass); 6606 Register DestSub0 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6607 Register DestSub1 = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6608 6609 Register CarryReg = MRI.createVirtualRegister(CarryRC); 6610 Register DeadCarryReg = MRI.createVirtualRegister(CarryRC); 6611 6612 MachineOperand &Dest = Inst.getOperand(0); 6613 MachineOperand &Src0 = Inst.getOperand(1); 6614 MachineOperand &Src1 = Inst.getOperand(2); 6615 const DebugLoc &DL = Inst.getDebugLoc(); 6616 MachineBasicBlock::iterator MII = Inst; 6617 6618 const TargetRegisterClass *Src0RC = MRI.getRegClass(Src0.getReg()); 6619 const TargetRegisterClass *Src1RC = MRI.getRegClass(Src1.getReg()); 6620 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0); 6621 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0); 6622 6623 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6624 AMDGPU::sub0, Src0SubRC); 6625 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6626 AMDGPU::sub0, Src1SubRC); 6627 6628 6629 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC, 6630 AMDGPU::sub1, Src0SubRC); 6631 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC, 6632 AMDGPU::sub1, Src1SubRC); 6633 6634 unsigned LoOpc = IsAdd ? 
AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
6635 MachineInstr *LoHalf =
6636 BuildMI(MBB, MII, DL, get(LoOpc), DestSub0)
6637 .addReg(CarryReg, RegState::Define)
6638 .add(SrcReg0Sub0)
6639 .add(SrcReg1Sub0)
6640 .addImm(0); // clamp bit
6641
6642 unsigned HiOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
6643 MachineInstr *HiHalf =
6644 BuildMI(MBB, MII, DL, get(HiOpc), DestSub1)
6645 .addReg(DeadCarryReg, RegState::Define | RegState::Dead)
6646 .add(SrcReg0Sub1)
6647 .add(SrcReg1Sub1)
6648 .addReg(CarryReg, RegState::Kill)
6649 .addImm(0); // clamp bit
6650
6651 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6652 .addReg(DestSub0)
6653 .addImm(AMDGPU::sub0)
6654 .addReg(DestSub1)
6655 .addImm(AMDGPU::sub1);
6656
6657 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6658
6659 // Try to legalize the operands in case we need to swap the order to keep it
6660 // valid.
6661 legalizeOperands(*LoHalf, MDT);
6662 legalizeOperands(*HiHalf, MDT);
6663
6664 // Move all users of this moved value.
6665 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6666 }
6667
6668 void SIInstrInfo::splitScalar64BitBinaryOp(SetVectorType &Worklist,
6669 MachineInstr &Inst, unsigned Opcode,
6670 MachineDominatorTree *MDT) const {
6671 MachineBasicBlock &MBB = *Inst.getParent();
6672 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6673
6674 MachineOperand &Dest = Inst.getOperand(0);
6675 MachineOperand &Src0 = Inst.getOperand(1);
6676 MachineOperand &Src1 = Inst.getOperand(2);
6677 DebugLoc DL = Inst.getDebugLoc();
6678
6679 MachineBasicBlock::iterator MII = Inst;
6680
6681 const MCInstrDesc &InstDesc = get(Opcode);
6682 const TargetRegisterClass *Src0RC = Src0.isReg() ?
6683 MRI.getRegClass(Src0.getReg()) :
6684 &AMDGPU::SGPR_32RegClass;
6685
6686 const TargetRegisterClass *Src0SubRC = RI.getSubRegClass(Src0RC, AMDGPU::sub0);
6687 const TargetRegisterClass *Src1RC = Src1.isReg() ?
6688 MRI.getRegClass(Src1.getReg()) :
6689 &AMDGPU::SGPR_32RegClass;
6690
6691 const TargetRegisterClass *Src1SubRC = RI.getSubRegClass(Src1RC, AMDGPU::sub0);
6692
6693 MachineOperand SrcReg0Sub0 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6694 AMDGPU::sub0, Src0SubRC);
6695 MachineOperand SrcReg1Sub0 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6696 AMDGPU::sub0, Src1SubRC);
6697 MachineOperand SrcReg0Sub1 = buildExtractSubRegOrImm(MII, MRI, Src0, Src0RC,
6698 AMDGPU::sub1, Src0SubRC);
6699 MachineOperand SrcReg1Sub1 = buildExtractSubRegOrImm(MII, MRI, Src1, Src1RC,
6700 AMDGPU::sub1, Src1SubRC);
6701
6702 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6703 const TargetRegisterClass *NewDestRC = RI.getEquivalentVGPRClass(DestRC);
6704 const TargetRegisterClass *NewDestSubRC = RI.getSubRegClass(NewDestRC, AMDGPU::sub0);
6705
6706 Register DestSub0 = MRI.createVirtualRegister(NewDestSubRC);
6707 MachineInstr &LoHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub0)
6708 .add(SrcReg0Sub0)
6709 .add(SrcReg1Sub0);
6710
6711 Register DestSub1 = MRI.createVirtualRegister(NewDestSubRC);
6712 MachineInstr &HiHalf = *BuildMI(MBB, MII, DL, InstDesc, DestSub1)
6713 .add(SrcReg0Sub1)
6714 .add(SrcReg1Sub1);
6715
6716 Register FullDestReg = MRI.createVirtualRegister(NewDestRC);
6717 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), FullDestReg)
6718 .addReg(DestSub0)
6719 .addImm(AMDGPU::sub0)
6720 .addReg(DestSub1)
6721 .addImm(AMDGPU::sub1);
6722
6723 MRI.replaceRegWith(Dest.getReg(), FullDestReg);
6724
6725 Worklist.insert(&LoHalf);
6726 Worklist.insert(&HiHalf);
6727
6728 // Move all users of this moved value.
6729 addUsersToMoveToVALUWorklist(FullDestReg, MRI, Worklist);
6730 }
6731
6732 void SIInstrInfo::splitScalar64BitXnor(SetVectorType &Worklist,
6733 MachineInstr &Inst,
6734 MachineDominatorTree *MDT) const {
6735 MachineBasicBlock &MBB = *Inst.getParent();
6736 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6737
6738 MachineOperand &Dest = Inst.getOperand(0);
6739 MachineOperand &Src0 = Inst.getOperand(1);
6740 MachineOperand &Src1 = Inst.getOperand(2);
6741 const DebugLoc &DL = Inst.getDebugLoc();
6742
6743 MachineBasicBlock::iterator MII = Inst;
6744
6745 const TargetRegisterClass *DestRC = MRI.getRegClass(Dest.getReg());
6746
6747 Register Interm = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
6748
6749 MachineOperand* Op0;
6750 MachineOperand* Op1;
6751
6752 if (Src0.isReg() && RI.isSGPRReg(MRI, Src0.getReg())) {
6753 Op0 = &Src0;
6754 Op1 = &Src1;
6755 } else {
6756 Op0 = &Src1;
6757 Op1 = &Src0;
6758 }
6759
6760 BuildMI(MBB, MII, DL, get(AMDGPU::S_NOT_B64), Interm)
6761 .add(*Op0);
6762
6763 Register NewDest = MRI.createVirtualRegister(DestRC);
6764
6765 MachineInstr &Xor = *BuildMI(MBB, MII, DL, get(AMDGPU::S_XOR_B64), NewDest)
6766 .addReg(Interm)
6767 .add(*Op1);
6768
6769 MRI.replaceRegWith(Dest.getReg(), NewDest);
6770
6771 Worklist.insert(&Xor);
6772 }
6773
6774 void SIInstrInfo::splitScalar64BitBCNT(
6775 SetVectorType &Worklist, MachineInstr &Inst) const {
6776 MachineBasicBlock &MBB = *Inst.getParent();
6777 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6778
6779 MachineBasicBlock::iterator MII = Inst;
6780 const DebugLoc &DL = Inst.getDebugLoc();
6781
6782 MachineOperand &Dest = Inst.getOperand(0);
6783 MachineOperand &Src = Inst.getOperand(1);
6784
6785 const MCInstrDesc &InstDesc = get(AMDGPU::V_BCNT_U32_B32_e64);
6786 const TargetRegisterClass *SrcRC = Src.isReg() ?
6787 MRI.getRegClass(Src.getReg()) :
6788 &AMDGPU::SGPR_32RegClass;
6789
6790 Register MidReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6791 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6792
6793 const TargetRegisterClass *SrcSubRC = RI.getSubRegClass(SrcRC, AMDGPU::sub0);
6794
6795 MachineOperand SrcRegSub0 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6796 AMDGPU::sub0, SrcSubRC);
6797 MachineOperand SrcRegSub1 = buildExtractSubRegOrImm(MII, MRI, Src, SrcRC,
6798 AMDGPU::sub1, SrcSubRC);
6799
6800 BuildMI(MBB, MII, DL, InstDesc, MidReg).add(SrcRegSub0).addImm(0);
6801
6802 BuildMI(MBB, MII, DL, InstDesc, ResultReg).add(SrcRegSub1).addReg(MidReg);
6803
6804 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6805
6806 // We don't need to legalize operands here. src0 for either instruction can be
6807 // an SGPR, and the second input is unused or determined here.
6808 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6809 }
6810
6811 void SIInstrInfo::splitScalar64BitBFE(SetVectorType &Worklist,
6812 MachineInstr &Inst) const {
6813 MachineBasicBlock &MBB = *Inst.getParent();
6814 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
6815 MachineBasicBlock::iterator MII = Inst;
6816 const DebugLoc &DL = Inst.getDebugLoc();
6817
6818 MachineOperand &Dest = Inst.getOperand(0);
6819 uint32_t Imm = Inst.getOperand(2).getImm();
6820 uint32_t Offset = Imm & 0x3f; // Extract bits [5:0].
6821 uint32_t BitWidth = (Imm & 0x7f0000) >> 16; // Extract bits [22:16].
6822
6823 (void) Offset;
6824
6825 // Only sext_inreg cases handled.
6826 assert(Inst.getOpcode() == AMDGPU::S_BFE_I64 && BitWidth <= 32 &&
6827 Offset == 0 && "Not implemented");
6828
6829 if (BitWidth < 32) {
6830 Register MidRegLo = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6831 Register MidRegHi = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6832 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6833
6834 BuildMI(MBB, MII, DL, get(AMDGPU::V_BFE_I32_e64), MidRegLo)
6835 .addReg(Inst.getOperand(1).getReg(), 0, AMDGPU::sub0)
6836 .addImm(0)
6837 .addImm(BitWidth);
6838
6839 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e32), MidRegHi)
6840 .addImm(31)
6841 .addReg(MidRegLo);
6842
6843 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6844 .addReg(MidRegLo)
6845 .addImm(AMDGPU::sub0)
6846 .addReg(MidRegHi)
6847 .addImm(AMDGPU::sub1);
6848
6849 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6850 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6851 return;
6852 }
6853
6854 MachineOperand &Src = Inst.getOperand(1);
6855 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
6856 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VReg_64RegClass);
6857
6858 BuildMI(MBB, MII, DL, get(AMDGPU::V_ASHRREV_I32_e64), TmpReg)
6859 .addImm(31)
6860 .addReg(Src.getReg(), 0, AMDGPU::sub0);
6861
6862 BuildMI(MBB, MII, DL, get(TargetOpcode::REG_SEQUENCE), ResultReg)
6863 .addReg(Src.getReg(), 0, AMDGPU::sub0)
6864 .addImm(AMDGPU::sub0)
6865 .addReg(TmpReg)
6866 .addImm(AMDGPU::sub1);
6867
6868 MRI.replaceRegWith(Dest.getReg(), ResultReg);
6869 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist);
6870 }
6871
6872 void SIInstrInfo::addUsersToMoveToVALUWorklist(
6873 Register DstReg,
6874 MachineRegisterInfo &MRI,
6875 SetVectorType &Worklist) const {
6876 for (MachineRegisterInfo::use_iterator I = MRI.use_begin(DstReg),
6877 E = MRI.use_end(); I != E;) {
6878 MachineInstr &UseMI = *I->getParent();
6879
6880 unsigned OpNo =
0; 6881 6882 switch (UseMI.getOpcode()) { 6883 case AMDGPU::COPY: 6884 case AMDGPU::WQM: 6885 case AMDGPU::SOFT_WQM: 6886 case AMDGPU::STRICT_WWM: 6887 case AMDGPU::STRICT_WQM: 6888 case AMDGPU::REG_SEQUENCE: 6889 case AMDGPU::PHI: 6890 case AMDGPU::INSERT_SUBREG: 6891 break; 6892 default: 6893 OpNo = I.getOperandNo(); 6894 break; 6895 } 6896 6897 if (!RI.hasVectorRegisters(getOpRegClass(UseMI, OpNo))) { 6898 Worklist.insert(&UseMI); 6899 6900 do { 6901 ++I; 6902 } while (I != E && I->getParent() == &UseMI); 6903 } else { 6904 ++I; 6905 } 6906 } 6907 } 6908 6909 void SIInstrInfo::movePackToVALU(SetVectorType &Worklist, 6910 MachineRegisterInfo &MRI, 6911 MachineInstr &Inst) const { 6912 Register ResultReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6913 MachineBasicBlock *MBB = Inst.getParent(); 6914 MachineOperand &Src0 = Inst.getOperand(1); 6915 MachineOperand &Src1 = Inst.getOperand(2); 6916 const DebugLoc &DL = Inst.getDebugLoc(); 6917 6918 switch (Inst.getOpcode()) { 6919 case AMDGPU::S_PACK_LL_B32_B16: { 6920 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6921 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6922 6923 // FIXME: Can do a lot better if we know the high bits of src0 or src1 are 6924 // 0. 6925 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6926 .addImm(0xffff); 6927 6928 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_B32_e64), TmpReg) 6929 .addReg(ImmReg, RegState::Kill) 6930 .add(Src0); 6931 6932 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHL_OR_B32_e64), ResultReg) 6933 .add(Src1) 6934 .addImm(16) 6935 .addReg(TmpReg, RegState::Kill); 6936 break; 6937 } 6938 case AMDGPU::S_PACK_LH_B32_B16: { 6939 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6940 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6941 .addImm(0xffff); 6942 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_BFI_B32_e64), ResultReg) 6943 .addReg(ImmReg, RegState::Kill) 6944 .add(Src0) 6945 .add(Src1); 6946 break; 6947 } 6948 case AMDGPU::S_PACK_HH_B32_B16: { 6949 Register ImmReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6950 Register TmpReg = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass); 6951 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_LSHRREV_B32_e64), TmpReg) 6952 .addImm(16) 6953 .add(Src0); 6954 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_MOV_B32_e32), ImmReg) 6955 .addImm(0xffff0000); 6956 BuildMI(*MBB, Inst, DL, get(AMDGPU::V_AND_OR_B32_e64), ResultReg) 6957 .add(Src1) 6958 .addReg(ImmReg, RegState::Kill) 6959 .addReg(TmpReg, RegState::Kill); 6960 break; 6961 } 6962 default: 6963 llvm_unreachable("unhandled s_pack_* instruction"); 6964 } 6965 6966 MachineOperand &Dest = Inst.getOperand(0); 6967 MRI.replaceRegWith(Dest.getReg(), ResultReg); 6968 addUsersToMoveToVALUWorklist(ResultReg, MRI, Worklist); 6969 } 6970 6971 void SIInstrInfo::addSCCDefUsersToVALUWorklist(MachineOperand &Op, 6972 MachineInstr &SCCDefInst, 6973 SetVectorType &Worklist, 6974 Register NewCond) const { 6975 6976 // Ensure that def inst defines SCC, which is still live. 6977 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isDef() && 6978 !Op.isDead() && Op.getParent() == &SCCDefInst); 6979 SmallVector<MachineInstr *, 4> CopyToDelete; 6980 // This assumes that all the users of SCC are in the same block 6981 // as the SCC def. 6982 for (MachineInstr &MI : // Skip the def inst itself. 6983 make_range(std::next(MachineBasicBlock::iterator(SCCDefInst)), 6984 SCCDefInst.getParent()->end())) { 6985 // Check if SCC is used first. 
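// An illustrative sketch of the rewrites performed below (not taken from the
// original source): given
//   %d = S_AND_B64 %a, %b, implicit-def $scc   <- SCCDefInst
//   %c = COPY $scc
//   S_CSELECT_B32 ..., implicit $scc
// the COPY is erased and users of %c are rewired to NewCond, while the
// non-copy reader has its SCC use operand replaced by NewCond and is queued
// for moveToVALU.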
6986 int SCCIdx = MI.findRegisterUseOperandIdx(AMDGPU::SCC, false, &RI);
6987 if (SCCIdx != -1) {
6988 if (MI.isCopy()) {
6989 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
6990 Register DestReg = MI.getOperand(0).getReg();
6991
6992 MRI.replaceRegWith(DestReg, NewCond);
6993 CopyToDelete.push_back(&MI);
6994 } else {
6995
6996 if (NewCond.isValid())
6997 MI.getOperand(SCCIdx).setReg(NewCond);
6998
6999 Worklist.insert(&MI);
7000 }
7001 }
7002 // Exit if we find another SCC def.
7003 if (MI.findRegisterDefOperandIdx(AMDGPU::SCC, false, false, &RI) != -1)
7004 break;
7005 }
7006 for (auto &Copy : CopyToDelete)
7007 Copy->eraseFromParent();
7008 }
7009
7010 // Instructions that use SCC may be converted to VALU instructions. When that
7011 // happens, the SCC register is changed to VCC_LO. The instruction that defines
7012 // SCC must be changed to an instruction that defines VCC. This function makes
7013 // sure that the instruction that defines SCC is added to the moveToVALU
7014 // worklist.
7015 void SIInstrInfo::addSCCDefsToVALUWorklist(MachineOperand &Op,
7016 SetVectorType &Worklist) const {
7017 assert(Op.isReg() && Op.getReg() == AMDGPU::SCC && Op.isUse());
7018
7019 MachineInstr *SCCUseInst = Op.getParent();
7020 // Look for a preceding instruction that either defines VCC or SCC. If VCC
7021 // then there is nothing to do because the defining instruction has been
7022 // converted to a VALU already. If SCC then that instruction needs to be
7023 // converted to a VALU.
7024 for (MachineInstr &MI :
7025 make_range(std::next(MachineBasicBlock::reverse_iterator(SCCUseInst)),
7026 SCCUseInst->getParent()->rend())) {
7027 if (MI.modifiesRegister(AMDGPU::VCC, &RI))
7028 break;
7029 if (MI.definesRegister(AMDGPU::SCC, &RI)) {
7030 Worklist.insert(&MI);
7031 break;
7032 }
7033 }
7034 }
7035
7036 const TargetRegisterClass *SIInstrInfo::getDestEquivalentVGPRClass(
7037 const MachineInstr &Inst) const {
7038 const TargetRegisterClass *NewDstRC = getOpRegClass(Inst, 0);
7039
7040 switch (Inst.getOpcode()) {
7041 // For target instructions, getOpRegClass just returns the virtual register
7042 // class associated with the operand, so we need to find an equivalent VGPR
7043 // register class in order to move the instruction to the VALU.
7044 case AMDGPU::COPY:
7045 case AMDGPU::PHI:
7046 case AMDGPU::REG_SEQUENCE:
7047 case AMDGPU::INSERT_SUBREG:
7048 case AMDGPU::WQM:
7049 case AMDGPU::SOFT_WQM:
7050 case AMDGPU::STRICT_WWM:
7051 case AMDGPU::STRICT_WQM: {
7052 const TargetRegisterClass *SrcRC = getOpRegClass(Inst, 1);
7053 if (RI.hasAGPRs(SrcRC)) {
7054 if (RI.hasAGPRs(NewDstRC))
7055 return nullptr;
7056
7057 switch (Inst.getOpcode()) {
7058 case AMDGPU::PHI:
7059 case AMDGPU::REG_SEQUENCE:
7060 case AMDGPU::INSERT_SUBREG:
7061 NewDstRC = RI.getEquivalentAGPRClass(NewDstRC);
7062 break;
7063 default:
7064 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
7065 }
7066
7067 if (!NewDstRC)
7068 return nullptr;
7069 } else {
7070 if (RI.hasVGPRs(NewDstRC) || NewDstRC == &AMDGPU::VReg_1RegClass)
7071 return nullptr;
7072
7073 NewDstRC = RI.getEquivalentVGPRClass(NewDstRC);
7074 if (!NewDstRC)
7075 return nullptr;
7076 }
7077
7078 return NewDstRC;
7079 }
7080 default:
7081 return NewDstRC;
7082 }
7083 }
7084
7085 // Find the one SGPR operand we are allowed to use.
7086 Register SIInstrInfo::findUsedSGPR(const MachineInstr &MI,
7087 int OpIndices[3]) const {
7088 const MCInstrDesc &Desc = MI.getDesc();
7089
7090 // Find the one SGPR operand we are allowed to use.
7091 // 7092 // First we need to consider the instruction's operand requirements before 7093 // legalizing. Some operands are required to be SGPRs, such as implicit uses 7094 // of VCC, but we are still bound by the constant bus requirement to only use 7095 // one. 7096 // 7097 // If the operand's class is an SGPR, we can never move it. 7098 7099 Register SGPRReg = findImplicitSGPRRead(MI); 7100 if (SGPRReg != AMDGPU::NoRegister) 7101 return SGPRReg; 7102 7103 Register UsedSGPRs[3] = { AMDGPU::NoRegister }; 7104 const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo(); 7105 7106 for (unsigned i = 0; i < 3; ++i) { 7107 int Idx = OpIndices[i]; 7108 if (Idx == -1) 7109 break; 7110 7111 const MachineOperand &MO = MI.getOperand(Idx); 7112 if (!MO.isReg()) 7113 continue; 7114 7115 // Is this operand statically required to be an SGPR based on the operand 7116 // constraints? 7117 const TargetRegisterClass *OpRC = RI.getRegClass(Desc.OpInfo[Idx].RegClass); 7118 bool IsRequiredSGPR = RI.isSGPRClass(OpRC); 7119 if (IsRequiredSGPR) 7120 return MO.getReg(); 7121 7122 // If this could be a VGPR or an SGPR, Check the dynamic register class. 7123 Register Reg = MO.getReg(); 7124 const TargetRegisterClass *RegRC = MRI.getRegClass(Reg); 7125 if (RI.isSGPRClass(RegRC)) 7126 UsedSGPRs[i] = Reg; 7127 } 7128 7129 // We don't have a required SGPR operand, so we have a bit more freedom in 7130 // selecting operands to move. 7131 7132 // Try to select the most used SGPR. If an SGPR is equal to one of the 7133 // others, we choose that. 7134 // 7135 // e.g. 7136 // V_FMA_F32 v0, s0, s0, s0 -> No moves 7137 // V_FMA_F32 v0, s0, s1, s0 -> Move s1 7138 7139 // TODO: If some of the operands are 64-bit SGPRs and some 32, we should 7140 // prefer those. 7141 7142 if (UsedSGPRs[0] != AMDGPU::NoRegister) { 7143 if (UsedSGPRs[0] == UsedSGPRs[1] || UsedSGPRs[0] == UsedSGPRs[2]) 7144 SGPRReg = UsedSGPRs[0]; 7145 } 7146 7147 if (SGPRReg == AMDGPU::NoRegister && UsedSGPRs[1] != AMDGPU::NoRegister) { 7148 if (UsedSGPRs[1] == UsedSGPRs[2]) 7149 SGPRReg = UsedSGPRs[1]; 7150 } 7151 7152 return SGPRReg; 7153 } 7154 7155 MachineOperand *SIInstrInfo::getNamedOperand(MachineInstr &MI, 7156 unsigned OperandName) const { 7157 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), OperandName); 7158 if (Idx == -1) 7159 return nullptr; 7160 7161 return &MI.getOperand(Idx); 7162 } 7163 7164 uint64_t SIInstrInfo::getDefaultRsrcDataFormat() const { 7165 if (ST.getGeneration() >= AMDGPUSubtarget::GFX10) { 7166 return (AMDGPU::MTBUFFormat::UFMT_32_FLOAT << 44) | 7167 (1ULL << 56) | // RESOURCE_LEVEL = 1 7168 (3ULL << 60); // OOB_SELECT = 3 7169 } 7170 7171 uint64_t RsrcDataFormat = AMDGPU::RSRC_DATA_FORMAT; 7172 if (ST.isAmdHsaOS()) { 7173 // Set ATC = 1. GFX9 doesn't have this bit. 7174 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) 7175 RsrcDataFormat |= (1ULL << 56); 7176 7177 // Set MTYPE = 2 (MTYPE_UC = uncached). GFX9 doesn't have this. 7178 // BTW, it disables TC L2 and therefore decreases performance. 7179 if (ST.getGeneration() == AMDGPUSubtarget::VOLCANIC_ISLANDS) 7180 RsrcDataFormat |= (2ULL << 59); 7181 } 7182 7183 return RsrcDataFormat; 7184 } 7185 7186 uint64_t SIInstrInfo::getScratchRsrcWords23() const { 7187 uint64_t Rsrc23 = getDefaultRsrcDataFormat() | 7188 AMDGPU::RSRC_TID_ENABLE | 7189 0xffffffff; // Size; 7190 7191 // GFX9 doesn't have ELEMENT_SIZE. 
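// For example (illustrative): a maximum private element size of 4 bytes is
// encoded as Log2_32(4) - 1 = 1, and 16 bytes as Log2_32(16) - 1 = 3.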
7192 if (ST.getGeneration() <= AMDGPUSubtarget::VOLCANIC_ISLANDS) { 7193 uint64_t EltSizeValue = Log2_32(ST.getMaxPrivateElementSize(true)) - 1; 7194 Rsrc23 |= EltSizeValue << AMDGPU::RSRC_ELEMENT_SIZE_SHIFT; 7195 } 7196 7197 // IndexStride = 64 / 32. 7198 uint64_t IndexStride = ST.getWavefrontSize() == 64 ? 3 : 2; 7199 Rsrc23 |= IndexStride << AMDGPU::RSRC_INDEX_STRIDE_SHIFT; 7200 7201 // If TID_ENABLE is set, DATA_FORMAT specifies stride bits [14:17]. 7202 // Clear them unless we want a huge stride. 7203 if (ST.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS && 7204 ST.getGeneration() <= AMDGPUSubtarget::GFX9) 7205 Rsrc23 &= ~AMDGPU::RSRC_DATA_FORMAT; 7206 7207 return Rsrc23; 7208 } 7209 7210 bool SIInstrInfo::isLowLatencyInstruction(const MachineInstr &MI) const { 7211 unsigned Opc = MI.getOpcode(); 7212 7213 return isSMRD(Opc); 7214 } 7215 7216 bool SIInstrInfo::isHighLatencyDef(int Opc) const { 7217 return get(Opc).mayLoad() && 7218 (isMUBUF(Opc) || isMTBUF(Opc) || isMIMG(Opc) || isFLAT(Opc)); 7219 } 7220 7221 unsigned SIInstrInfo::isStackAccess(const MachineInstr &MI, 7222 int &FrameIndex) const { 7223 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::vaddr); 7224 if (!Addr || !Addr->isFI()) 7225 return AMDGPU::NoRegister; 7226 7227 assert(!MI.memoperands_empty() && 7228 (*MI.memoperands_begin())->getAddrSpace() == AMDGPUAS::PRIVATE_ADDRESS); 7229 7230 FrameIndex = Addr->getIndex(); 7231 return getNamedOperand(MI, AMDGPU::OpName::vdata)->getReg(); 7232 } 7233 7234 unsigned SIInstrInfo::isSGPRStackAccess(const MachineInstr &MI, 7235 int &FrameIndex) const { 7236 const MachineOperand *Addr = getNamedOperand(MI, AMDGPU::OpName::addr); 7237 assert(Addr && Addr->isFI()); 7238 FrameIndex = Addr->getIndex(); 7239 return getNamedOperand(MI, AMDGPU::OpName::data)->getReg(); 7240 } 7241 7242 unsigned SIInstrInfo::isLoadFromStackSlot(const MachineInstr &MI, 7243 int &FrameIndex) const { 7244 if (!MI.mayLoad()) 7245 return AMDGPU::NoRegister; 7246 7247 if (isMUBUF(MI) || isVGPRSpill(MI)) 7248 return isStackAccess(MI, FrameIndex); 7249 7250 if (isSGPRSpill(MI)) 7251 return isSGPRStackAccess(MI, FrameIndex); 7252 7253 return AMDGPU::NoRegister; 7254 } 7255 7256 unsigned SIInstrInfo::isStoreToStackSlot(const MachineInstr &MI, 7257 int &FrameIndex) const { 7258 if (!MI.mayStore()) 7259 return AMDGPU::NoRegister; 7260 7261 if (isMUBUF(MI) || isVGPRSpill(MI)) 7262 return isStackAccess(MI, FrameIndex); 7263 7264 if (isSGPRSpill(MI)) 7265 return isSGPRStackAccess(MI, FrameIndex); 7266 7267 return AMDGPU::NoRegister; 7268 } 7269 7270 unsigned SIInstrInfo::getInstBundleSize(const MachineInstr &MI) const { 7271 unsigned Size = 0; 7272 MachineBasicBlock::const_instr_iterator I = MI.getIterator(); 7273 MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end(); 7274 while (++I != E && I->isInsideBundle()) { 7275 assert(!I->isBundle() && "No nested bundle!"); 7276 Size += getInstSizeInBytes(*I); 7277 } 7278 7279 return Size; 7280 } 7281 7282 unsigned SIInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const { 7283 unsigned Opc = MI.getOpcode(); 7284 const MCInstrDesc &Desc = getMCOpcodeFromPseudo(Opc); 7285 unsigned DescSize = Desc.getSize(); 7286 7287 // If we have a definitive size, we can use it. Otherwise we need to inspect 7288 // the operands to know the size. 7289 if (isFixedSize(MI)) { 7290 unsigned Size = DescSize; 7291 7292 // If we hit the buggy offset, an extra nop will be inserted in MC so 7293 // estimate the worst case. 
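// For example (illustrative), a fixed-size 4-byte branch is reported as 8
// bytes here to cover a possible workaround s_nop emitted by the assembler.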
7294 if (MI.isBranch() && ST.hasOffset3fBug())
7295 Size += 4;
7296
7297 return Size;
7298 }
7299
7300 // 4-byte instructions may have a 32-bit literal encoded after them. Check
7301 // operands that could ever be literals.
7302 if (isVALU(MI) || isSALU(MI)) {
7303 int Src0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0);
7304 if (Src0Idx == -1)
7305 return DescSize; // No operands.
7306
7307 if (isLiteralConstantLike(MI.getOperand(Src0Idx), Desc.OpInfo[Src0Idx]))
7308 return isVOP3(MI) ? 12 : (DescSize + 4);
7309
7310 int Src1Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src1);
7311 if (Src1Idx == -1)
7312 return DescSize;
7313
7314 if (isLiteralConstantLike(MI.getOperand(Src1Idx), Desc.OpInfo[Src1Idx]))
7315 return isVOP3(MI) ? 12 : (DescSize + 4);
7316
7317 int Src2Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2);
7318 if (Src2Idx == -1)
7319 return DescSize;
7320
7321 if (isLiteralConstantLike(MI.getOperand(Src2Idx), Desc.OpInfo[Src2Idx]))
7322 return isVOP3(MI) ? 12 : (DescSize + 4);
7323
7324 return DescSize;
7325 }
7326
7327 // Check whether we have extra NSA words.
7328 if (isMIMG(MI)) {
7329 int VAddr0Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vaddr0);
7330 if (VAddr0Idx < 0)
7331 return 8;
7332
7333 int RSrcIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::srsrc);
7334 return 8 + 4 * ((RSrcIdx - VAddr0Idx + 2) / 4);
7335 }
7336
7337 switch (Opc) {
7338 case TargetOpcode::BUNDLE:
7339 return getInstBundleSize(MI);
7340 case TargetOpcode::INLINEASM:
7341 case TargetOpcode::INLINEASM_BR: {
7342 const MachineFunction *MF = MI.getParent()->getParent();
7343 const char *AsmStr = MI.getOperand(0).getSymbolName();
7344 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo(), &ST);
7345 }
7346 default:
7347 if (MI.isMetaInstruction())
7348 return 0;
7349 return DescSize;
7350 }
7351 }
7352
7353 bool SIInstrInfo::mayAccessFlatAddressSpace(const MachineInstr &MI) const {
7354 if (!isFLAT(MI))
7355 return false;
7356
7357 if (MI.memoperands_empty())
7358 return true;
7359
7360 for (const MachineMemOperand *MMO : MI.memoperands()) {
7361 if (MMO->getAddrSpace() == AMDGPUAS::FLAT_ADDRESS)
7362 return true;
7363 }
7364 return false;
7365 }
7366
7367 bool SIInstrInfo::isNonUniformBranchInstr(MachineInstr &Branch) const {
7368 return Branch.getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO;
7369 }
7370
7371 void SIInstrInfo::convertNonUniformIfRegion(MachineBasicBlock *IfEntry,
7372 MachineBasicBlock *IfEnd) const {
7373 MachineBasicBlock::iterator TI = IfEntry->getFirstTerminator();
7374 assert(TI != IfEntry->end());
7375
7376 MachineInstr *Branch = &(*TI);
7377 MachineFunction *MF = IfEntry->getParent();
7378 MachineRegisterInfo &MRI = IfEntry->getParent()->getRegInfo();
7379
7380 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) {
7381 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC());
7382 MachineInstr *SIIF =
7383 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_IF), DstReg)
7384 .add(Branch->getOperand(0))
7385 .add(Branch->getOperand(1));
7386 MachineInstr *SIEND =
7387 BuildMI(*MF, Branch->getDebugLoc(), get(AMDGPU::SI_END_CF))
7388 .addReg(DstReg);
7389
7390 IfEntry->erase(TI);
7391 IfEntry->insert(IfEntry->end(), SIIF);
7392 IfEnd->insert(IfEnd->getFirstNonPHI(), SIEND);
7393 }
7394 }
7395
7396 void SIInstrInfo::convertNonUniformLoopRegion(
7397 MachineBasicBlock *LoopEntry, MachineBasicBlock *LoopEnd) const {
7398 MachineBasicBlock::iterator TI = LoopEnd->getFirstTerminator();
7399 // We expect 2
terminators, one conditional and one unconditional. 7400 assert(TI != LoopEnd->end()); 7401 7402 MachineInstr *Branch = &(*TI); 7403 MachineFunction *MF = LoopEnd->getParent(); 7404 MachineRegisterInfo &MRI = LoopEnd->getParent()->getRegInfo(); 7405 7406 if (Branch->getOpcode() == AMDGPU::SI_NON_UNIFORM_BRCOND_PSEUDO) { 7407 7408 Register DstReg = MRI.createVirtualRegister(RI.getBoolRC()); 7409 Register BackEdgeReg = MRI.createVirtualRegister(RI.getBoolRC()); 7410 MachineInstrBuilder HeaderPHIBuilder = 7411 BuildMI(*(MF), Branch->getDebugLoc(), get(TargetOpcode::PHI), DstReg); 7412 for (MachineBasicBlock::pred_iterator PI = LoopEntry->pred_begin(), 7413 E = LoopEntry->pred_end(); 7414 PI != E; ++PI) { 7415 if (*PI == LoopEnd) { 7416 HeaderPHIBuilder.addReg(BackEdgeReg); 7417 } else { 7418 MachineBasicBlock *PMBB = *PI; 7419 Register ZeroReg = MRI.createVirtualRegister(RI.getBoolRC()); 7420 materializeImmediate(*PMBB, PMBB->getFirstTerminator(), DebugLoc(), 7421 ZeroReg, 0); 7422 HeaderPHIBuilder.addReg(ZeroReg); 7423 } 7424 HeaderPHIBuilder.addMBB(*PI); 7425 } 7426 MachineInstr *HeaderPhi = HeaderPHIBuilder; 7427 MachineInstr *SIIFBREAK = BuildMI(*(MF), Branch->getDebugLoc(), 7428 get(AMDGPU::SI_IF_BREAK), BackEdgeReg) 7429 .addReg(DstReg) 7430 .add(Branch->getOperand(0)); 7431 MachineInstr *SILOOP = 7432 BuildMI(*(MF), Branch->getDebugLoc(), get(AMDGPU::SI_LOOP)) 7433 .addReg(BackEdgeReg) 7434 .addMBB(LoopEntry); 7435 7436 LoopEntry->insert(LoopEntry->begin(), HeaderPhi); 7437 LoopEnd->erase(TI); 7438 LoopEnd->insert(LoopEnd->end(), SIIFBREAK); 7439 LoopEnd->insert(LoopEnd->end(), SILOOP); 7440 } 7441 } 7442 7443 ArrayRef<std::pair<int, const char *>> 7444 SIInstrInfo::getSerializableTargetIndices() const { 7445 static const std::pair<int, const char *> TargetIndices[] = { 7446 {AMDGPU::TI_CONSTDATA_START, "amdgpu-constdata-start"}, 7447 {AMDGPU::TI_SCRATCH_RSRC_DWORD0, "amdgpu-scratch-rsrc-dword0"}, 7448 {AMDGPU::TI_SCRATCH_RSRC_DWORD1, "amdgpu-scratch-rsrc-dword1"}, 7449 {AMDGPU::TI_SCRATCH_RSRC_DWORD2, "amdgpu-scratch-rsrc-dword2"}, 7450 {AMDGPU::TI_SCRATCH_RSRC_DWORD3, "amdgpu-scratch-rsrc-dword3"}}; 7451 return makeArrayRef(TargetIndices); 7452 } 7453 7454 /// This is used by the post-RA scheduler (SchedulePostRAList.cpp). The 7455 /// post-RA version of misched uses CreateTargetMIHazardRecognizer. 7456 ScheduleHazardRecognizer * 7457 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, 7458 const ScheduleDAG *DAG) const { 7459 return new GCNHazardRecognizer(DAG->MF); 7460 } 7461 7462 /// This is the hazard recognizer used at -O0 by the PostRAHazardRecognizer 7463 /// pass. 7464 ScheduleHazardRecognizer * 7465 SIInstrInfo::CreateTargetPostRAHazardRecognizer(const MachineFunction &MF) const { 7466 return new GCNHazardRecognizer(MF); 7467 } 7468 7469 // Called during: 7470 // - pre-RA scheduling and post-RA scheduling 7471 ScheduleHazardRecognizer * 7472 SIInstrInfo::CreateTargetMIHazardRecognizer(const InstrItineraryData *II, 7473 const ScheduleDAGMI *DAG) const { 7474 // Borrowed from Arm Target 7475 // We would like to restrict this hazard recognizer to only 7476 // post-RA scheduling; we can tell that we're post-RA because we don't 7477 // track VRegLiveness. 
7478 if (!DAG->hasVRegLiveness()) 7479 return new GCNHazardRecognizer(DAG->MF); 7480 return TargetInstrInfo::CreateTargetMIHazardRecognizer(II, DAG); 7481 } 7482 7483 std::pair<unsigned, unsigned> 7484 SIInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const { 7485 return std::make_pair(TF & MO_MASK, TF & ~MO_MASK); 7486 } 7487 7488 ArrayRef<std::pair<unsigned, const char *>> 7489 SIInstrInfo::getSerializableDirectMachineOperandTargetFlags() const { 7490 static const std::pair<unsigned, const char *> TargetFlags[] = { 7491 { MO_GOTPCREL, "amdgpu-gotprel" }, 7492 { MO_GOTPCREL32_LO, "amdgpu-gotprel32-lo" }, 7493 { MO_GOTPCREL32_HI, "amdgpu-gotprel32-hi" }, 7494 { MO_REL32_LO, "amdgpu-rel32-lo" }, 7495 { MO_REL32_HI, "amdgpu-rel32-hi" }, 7496 { MO_ABS32_LO, "amdgpu-abs32-lo" }, 7497 { MO_ABS32_HI, "amdgpu-abs32-hi" }, 7498 }; 7499 7500 return makeArrayRef(TargetFlags); 7501 } 7502 7503 bool SIInstrInfo::isBasicBlockPrologue(const MachineInstr &MI) const { 7504 return !MI.isTerminator() && MI.getOpcode() != AMDGPU::COPY && 7505 MI.modifiesRegister(AMDGPU::EXEC, &RI); 7506 } 7507 7508 MachineInstrBuilder 7509 SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7510 MachineBasicBlock::iterator I, 7511 const DebugLoc &DL, 7512 Register DestReg) const { 7513 if (ST.hasAddNoCarry()) 7514 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e64), DestReg); 7515 7516 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo(); 7517 Register UnusedCarry = MRI.createVirtualRegister(RI.getBoolRC()); 7518 MRI.setRegAllocationHint(UnusedCarry, 0, RI.getVCC()); 7519 7520 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7521 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7522 } 7523 7524 MachineInstrBuilder SIInstrInfo::getAddNoCarry(MachineBasicBlock &MBB, 7525 MachineBasicBlock::iterator I, 7526 const DebugLoc &DL, 7527 Register DestReg, 7528 RegScavenger &RS) const { 7529 if (ST.hasAddNoCarry()) 7530 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_U32_e32), DestReg); 7531 7532 // If available, prefer to use vcc. 7533 Register UnusedCarry = !RS.isRegUsed(AMDGPU::VCC) 7534 ? Register(RI.getVCC()) 7535 : RS.scavengeRegister(RI.getBoolRC(), I, 0, false); 7536 7537 // TODO: Users need to deal with this. 7538 if (!UnusedCarry.isValid()) 7539 return MachineInstrBuilder(); 7540 7541 return BuildMI(MBB, I, DL, get(AMDGPU::V_ADD_CO_U32_e64), DestReg) 7542 .addReg(UnusedCarry, RegState::Define | RegState::Dead); 7543 } 7544 7545 bool SIInstrInfo::isKillTerminator(unsigned Opcode) { 7546 switch (Opcode) { 7547 case AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR: 7548 case AMDGPU::SI_KILL_I1_TERMINATOR: 7549 return true; 7550 default: 7551 return false; 7552 } 7553 } 7554 7555 const MCInstrDesc &SIInstrInfo::getKillTerminatorFromPseudo(unsigned Opcode) const { 7556 switch (Opcode) { 7557 case AMDGPU::SI_KILL_F32_COND_IMM_PSEUDO: 7558 return get(AMDGPU::SI_KILL_F32_COND_IMM_TERMINATOR); 7559 case AMDGPU::SI_KILL_I1_PSEUDO: 7560 return get(AMDGPU::SI_KILL_I1_TERMINATOR); 7561 default: 7562 llvm_unreachable("invalid opcode, expected SI_KILL_*_PSEUDO"); 7563 } 7564 } 7565 7566 void SIInstrInfo::fixImplicitOperands(MachineInstr &MI) const { 7567 if (!ST.isWave32()) 7568 return; 7569 7570 for (auto &Op : MI.implicit_operands()) { 7571 if (Op.isReg() && Op.getReg() == AMDGPU::VCC) 7572 Op.setReg(AMDGPU::VCC_LO); 7573 } 7574 } 7575 7576 bool SIInstrInfo::isBufferSMRD(const MachineInstr &MI) const { 7577 if (!isSMRD(MI)) 7578 return false; 7579 7580 // Check that it is using a buffer resource. 
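// For example (illustrative): s_buffer_load_dword takes a 128-bit SGPR
// resource descriptor in sbase and is a buffer SMRD, while s_load_dword uses
// a 64-bit base address and s_memtime has no sbase operand at all.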
7581 int Idx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sbase); 7582 if (Idx == -1) // e.g. s_memtime 7583 return false; 7584 7585 const auto RCID = MI.getDesc().OpInfo[Idx].RegClass; 7586 return RI.getRegClass(RCID)->hasSubClassEq(&AMDGPU::SGPR_128RegClass); 7587 } 7588 7589 // Depending on the used address space and instructions, some immediate offsets 7590 // are allowed and some are not. 7591 // In general, flat instruction offsets can only be non-negative, global and 7592 // scratch instruction offsets can also be negative. 7593 // 7594 // There are several bugs related to these offsets: 7595 // On gfx10.1, flat instructions that go into the global address space cannot 7596 // use an offset. 7597 // 7598 // For scratch instructions, the address can be either an SGPR or a VGPR. 7599 // The following offsets can be used, depending on the architecture (x means 7600 // cannot be used): 7601 // +----------------------------+------+------+ 7602 // | Address-Mode | SGPR | VGPR | 7603 // +----------------------------+------+------+ 7604 // | gfx9 | | | 7605 // | negative, 4-aligned offset | x | ok | 7606 // | negative, unaligned offset | x | ok | 7607 // +----------------------------+------+------+ 7608 // | gfx10 | | | 7609 // | negative, 4-aligned offset | ok | ok | 7610 // | negative, unaligned offset | ok | x | 7611 // +----------------------------+------+------+ 7612 // | gfx10.3 | | | 7613 // | negative, 4-aligned offset | ok | ok | 7614 // | negative, unaligned offset | ok | ok | 7615 // +----------------------------+------+------+ 7616 // 7617 // This function ignores the addressing mode, so if an offset cannot be used in 7618 // one addressing mode, it is considered illegal. 7619 bool SIInstrInfo::isLegalFLATOffset(int64_t Offset, unsigned AddrSpace, 7620 uint64_t FlatVariant) const { 7621 // TODO: Should 0 be special cased? 7622 if (!ST.hasFlatInstOffsets()) 7623 return false; 7624 7625 if (ST.hasFlatSegmentOffsetBug() && FlatVariant == SIInstrFlags::FLAT && 7626 (AddrSpace == AMDGPUAS::FLAT_ADDRESS || 7627 AddrSpace == AMDGPUAS::GLOBAL_ADDRESS)) 7628 return false; 7629 7630 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7631 if (ST.hasNegativeScratchOffsetBug() && 7632 FlatVariant == SIInstrFlags::FlatScratch) 7633 Signed = false; 7634 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7635 FlatVariant == SIInstrFlags::FlatScratch && Offset < 0 && 7636 (Offset % 4) != 0) { 7637 return false; 7638 } 7639 7640 unsigned N = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7641 return Signed ? isIntN(N, Offset) : isUIntN(N, Offset); 7642 } 7643 7644 // See comment on SIInstrInfo::isLegalFLATOffset for what is legal and what not. 7645 std::pair<int64_t, int64_t> 7646 SIInstrInfo::splitFlatOffset(int64_t COffsetVal, unsigned AddrSpace, 7647 uint64_t FlatVariant) const { 7648 int64_t RemainderOffset = COffsetVal; 7649 int64_t ImmField = 0; 7650 bool Signed = FlatVariant != SIInstrFlags::FLAT; 7651 if (ST.hasNegativeScratchOffsetBug() && 7652 FlatVariant == SIInstrFlags::FlatScratch) 7653 Signed = false; 7654 7655 const unsigned NumBits = AMDGPU::getNumFlatOffsetBits(ST, Signed); 7656 if (Signed) { 7657 // Use signed division by a power of two to truncate towards 0. 
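// Worked example (illustrative, assuming a 13-bit signed immediate field):
//   COffsetVal =  4100 -> RemainderOffset =  4096, ImmField =  4
//   COffsetVal = -4100 -> RemainderOffset = -4096, ImmField = -4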
7658 int64_t D = 1LL << (NumBits - 1); 7659 RemainderOffset = (COffsetVal / D) * D; 7660 ImmField = COffsetVal - RemainderOffset; 7661 7662 if (ST.hasNegativeUnalignedScratchOffsetBug() && 7663 FlatVariant == SIInstrFlags::FlatScratch && ImmField < 0 && 7664 (ImmField % 4) != 0) { 7665 // Make ImmField a multiple of 4 7666 RemainderOffset += ImmField % 4; 7667 ImmField -= ImmField % 4; 7668 } 7669 } else if (COffsetVal >= 0) { 7670 ImmField = COffsetVal & maskTrailingOnes<uint64_t>(NumBits); 7671 RemainderOffset = COffsetVal - ImmField; 7672 } 7673 7674 assert(isLegalFLATOffset(ImmField, AddrSpace, FlatVariant)); 7675 assert(RemainderOffset + ImmField == COffsetVal); 7676 return {ImmField, RemainderOffset}; 7677 } 7678 7679 // This must be kept in sync with the SIEncodingFamily class in SIInstrInfo.td 7680 enum SIEncodingFamily { 7681 SI = 0, 7682 VI = 1, 7683 SDWA = 2, 7684 SDWA9 = 3, 7685 GFX80 = 4, 7686 GFX9 = 5, 7687 GFX10 = 6, 7688 SDWA10 = 7, 7689 GFX90A = 8 7690 }; 7691 7692 static SIEncodingFamily subtargetEncodingFamily(const GCNSubtarget &ST) { 7693 switch (ST.getGeneration()) { 7694 default: 7695 break; 7696 case AMDGPUSubtarget::SOUTHERN_ISLANDS: 7697 case AMDGPUSubtarget::SEA_ISLANDS: 7698 return SIEncodingFamily::SI; 7699 case AMDGPUSubtarget::VOLCANIC_ISLANDS: 7700 case AMDGPUSubtarget::GFX9: 7701 return SIEncodingFamily::VI; 7702 case AMDGPUSubtarget::GFX10: 7703 return SIEncodingFamily::GFX10; 7704 } 7705 llvm_unreachable("Unknown subtarget generation!"); 7706 } 7707 7708 bool SIInstrInfo::isAsmOnlyOpcode(int MCOp) const { 7709 switch(MCOp) { 7710 // These opcodes use indirect register addressing so 7711 // they need special handling by codegen (currently missing). 7712 // Therefore it is too risky to allow these opcodes 7713 // to be selected by dpp combiner or sdwa peepholer. 7714 case AMDGPU::V_MOVRELS_B32_dpp_gfx10: 7715 case AMDGPU::V_MOVRELS_B32_sdwa_gfx10: 7716 case AMDGPU::V_MOVRELD_B32_dpp_gfx10: 7717 case AMDGPU::V_MOVRELD_B32_sdwa_gfx10: 7718 case AMDGPU::V_MOVRELSD_B32_dpp_gfx10: 7719 case AMDGPU::V_MOVRELSD_B32_sdwa_gfx10: 7720 case AMDGPU::V_MOVRELSD_2_B32_dpp_gfx10: 7721 case AMDGPU::V_MOVRELSD_2_B32_sdwa_gfx10: 7722 return true; 7723 default: 7724 return false; 7725 } 7726 } 7727 7728 int SIInstrInfo::pseudoToMCOpcode(int Opcode) const { 7729 SIEncodingFamily Gen = subtargetEncodingFamily(ST); 7730 7731 if ((get(Opcode).TSFlags & SIInstrFlags::renamedInGFX9) != 0 && 7732 ST.getGeneration() == AMDGPUSubtarget::GFX9) 7733 Gen = SIEncodingFamily::GFX9; 7734 7735 // Adjust the encoding family to GFX80 for D16 buffer instructions when the 7736 // subtarget has UnpackedD16VMem feature. 7737 // TODO: remove this when we discard GFX80 encoding. 7738 if (ST.hasUnpackedD16VMem() && (get(Opcode).TSFlags & SIInstrFlags::D16Buf)) 7739 Gen = SIEncodingFamily::GFX80; 7740 7741 if (get(Opcode).TSFlags & SIInstrFlags::SDWA) { 7742 switch (ST.getGeneration()) { 7743 default: 7744 Gen = SIEncodingFamily::SDWA; 7745 break; 7746 case AMDGPUSubtarget::GFX9: 7747 Gen = SIEncodingFamily::SDWA9; 7748 break; 7749 case AMDGPUSubtarget::GFX10: 7750 Gen = SIEncodingFamily::SDWA10; 7751 break; 7752 } 7753 } 7754 7755 int MCOp = AMDGPU::getMCOpcode(Opcode, Gen); 7756 7757 // -1 means that Opcode is already a native instruction. 
7758 if (MCOp == -1)
7759 return Opcode;
7760
7761 if (ST.hasGFX90AInsts()) {
7762 uint16_t NMCOp = (uint16_t)-1;
7763 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX90A);
7764 if (NMCOp == (uint16_t)-1)
7765 NMCOp = AMDGPU::getMCOpcode(Opcode, SIEncodingFamily::GFX9);
7766 if (NMCOp != (uint16_t)-1)
7767 MCOp = NMCOp;
7768 }
7769
7770 // (uint16_t)-1 means that Opcode is a pseudo instruction that has
7771 // no encoding in the given subtarget generation.
7772 if (MCOp == (uint16_t)-1)
7773 return -1;
7774
7775 if (isAsmOnlyOpcode(MCOp))
7776 return -1;
7777
7778 return MCOp;
7779 }
7780
7781 static
7782 TargetInstrInfo::RegSubRegPair getRegOrUndef(const MachineOperand &RegOpnd) {
7783 assert(RegOpnd.isReg());
7784 return RegOpnd.isUndef() ? TargetInstrInfo::RegSubRegPair() :
7785 getRegSubRegPair(RegOpnd);
7786 }
7787
7788 TargetInstrInfo::RegSubRegPair
7789 llvm::getRegSequenceSubReg(MachineInstr &MI, unsigned SubReg) {
7790 assert(MI.isRegSequence());
7791 for (unsigned I = 0, E = (MI.getNumOperands() - 1)/ 2; I < E; ++I)
7792 if (MI.getOperand(1 + 2 * I + 1).getImm() == SubReg) {
7793 auto &RegOp = MI.getOperand(1 + 2 * I);
7794 return getRegOrUndef(RegOp);
7795 }
7796 return TargetInstrInfo::RegSubRegPair();
7797 }
7798
7799 // Try to find the definition of reg:subreg in subreg-manipulation pseudos
7800 // Following a subreg of reg:subreg isn't supported
7801 static bool followSubRegDef(MachineInstr &MI,
7802 TargetInstrInfo::RegSubRegPair &RSR) {
7803 if (!RSR.SubReg)
7804 return false;
7805 switch (MI.getOpcode()) {
7806 default: break;
7807 case AMDGPU::REG_SEQUENCE:
7808 RSR = getRegSequenceSubReg(MI, RSR.SubReg);
7809 return true;
7810 // EXTRACT_SUBREG isn't supported as this would follow a subreg of subreg
7811 case AMDGPU::INSERT_SUBREG:
7812 if (RSR.SubReg == (unsigned)MI.getOperand(3).getImm())
7813 // inserted the subreg we're looking for
7814 RSR = getRegOrUndef(MI.getOperand(2));
7815 else { // the subreg in the rest of the reg
7816 auto R1 = getRegOrUndef(MI.getOperand(1));
7817 if (R1.SubReg) // subreg of subreg isn't supported
7818 return false;
7819 RSR.Reg = R1.Reg;
7820 }
7821 return true;
7822 }
7823 return false;
7824 }
7825
7826 MachineInstr *llvm::getVRegSubRegDef(const TargetInstrInfo::RegSubRegPair &P,
7827 MachineRegisterInfo &MRI) {
7828 assert(MRI.isSSA());
7829 if (!P.Reg.isVirtual())
7830 return nullptr;
7831
7832 auto RSR = P;
7833 auto *DefInst = MRI.getVRegDef(RSR.Reg);
7834 while (auto *MI = DefInst) {
7835 DefInst = nullptr;
7836 switch (MI->getOpcode()) {
7837 case AMDGPU::COPY:
7838 case AMDGPU::V_MOV_B32_e32: {
7839 auto &Op1 = MI->getOperand(1);
7840 if (Op1.isReg() && Op1.getReg().isVirtual()) {
7841 if (Op1.isUndef())
7842 return nullptr;
7843 RSR = getRegSubRegPair(Op1);
7844 DefInst = MRI.getVRegDef(RSR.Reg);
7845 }
7846 break;
7847 }
7848 default:
7849 if (followSubRegDef(*MI, RSR)) {
7850 if (!RSR.Reg)
7851 return nullptr;
7852 DefInst = MRI.getVRegDef(RSR.Reg);
7853 }
7854 }
7855 if (!DefInst)
7856 return MI;
7857 }
7858 return nullptr;
7859 }
7860
7861 bool llvm::execMayBeModifiedBeforeUse(const MachineRegisterInfo &MRI,
7862 Register VReg,
7863 const MachineInstr &DefMI,
7864 const MachineInstr &UseMI) {
7865 assert(MRI.isSSA() && "Must be run on SSA");
7866
7867 auto *TRI = MRI.getTargetRegisterInfo();
7868 auto *DefBB = DefMI.getParent();
7869
7870 // Don't bother searching between blocks, although it is possible this block
7871 // doesn't modify exec.
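// Illustrative behaviour (not from the original source): with
//   %v = V_MOV_B32_e32 0, implicit $exec      <- DefMI
//   $exec = S_MOV_B64 ...
//   ... = V_ADD_U32_e32 %v, ...               <- UseMI
// the intervening write to $exec makes this return true; without it, and with
// fewer than MaxInstScan instructions in between, it returns false.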
7872 if (UseMI.getParent() != DefBB) 7873 return true; 7874 7875 const int MaxInstScan = 20; 7876 int NumInst = 0; 7877 7878 // Stop scan at the use. 7879 auto E = UseMI.getIterator(); 7880 for (auto I = std::next(DefMI.getIterator()); I != E; ++I) { 7881 if (I->isDebugInstr()) 7882 continue; 7883 7884 if (++NumInst > MaxInstScan) 7885 return true; 7886 7887 if (I->modifiesRegister(AMDGPU::EXEC, TRI)) 7888 return true; 7889 } 7890 7891 return false; 7892 } 7893 7894 bool llvm::execMayBeModifiedBeforeAnyUse(const MachineRegisterInfo &MRI, 7895 Register VReg, 7896 const MachineInstr &DefMI) { 7897 assert(MRI.isSSA() && "Must be run on SSA"); 7898 7899 auto *TRI = MRI.getTargetRegisterInfo(); 7900 auto *DefBB = DefMI.getParent(); 7901 7902 const int MaxUseScan = 10; 7903 int NumUse = 0; 7904 7905 for (auto &Use : MRI.use_nodbg_operands(VReg)) { 7906 auto &UseInst = *Use.getParent(); 7907 // Don't bother searching between blocks, although it is possible this block 7908 // doesn't modify exec. 7909 if (UseInst.getParent() != DefBB) 7910 return true; 7911 7912 if (++NumUse > MaxUseScan) 7913 return true; 7914 } 7915 7916 if (NumUse == 0) 7917 return false; 7918 7919 const int MaxInstScan = 20; 7920 int NumInst = 0; 7921 7922 // Stop scan when we have seen all the uses. 7923 for (auto I = std::next(DefMI.getIterator()); ; ++I) { 7924 assert(I != DefBB->end()); 7925 7926 if (I->isDebugInstr()) 7927 continue; 7928 7929 if (++NumInst > MaxInstScan) 7930 return true; 7931 7932 for (const MachineOperand &Op : I->operands()) { 7933 // We don't check reg masks here as they're used only on calls: 7934 // 1. EXEC is only considered const within one BB 7935 // 2. Call should be a terminator instruction if present in a BB 7936 7937 if (!Op.isReg()) 7938 continue; 7939 7940 Register Reg = Op.getReg(); 7941 if (Op.isUse()) { 7942 if (Reg == VReg && --NumUse == 0) 7943 return false; 7944 } else if (TRI->regsOverlap(Reg, AMDGPU::EXEC)) 7945 return true; 7946 } 7947 } 7948 } 7949 7950 MachineInstr *SIInstrInfo::createPHIDestinationCopy( 7951 MachineBasicBlock &MBB, MachineBasicBlock::iterator LastPHIIt, 7952 const DebugLoc &DL, Register Src, Register Dst) const { 7953 auto Cur = MBB.begin(); 7954 if (Cur != MBB.end()) 7955 do { 7956 if (!Cur->isPHI() && Cur->readsRegister(Dst)) 7957 return BuildMI(MBB, Cur, DL, get(TargetOpcode::COPY), Dst).addReg(Src); 7958 ++Cur; 7959 } while (Cur != MBB.end() && Cur != LastPHIIt); 7960 7961 return TargetInstrInfo::createPHIDestinationCopy(MBB, LastPHIIt, DL, Src, 7962 Dst); 7963 } 7964 7965 MachineInstr *SIInstrInfo::createPHISourceCopy( 7966 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsPt, 7967 const DebugLoc &DL, Register Src, unsigned SrcSubReg, Register Dst) const { 7968 if (InsPt != MBB.end() && 7969 (InsPt->getOpcode() == AMDGPU::SI_IF || 7970 InsPt->getOpcode() == AMDGPU::SI_ELSE || 7971 InsPt->getOpcode() == AMDGPU::SI_IF_BREAK) && 7972 InsPt->definesRegister(Src)) { 7973 InsPt++; 7974 return BuildMI(MBB, InsPt, DL, 7975 get(ST.isWave32() ? 
AMDGPU::S_MOV_B32_term 7976 : AMDGPU::S_MOV_B64_term), 7977 Dst) 7978 .addReg(Src, 0, SrcSubReg) 7979 .addReg(AMDGPU::EXEC, RegState::Implicit); 7980 } 7981 return TargetInstrInfo::createPHISourceCopy(MBB, InsPt, DL, Src, SrcSubReg, 7982 Dst); 7983 } 7984 7985 bool llvm::SIInstrInfo::isWave32() const { return ST.isWave32(); } 7986 7987 MachineInstr *SIInstrInfo::foldMemoryOperandImpl( 7988 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops, 7989 MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS, 7990 VirtRegMap *VRM) const { 7991 // This is a bit of a hack (copied from AArch64). Consider this instruction: 7992 // 7993 // %0:sreg_32 = COPY $m0 7994 // 7995 // We explicitly chose SReg_32 for the virtual register so such a copy might 7996 // be eliminated by RegisterCoalescer. However, that may not be possible, and 7997 // %0 may even spill. We can't spill $m0 normally (it would require copying to 7998 // a numbered SGPR anyway), and since it is in the SReg_32 register class, 7999 // TargetInstrInfo::foldMemoryOperand() is going to try. 8000 // A similar issue also exists with spilling and reloading $exec registers. 8001 // 8002 // To prevent that, constrain the %0 register class here. 8003 if (MI.isFullCopy()) { 8004 Register DstReg = MI.getOperand(0).getReg(); 8005 Register SrcReg = MI.getOperand(1).getReg(); 8006 if ((DstReg.isVirtual() || SrcReg.isVirtual()) && 8007 (DstReg.isVirtual() != SrcReg.isVirtual())) { 8008 MachineRegisterInfo &MRI = MF.getRegInfo(); 8009 Register VirtReg = DstReg.isVirtual() ? DstReg : SrcReg; 8010 const TargetRegisterClass *RC = MRI.getRegClass(VirtReg); 8011 if (RC->hasSuperClassEq(&AMDGPU::SReg_32RegClass)) { 8012 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_32_XM0_XEXECRegClass); 8013 return nullptr; 8014 } else if (RC->hasSuperClassEq(&AMDGPU::SReg_64RegClass)) { 8015 MRI.constrainRegClass(VirtReg, &AMDGPU::SReg_64_XEXECRegClass); 8016 return nullptr; 8017 } 8018 } 8019 } 8020 8021 return nullptr; 8022 } 8023 8024 unsigned SIInstrInfo::getInstrLatency(const InstrItineraryData *ItinData, 8025 const MachineInstr &MI, 8026 unsigned *PredCost) const { 8027 if (MI.isBundle()) { 8028 MachineBasicBlock::const_instr_iterator I(MI.getIterator()); 8029 MachineBasicBlock::const_instr_iterator E(MI.getParent()->instr_end()); 8030 unsigned Lat = 0, Count = 0; 8031 for (++I; I != E && I->isBundledWithPred(); ++I) { 8032 ++Count; 8033 Lat = std::max(Lat, SchedModel.computeInstrLatency(&*I)); 8034 } 8035 return Lat + Count - 1; 8036 } 8037 8038 return SchedModel.computeInstrLatency(&MI); 8039 } 8040 8041 unsigned SIInstrInfo::getDSShaderTypeValue(const MachineFunction &MF) { 8042 switch (MF.getFunction().getCallingConv()) { 8043 case CallingConv::AMDGPU_PS: 8044 return 1; 8045 case CallingConv::AMDGPU_VS: 8046 return 2; 8047 case CallingConv::AMDGPU_GS: 8048 return 3; 8049 case CallingConv::AMDGPU_HS: 8050 case CallingConv::AMDGPU_LS: 8051 case CallingConv::AMDGPU_ES: 8052 report_fatal_error("ds_ordered_count unsupported for this calling conv"); 8053 case CallingConv::AMDGPU_CS: 8054 case CallingConv::AMDGPU_KERNEL: 8055 case CallingConv::C: 8056 case CallingConv::Fast: 8057 default: 8058 // Assume other calling conventions are various compute callable functions 8059 return 0; 8060 } 8061 } 8062 8063 bool SIInstrInfo::analyzeCompare(const MachineInstr &MI, Register &SrcReg, 8064 Register &SrcReg2, int64_t &CmpMask, 8065 int64_t &CmpValue) const { 8066 if (!MI.getOperand(0).isReg() || MI.getOperand(0).getSubReg()) 8067 return false; 8068 
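// Illustrative examples (not from the original source) of what is extracted:
//   S_CMP_LG_U32 %1, 42  -> SrcReg = %1, SrcReg2 = <none>, CmpValue = 42
//   S_CMP_EQ_U64 %2, %3  -> SrcReg = %2, SrcReg2 = %3,     CmpValue = 0
// CmpMask is set to ~0 for all of the opcodes handled below.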
8069 switch (MI.getOpcode()) { 8070 default: 8071 break; 8072 case AMDGPU::S_CMP_EQ_U32: 8073 case AMDGPU::S_CMP_EQ_I32: 8074 case AMDGPU::S_CMP_LG_U32: 8075 case AMDGPU::S_CMP_LG_I32: 8076 case AMDGPU::S_CMP_LT_U32: 8077 case AMDGPU::S_CMP_LT_I32: 8078 case AMDGPU::S_CMP_GT_U32: 8079 case AMDGPU::S_CMP_GT_I32: 8080 case AMDGPU::S_CMP_LE_U32: 8081 case AMDGPU::S_CMP_LE_I32: 8082 case AMDGPU::S_CMP_GE_U32: 8083 case AMDGPU::S_CMP_GE_I32: 8084 case AMDGPU::S_CMP_EQ_U64: 8085 case AMDGPU::S_CMP_LG_U64: 8086 SrcReg = MI.getOperand(0).getReg(); 8087 if (MI.getOperand(1).isReg()) { 8088 if (MI.getOperand(1).getSubReg()) 8089 return false; 8090 SrcReg2 = MI.getOperand(1).getReg(); 8091 CmpValue = 0; 8092 } else if (MI.getOperand(1).isImm()) { 8093 SrcReg2 = Register(); 8094 CmpValue = MI.getOperand(1).getImm(); 8095 } else { 8096 return false; 8097 } 8098 CmpMask = ~0; 8099 return true; 8100 case AMDGPU::S_CMPK_EQ_U32: 8101 case AMDGPU::S_CMPK_EQ_I32: 8102 case AMDGPU::S_CMPK_LG_U32: 8103 case AMDGPU::S_CMPK_LG_I32: 8104 case AMDGPU::S_CMPK_LT_U32: 8105 case AMDGPU::S_CMPK_LT_I32: 8106 case AMDGPU::S_CMPK_GT_U32: 8107 case AMDGPU::S_CMPK_GT_I32: 8108 case AMDGPU::S_CMPK_LE_U32: 8109 case AMDGPU::S_CMPK_LE_I32: 8110 case AMDGPU::S_CMPK_GE_U32: 8111 case AMDGPU::S_CMPK_GE_I32: 8112 SrcReg = MI.getOperand(0).getReg(); 8113 SrcReg2 = Register(); 8114 CmpValue = MI.getOperand(1).getImm(); 8115 CmpMask = ~0; 8116 return true; 8117 } 8118 8119 return false; 8120 } 8121 8122 bool SIInstrInfo::optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, 8123 Register SrcReg2, int64_t CmpMask, 8124 int64_t CmpValue, 8125 const MachineRegisterInfo *MRI) const { 8126 if (!SrcReg || SrcReg.isPhysical()) 8127 return false; 8128 8129 if (SrcReg2 && !getFoldableImm(SrcReg2, *MRI, CmpValue)) 8130 return false; 8131 8132 const auto optimizeCmpAnd = [&CmpInstr, SrcReg, CmpValue, MRI, 8133 this](int64_t ExpectedValue, unsigned SrcSize, 8134 bool IsReversable, bool IsSigned) -> bool { 8135 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8136 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8137 // s_cmp_ge_u32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8138 // s_cmp_ge_i32 (s_and_b32 $src, 1 << n), 1 << n => s_and_b32 $src, 1 << n 8139 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 1 << n => s_and_b64 $src, 1 << n 8140 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8141 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8142 // s_cmp_gt_u32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8143 // s_cmp_gt_i32 (s_and_b32 $src, 1 << n), 0 => s_and_b32 $src, 1 << n 8144 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 0 => s_and_b64 $src, 1 << n 8145 // 8146 // Signed ge/gt are not used for the sign bit. 
8147 // 8148 // If result of the AND is unused except in the compare: 8149 // s_and_b(32|64) $src, 1 << n => s_bitcmp1_b(32|64) $src, n 8150 // 8151 // s_cmp_eq_u32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n 8152 // s_cmp_eq_i32 (s_and_b32 $src, 1 << n), 0 => s_bitcmp0_b32 $src, n 8153 // s_cmp_eq_u64 (s_and_b64 $src, 1 << n), 0 => s_bitcmp0_b64 $src, n 8154 // s_cmp_lg_u32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n 8155 // s_cmp_lg_i32 (s_and_b32 $src, 1 << n), 1 << n => s_bitcmp0_b32 $src, n 8156 // s_cmp_lg_u64 (s_and_b64 $src, 1 << n), 1 << n => s_bitcmp0_b64 $src, n 8157 8158 MachineInstr *Def = MRI->getUniqueVRegDef(SrcReg); 8159 if (!Def || Def->getParent() != CmpInstr.getParent()) 8160 return false; 8161 8162 if (Def->getOpcode() != AMDGPU::S_AND_B32 && 8163 Def->getOpcode() != AMDGPU::S_AND_B64) 8164 return false; 8165 8166 int64_t Mask; 8167 const auto isMask = [&Mask, SrcSize](const MachineOperand *MO) -> bool { 8168 if (MO->isImm()) 8169 Mask = MO->getImm(); 8170 else if (!getFoldableImm(MO, Mask)) 8171 return false; 8172 Mask &= maxUIntN(SrcSize); 8173 return isPowerOf2_64(Mask); 8174 }; 8175 8176 MachineOperand *SrcOp = &Def->getOperand(1); 8177 if (isMask(SrcOp)) 8178 SrcOp = &Def->getOperand(2); 8179 else if (isMask(&Def->getOperand(2))) 8180 SrcOp = &Def->getOperand(1); 8181 else 8182 return false; 8183 8184 unsigned BitNo = countTrailingZeros((uint64_t)Mask); 8185 if (IsSigned && BitNo == SrcSize - 1) 8186 return false; 8187 8188 ExpectedValue <<= BitNo; 8189 8190 bool IsReversedCC = false; 8191 if (CmpValue != ExpectedValue) { 8192 if (!IsReversable) 8193 return false; 8194 IsReversedCC = CmpValue == (ExpectedValue ^ Mask); 8195 if (!IsReversedCC) 8196 return false; 8197 } 8198 8199 Register DefReg = Def->getOperand(0).getReg(); 8200 if (IsReversedCC && !MRI->hasOneNonDBGUse(DefReg)) 8201 return false; 8202 8203 for (auto I = std::next(Def->getIterator()), E = CmpInstr.getIterator(); 8204 I != E; ++I) { 8205 if (I->modifiesRegister(AMDGPU::SCC, &RI) || 8206 I->killsRegister(AMDGPU::SCC, &RI)) 8207 return false; 8208 } 8209 8210 MachineOperand *SccDef = Def->findRegisterDefOperand(AMDGPU::SCC); 8211 SccDef->setIsDead(false); 8212 CmpInstr.eraseFromParent(); 8213 8214 if (!MRI->use_nodbg_empty(DefReg)) { 8215 assert(!IsReversedCC); 8216 return true; 8217 } 8218 8219 // Replace AND with unused result with a S_BITCMP. 8220 MachineBasicBlock *MBB = Def->getParent(); 8221 8222 unsigned NewOpc = (SrcSize == 32) ? IsReversedCC ? AMDGPU::S_BITCMP0_B32 8223 : AMDGPU::S_BITCMP1_B32 8224 : IsReversedCC ? 
AMDGPU::S_BITCMP0_B64 8225 : AMDGPU::S_BITCMP1_B64; 8226 8227 BuildMI(*MBB, Def, Def->getDebugLoc(), get(NewOpc)) 8228 .add(*SrcOp) 8229 .addImm(BitNo); 8230 Def->eraseFromParent(); 8231 8232 return true; 8233 }; 8234 8235 switch (CmpInstr.getOpcode()) { 8236 default: 8237 break; 8238 case AMDGPU::S_CMP_EQ_U32: 8239 case AMDGPU::S_CMP_EQ_I32: 8240 case AMDGPU::S_CMPK_EQ_U32: 8241 case AMDGPU::S_CMPK_EQ_I32: 8242 return optimizeCmpAnd(1, 32, true, false); 8243 case AMDGPU::S_CMP_GE_U32: 8244 case AMDGPU::S_CMPK_GE_U32: 8245 return optimizeCmpAnd(1, 32, false, false); 8246 case AMDGPU::S_CMP_GE_I32: 8247 case AMDGPU::S_CMPK_GE_I32: 8248 return optimizeCmpAnd(1, 32, false, true); 8249 case AMDGPU::S_CMP_EQ_U64: 8250 return optimizeCmpAnd(1, 64, true, false); 8251 case AMDGPU::S_CMP_LG_U32: 8252 case AMDGPU::S_CMP_LG_I32: 8253 case AMDGPU::S_CMPK_LG_U32: 8254 case AMDGPU::S_CMPK_LG_I32: 8255 return optimizeCmpAnd(0, 32, true, false); 8256 case AMDGPU::S_CMP_GT_U32: 8257 case AMDGPU::S_CMPK_GT_U32: 8258 return optimizeCmpAnd(0, 32, false, false); 8259 case AMDGPU::S_CMP_GT_I32: 8260 case AMDGPU::S_CMPK_GT_I32: 8261 return optimizeCmpAnd(0, 32, false, true); 8262 case AMDGPU::S_CMP_LG_U64: 8263 return optimizeCmpAnd(0, 64, true, false); 8264 } 8265 8266 return false; 8267 } 8268