1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the targeting of the InstructionSelector class for 10 /// AMDGPU. 11 /// \todo This should be generated by TableGen. 12 //===----------------------------------------------------------------------===// 13 14 #include "AMDGPUInstructionSelector.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUGlobalISelUtils.h" 17 #include "AMDGPUInstrInfo.h" 18 #include "AMDGPURegisterBankInfo.h" 19 #include "AMDGPUTargetMachine.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "Utils/AMDGPUBaseInfo.h" 22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" 24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/IR/DiagnosticInfo.h" 28 #include "llvm/IR/IntrinsicsAMDGPU.h" 29 30 #define DEBUG_TYPE "amdgpu-isel" 31 32 using namespace llvm; 33 using namespace MIPatternMatch; 34 35 static cl::opt<bool> AllowRiskySelect( 36 "amdgpu-global-isel-risky-select", 37 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"), 38 cl::init(false), 39 cl::ReallyHidden); 40 41 #define GET_GLOBALISEL_IMPL 42 #define AMDGPUSubtarget GCNSubtarget 43 #include "AMDGPUGenGlobalISel.inc" 44 #undef GET_GLOBALISEL_IMPL 45 #undef AMDGPUSubtarget 46 47 AMDGPUInstructionSelector::AMDGPUInstructionSelector( 48 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, 49 const AMDGPUTargetMachine &TM) 50 : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), 51 STI(STI), 52 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG), 53 #define GET_GLOBALISEL_PREDICATES_INIT 54 #include "AMDGPUGenGlobalISel.inc" 55 #undef GET_GLOBALISEL_PREDICATES_INIT 56 #define GET_GLOBALISEL_TEMPORARIES_INIT 57 #include "AMDGPUGenGlobalISel.inc" 58 #undef GET_GLOBALISEL_TEMPORARIES_INIT 59 { 60 } 61 62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; } 63 64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, 65 CodeGenCoverage &CoverageInfo, 66 ProfileSummaryInfo *PSI, 67 BlockFrequencyInfo *BFI) { 68 MRI = &MF.getRegInfo(); 69 Subtarget = &MF.getSubtarget<GCNSubtarget>(); 70 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); 71 } 72 73 bool AMDGPUInstructionSelector::isVCC(Register Reg, 74 const MachineRegisterInfo &MRI) const { 75 // The verifier is oblivious to s1 being a valid value for wavesize registers. 76 if (Reg.isPhysical()) 77 return false; 78 79 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 80 const TargetRegisterClass *RC = 81 RegClassOrBank.dyn_cast<const TargetRegisterClass*>(); 82 if (RC) { 83 const LLT Ty = MRI.getType(Reg); 84 if (!Ty.isValid() || Ty.getSizeInBits() != 1) 85 return false; 86 // G_TRUNC s1 result is never vcc. 87 return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC && 88 RC->hasSuperClassEq(TRI.getBoolRC()); 89 } 90 91 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); 92 return RB->getID() == AMDGPU::VCCRegBankID; 93 } 94 95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI, 96 unsigned NewOpc) const { 97 MI.setDesc(TII.get(NewOpc)); 98 MI.removeOperand(1); // Remove intrinsic ID. 99 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 100 101 MachineOperand &Dst = MI.getOperand(0); 102 MachineOperand &Src = MI.getOperand(1); 103 104 // TODO: This should be legalized to s32 if needed 105 if (MRI->getType(Dst.getReg()) == LLT::scalar(1)) 106 return false; 107 108 const TargetRegisterClass *DstRC 109 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 110 const TargetRegisterClass *SrcRC 111 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 112 if (!DstRC || DstRC != SrcRC) 113 return false; 114 115 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && 116 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); 117 } 118 119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { 120 const DebugLoc &DL = I.getDebugLoc(); 121 MachineBasicBlock *BB = I.getParent(); 122 I.setDesc(TII.get(TargetOpcode::COPY)); 123 124 const MachineOperand &Src = I.getOperand(1); 125 MachineOperand &Dst = I.getOperand(0); 126 Register DstReg = Dst.getReg(); 127 Register SrcReg = Src.getReg(); 128 129 if (isVCC(DstReg, *MRI)) { 130 if (SrcReg == AMDGPU::SCC) { 131 const TargetRegisterClass *RC 132 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 133 if (!RC) 134 return true; 135 return RBI.constrainGenericRegister(DstReg, *RC, *MRI); 136 } 137 138 if (!isVCC(SrcReg, *MRI)) { 139 // TODO: Should probably leave the copy and let copyPhysReg expand it. 140 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) 141 return false; 142 143 const TargetRegisterClass *SrcRC 144 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 145 146 Optional<ValueAndVReg> ConstVal = 147 getIConstantVRegValWithLookThrough(SrcReg, *MRI, true); 148 if (ConstVal) { 149 unsigned MovOpc = 150 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 151 BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg) 152 .addImm(ConstVal->Value.getBoolValue() ? -1 : 0); 153 } else { 154 Register MaskedReg = MRI->createVirtualRegister(SrcRC); 155 156 // We can't trust the high bits at this point, so clear them. 157 158 // TODO: Skip masking high bits if def is known boolean. 159 160 unsigned AndOpc = 161 TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; 162 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg) 163 .addImm(1) 164 .addReg(SrcReg); 165 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) 166 .addImm(0) 167 .addReg(MaskedReg); 168 } 169 170 if (!MRI->getRegClassOrNull(SrcReg)) 171 MRI->setRegClass(SrcReg, SrcRC); 172 I.eraseFromParent(); 173 return true; 174 } 175 176 const TargetRegisterClass *RC = 177 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 178 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 179 return false; 180 181 return true; 182 } 183 184 for (const MachineOperand &MO : I.operands()) { 185 if (MO.getReg().isPhysical()) 186 continue; 187 188 const TargetRegisterClass *RC = 189 TRI.getConstrainedRegClassForOperand(MO, *MRI); 190 if (!RC) 191 continue; 192 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); 193 } 194 return true; 195 } 196 197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const { 198 const Register DefReg = I.getOperand(0).getReg(); 199 const LLT DefTy = MRI->getType(DefReg); 200 if (DefTy == LLT::scalar(1)) { 201 if (!AllowRiskySelect) { 202 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n"); 203 return false; 204 } 205 206 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n"); 207 } 208 209 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy) 210 211 const RegClassOrRegBank &RegClassOrBank = 212 MRI->getRegClassOrRegBank(DefReg); 213 214 const TargetRegisterClass *DefRC 215 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); 216 if (!DefRC) { 217 if (!DefTy.isValid()) { 218 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); 219 return false; 220 } 221 222 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); 223 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB); 224 if (!DefRC) { 225 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); 226 return false; 227 } 228 } 229 230 // TODO: Verify that all registers have the same bank 231 I.setDesc(TII.get(TargetOpcode::PHI)); 232 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); 233 } 234 235 MachineOperand 236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, 237 const TargetRegisterClass &SubRC, 238 unsigned SubIdx) const { 239 240 MachineInstr *MI = MO.getParent(); 241 MachineBasicBlock *BB = MO.getParent()->getParent(); 242 Register DstReg = MRI->createVirtualRegister(&SubRC); 243 244 if (MO.isReg()) { 245 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); 246 Register Reg = MO.getReg(); 247 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) 248 .addReg(Reg, 0, ComposedSubIdx); 249 250 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(), 251 MO.isKill(), MO.isDead(), MO.isUndef(), 252 MO.isEarlyClobber(), 0, MO.isDebug(), 253 MO.isInternalRead()); 254 } 255 256 assert(MO.isImm()); 257 258 APInt Imm(64, MO.getImm()); 259 260 switch (SubIdx) { 261 default: 262 llvm_unreachable("do not know to split immediate with this sub index."); 263 case AMDGPU::sub0: 264 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue()); 265 case AMDGPU::sub1: 266 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue()); 267 } 268 } 269 270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) { 271 switch (Opc) { 272 case AMDGPU::G_AND: 273 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 274 case AMDGPU::G_OR: 275 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32; 276 case AMDGPU::G_XOR: 277 return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32; 278 default: 279 llvm_unreachable("not a bit op"); 280 } 281 } 282 283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const { 284 Register DstReg = I.getOperand(0).getReg(); 285 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 286 287 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 288 if (DstRB->getID() != AMDGPU::SGPRRegBankID && 289 DstRB->getID() != AMDGPU::VCCRegBankID) 290 return false; 291 292 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID && 293 STI.isWave64()); 294 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64))); 295 296 // Dead implicit-def of scc 297 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef 298 true, // isImp 299 false, // isKill 300 true)); // isDead 301 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 302 } 303 304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const { 305 MachineBasicBlock *BB = I.getParent(); 306 MachineFunction *MF = BB->getParent(); 307 Register DstReg = I.getOperand(0).getReg(); 308 const DebugLoc &DL = I.getDebugLoc(); 309 LLT Ty = MRI->getType(DstReg); 310 if (Ty.isVector()) 311 return false; 312 313 unsigned Size = Ty.getSizeInBits(); 314 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 315 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID; 316 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB; 317 318 if (Size == 32) { 319 if (IsSALU) { 320 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; 321 MachineInstr *Add = 322 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 323 .add(I.getOperand(1)) 324 .add(I.getOperand(2)); 325 I.eraseFromParent(); 326 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 327 } 328 329 if (STI.hasAddNoCarry()) { 330 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64; 331 I.setDesc(TII.get(Opc)); 332 I.addOperand(*MF, MachineOperand::CreateImm(0)); 333 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 334 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 335 } 336 337 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64; 338 339 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass()); 340 MachineInstr *Add 341 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 342 .addDef(UnusedCarry, RegState::Dead) 343 .add(I.getOperand(1)) 344 .add(I.getOperand(2)) 345 .addImm(0); 346 I.eraseFromParent(); 347 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 348 } 349 350 assert(!Sub && "illegal sub should not reach here"); 351 352 const TargetRegisterClass &RC 353 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass; 354 const TargetRegisterClass &HalfRC 355 = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass; 356 357 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0)); 358 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0)); 359 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1)); 360 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1)); 361 362 Register DstLo = MRI->createVirtualRegister(&HalfRC); 363 Register DstHi = MRI->createVirtualRegister(&HalfRC); 364 365 if (IsSALU) { 366 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo) 367 .add(Lo1) 368 .add(Lo2); 369 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi) 370 .add(Hi1) 371 .add(Hi2); 372 } else { 373 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass(); 374 Register CarryReg = MRI->createVirtualRegister(CarryRC); 375 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo) 376 .addDef(CarryReg) 377 .add(Lo1) 378 .add(Lo2) 379 .addImm(0); 380 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi) 381 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead) 382 .add(Hi1) 383 .add(Hi2) 384 .addReg(CarryReg, RegState::Kill) 385 .addImm(0); 386 387 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) 388 return false; 389 } 390 391 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 392 .addReg(DstLo) 393 .addImm(AMDGPU::sub0) 394 .addReg(DstHi) 395 .addImm(AMDGPU::sub1); 396 397 398 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 399 return false; 400 401 I.eraseFromParent(); 402 return true; 403 } 404 405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE( 406 MachineInstr &I) const { 407 MachineBasicBlock *BB = I.getParent(); 408 MachineFunction *MF = BB->getParent(); 409 const DebugLoc &DL = I.getDebugLoc(); 410 Register Dst0Reg = I.getOperand(0).getReg(); 411 Register Dst1Reg = I.getOperand(1).getReg(); 412 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO || 413 I.getOpcode() == AMDGPU::G_UADDE; 414 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE || 415 I.getOpcode() == AMDGPU::G_USUBE; 416 417 if (isVCC(Dst1Reg, *MRI)) { 418 unsigned NoCarryOpc = 419 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 420 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 421 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc)); 422 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 423 I.addOperand(*MF, MachineOperand::CreateImm(0)); 424 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 425 } 426 427 Register Src0Reg = I.getOperand(2).getReg(); 428 Register Src1Reg = I.getOperand(3).getReg(); 429 430 if (HasCarryIn) { 431 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 432 .addReg(I.getOperand(4).getReg()); 433 } 434 435 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 436 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 437 438 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg) 439 .add(I.getOperand(2)) 440 .add(I.getOperand(3)); 441 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg) 442 .addReg(AMDGPU::SCC); 443 444 if (!MRI->getRegClassOrNull(Dst1Reg)) 445 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass); 446 447 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || 448 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || 449 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) 450 return false; 451 452 if (HasCarryIn && 453 !RBI.constrainGenericRegister(I.getOperand(4).getReg(), 454 AMDGPU::SReg_32RegClass, *MRI)) 455 return false; 456 457 I.eraseFromParent(); 458 return true; 459 } 460 461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32( 462 MachineInstr &I) const { 463 MachineBasicBlock *BB = I.getParent(); 464 MachineFunction *MF = BB->getParent(); 465 const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32; 466 467 unsigned Opc; 468 if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11) 469 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64 470 : AMDGPU::V_MAD_I64_I32_gfx11_e64; 471 else 472 Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64; 473 I.setDesc(TII.get(Opc)); 474 I.addOperand(*MF, MachineOperand::CreateImm(0)); 475 I.addImplicitDefUseOperands(*MF); 476 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 477 } 478 479 // TODO: We should probably legalize these to only using 32-bit results. 480 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const { 481 MachineBasicBlock *BB = I.getParent(); 482 Register DstReg = I.getOperand(0).getReg(); 483 Register SrcReg = I.getOperand(1).getReg(); 484 LLT DstTy = MRI->getType(DstReg); 485 LLT SrcTy = MRI->getType(SrcReg); 486 const unsigned SrcSize = SrcTy.getSizeInBits(); 487 unsigned DstSize = DstTy.getSizeInBits(); 488 489 // TODO: Should handle any multiple of 32 offset. 490 unsigned Offset = I.getOperand(2).getImm(); 491 if (Offset % 32 != 0 || DstSize > 128) 492 return false; 493 494 // 16-bit operations really use 32-bit registers. 495 // FIXME: Probably should not allow 16-bit G_EXTRACT results. 496 if (DstSize == 16) 497 DstSize = 32; 498 499 const TargetRegisterClass *DstRC = 500 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI); 501 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 502 return false; 503 504 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 505 const TargetRegisterClass *SrcRC = 506 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank); 507 if (!SrcRC) 508 return false; 509 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32, 510 DstSize / 32); 511 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg); 512 if (!SrcRC) 513 return false; 514 515 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, 516 *SrcRC, I.getOperand(1)); 517 const DebugLoc &DL = I.getDebugLoc(); 518 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg) 519 .addReg(SrcReg, 0, SubReg); 520 521 I.eraseFromParent(); 522 return true; 523 } 524 525 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const { 526 MachineBasicBlock *BB = MI.getParent(); 527 Register DstReg = MI.getOperand(0).getReg(); 528 LLT DstTy = MRI->getType(DstReg); 529 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg()); 530 531 const unsigned SrcSize = SrcTy.getSizeInBits(); 532 if (SrcSize < 32) 533 return selectImpl(MI, *CoverageInfo); 534 535 const DebugLoc &DL = MI.getDebugLoc(); 536 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 537 const unsigned DstSize = DstTy.getSizeInBits(); 538 const TargetRegisterClass *DstRC = 539 TRI.getRegClassForSizeOnBank(DstSize, *DstBank); 540 if (!DstRC) 541 return false; 542 543 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8); 544 MachineInstrBuilder MIB = 545 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg); 546 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) { 547 MachineOperand &Src = MI.getOperand(I + 1); 548 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef())); 549 MIB.addImm(SubRegs[I]); 550 551 const TargetRegisterClass *SrcRC 552 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 553 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) 554 return false; 555 } 556 557 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 558 return false; 559 560 MI.eraseFromParent(); 561 return true; 562 } 563 564 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const { 565 MachineBasicBlock *BB = MI.getParent(); 566 const int NumDst = MI.getNumOperands() - 1; 567 568 MachineOperand &Src = MI.getOperand(NumDst); 569 570 Register SrcReg = Src.getReg(); 571 Register DstReg0 = MI.getOperand(0).getReg(); 572 LLT DstTy = MRI->getType(DstReg0); 573 LLT SrcTy = MRI->getType(SrcReg); 574 575 const unsigned DstSize = DstTy.getSizeInBits(); 576 const unsigned SrcSize = SrcTy.getSizeInBits(); 577 const DebugLoc &DL = MI.getDebugLoc(); 578 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 579 580 const TargetRegisterClass *SrcRC = 581 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank); 582 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 583 return false; 584 585 // Note we could have mixed SGPR and VGPR destination banks for an SGPR 586 // source, and this relies on the fact that the same subregister indices are 587 // used for both. 588 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8); 589 for (int I = 0, E = NumDst; I != E; ++I) { 590 MachineOperand &Dst = MI.getOperand(I); 591 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg()) 592 .addReg(SrcReg, 0, SubRegs[I]); 593 594 // Make sure the subregister index is valid for the source register. 595 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]); 596 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 597 return false; 598 599 const TargetRegisterClass *DstRC = 600 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 601 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) 602 return false; 603 } 604 605 MI.eraseFromParent(); 606 return true; 607 } 608 609 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC( 610 MachineInstr &MI) const { 611 if (selectImpl(MI, *CoverageInfo)) 612 return true; 613 614 const LLT S32 = LLT::scalar(32); 615 const LLT V2S16 = LLT::fixed_vector(2, 16); 616 617 Register Dst = MI.getOperand(0).getReg(); 618 if (MRI->getType(Dst) != V2S16) 619 return false; 620 621 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); 622 if (DstBank->getID() != AMDGPU::SGPRRegBankID) 623 return false; 624 625 Register Src0 = MI.getOperand(1).getReg(); 626 Register Src1 = MI.getOperand(2).getReg(); 627 if (MRI->getType(Src0) != S32) 628 return false; 629 630 const DebugLoc &DL = MI.getDebugLoc(); 631 MachineBasicBlock *BB = MI.getParent(); 632 633 auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true); 634 if (ConstSrc1) { 635 auto ConstSrc0 = 636 getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true); 637 if (ConstSrc0) { 638 const int64_t K0 = ConstSrc0->Value.getSExtValue(); 639 const int64_t K1 = ConstSrc1->Value.getSExtValue(); 640 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; 641 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; 642 643 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst) 644 .addImm(Lo16 | (Hi16 << 16)); 645 MI.eraseFromParent(); 646 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); 647 } 648 } 649 650 // TODO: This should probably be a combine somewhere 651 // (build_vector_trunc $src0, undef -> copy $src0 652 MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI); 653 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) { 654 MI.setDesc(TII.get(AMDGPU::COPY)); 655 MI.removeOperand(2); 656 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) && 657 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI); 658 } 659 660 Register ShiftSrc0; 661 Register ShiftSrc1; 662 663 // With multiple uses of the shift, this will duplicate the shift and 664 // increase register pressure. 665 // 666 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16) 667 // => (S_PACK_HH_B32_B16 $src0, $src1) 668 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16)) 669 // => (S_PACK_LH_B32_B16 $src0, $src1) 670 // (build_vector_trunc $src0, $src1) 671 // => (S_PACK_LL_B32_B16 $src0, $src1) 672 673 bool Shift0 = mi_match( 674 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); 675 676 bool Shift1 = mi_match( 677 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16)))); 678 679 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16; 680 if (Shift0 && Shift1) { 681 Opc = AMDGPU::S_PACK_HH_B32_B16; 682 MI.getOperand(1).setReg(ShiftSrc0); 683 MI.getOperand(2).setReg(ShiftSrc1); 684 } else if (Shift1) { 685 Opc = AMDGPU::S_PACK_LH_B32_B16; 686 MI.getOperand(2).setReg(ShiftSrc1); 687 } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) { 688 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16 689 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst) 690 .addReg(ShiftSrc0) 691 .addImm(16); 692 693 MI.eraseFromParent(); 694 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 695 } 696 697 MI.setDesc(TII.get(Opc)); 698 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); 699 } 700 701 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const { 702 return selectG_ADD_SUB(I); 703 } 704 705 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const { 706 const MachineOperand &MO = I.getOperand(0); 707 708 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The 709 // regbank check here is to know why getConstrainedRegClassForOperand failed. 710 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI); 711 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) || 712 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { 713 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); 714 return true; 715 } 716 717 return false; 718 } 719 720 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const { 721 MachineBasicBlock *BB = I.getParent(); 722 723 Register DstReg = I.getOperand(0).getReg(); 724 Register Src0Reg = I.getOperand(1).getReg(); 725 Register Src1Reg = I.getOperand(2).getReg(); 726 LLT Src1Ty = MRI->getType(Src1Reg); 727 728 unsigned DstSize = MRI->getType(DstReg).getSizeInBits(); 729 unsigned InsSize = Src1Ty.getSizeInBits(); 730 731 int64_t Offset = I.getOperand(3).getImm(); 732 733 // FIXME: These cases should have been illegal and unnecessary to check here. 734 if (Offset % 32 != 0 || InsSize % 32 != 0) 735 return false; 736 737 // Currently not handled by getSubRegFromChannel. 738 if (InsSize > 128) 739 return false; 740 741 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32); 742 if (SubReg == AMDGPU::NoSubRegister) 743 return false; 744 745 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 746 const TargetRegisterClass *DstRC = 747 TRI.getRegClassForSizeOnBank(DstSize, *DstBank); 748 if (!DstRC) 749 return false; 750 751 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); 752 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); 753 const TargetRegisterClass *Src0RC = 754 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank); 755 const TargetRegisterClass *Src1RC = 756 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank); 757 758 // Deal with weird cases where the class only partially supports the subreg 759 // index. 760 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg); 761 if (!Src0RC || !Src1RC) 762 return false; 763 764 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 765 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || 766 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) 767 return false; 768 769 const DebugLoc &DL = I.getDebugLoc(); 770 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg) 771 .addReg(Src0Reg) 772 .addReg(Src1Reg) 773 .addImm(SubReg); 774 775 I.eraseFromParent(); 776 return true; 777 } 778 779 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const { 780 Register DstReg = MI.getOperand(0).getReg(); 781 Register SrcReg = MI.getOperand(1).getReg(); 782 Register OffsetReg = MI.getOperand(2).getReg(); 783 Register WidthReg = MI.getOperand(3).getReg(); 784 785 assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && 786 "scalar BFX instructions are expanded in regbankselect"); 787 assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 && 788 "64-bit vector BFX instructions are expanded in regbankselect"); 789 790 const DebugLoc &DL = MI.getDebugLoc(); 791 MachineBasicBlock *MBB = MI.getParent(); 792 793 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX; 794 unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 795 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg) 796 .addReg(SrcReg) 797 .addReg(OffsetReg) 798 .addReg(WidthReg); 799 MI.eraseFromParent(); 800 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 801 } 802 803 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { 804 if (STI.getLDSBankCount() != 16) 805 return selectImpl(MI, *CoverageInfo); 806 807 Register Dst = MI.getOperand(0).getReg(); 808 Register Src0 = MI.getOperand(2).getReg(); 809 Register M0Val = MI.getOperand(6).getReg(); 810 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || 811 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || 812 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) 813 return false; 814 815 // This requires 2 instructions. It is possible to write a pattern to support 816 // this, but the generated isel emitter doesn't correctly deal with multiple 817 // output instructions using the same physical register input. The copy to m0 818 // is incorrectly placed before the second instruction. 819 // 820 // TODO: Match source modifiers. 821 822 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 823 const DebugLoc &DL = MI.getDebugLoc(); 824 MachineBasicBlock *MBB = MI.getParent(); 825 826 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 827 .addReg(M0Val); 828 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov) 829 .addImm(2) 830 .addImm(MI.getOperand(4).getImm()) // $attr 831 .addImm(MI.getOperand(3).getImm()); // $attrchan 832 833 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst) 834 .addImm(0) // $src0_modifiers 835 .addReg(Src0) // $src0 836 .addImm(MI.getOperand(4).getImm()) // $attr 837 .addImm(MI.getOperand(3).getImm()) // $attrchan 838 .addImm(0) // $src2_modifiers 839 .addReg(InterpMov) // $src2 - 2 f16 values selected by high 840 .addImm(MI.getOperand(5).getImm()) // $high 841 .addImm(0) // $clamp 842 .addImm(0); // $omod 843 844 MI.eraseFromParent(); 845 return true; 846 } 847 848 // Writelane is special in that it can use SGPR and M0 (which would normally 849 // count as using the constant bus twice - but in this case it is allowed since 850 // the lane selector doesn't count as a use of the constant bus). However, it is 851 // still required to abide by the 1 SGPR rule. Fix this up if we might have 852 // multiple SGPRs. 853 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { 854 // With a constant bus limit of at least 2, there's no issue. 855 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1) 856 return selectImpl(MI, *CoverageInfo); 857 858 MachineBasicBlock *MBB = MI.getParent(); 859 const DebugLoc &DL = MI.getDebugLoc(); 860 Register VDst = MI.getOperand(0).getReg(); 861 Register Val = MI.getOperand(2).getReg(); 862 Register LaneSelect = MI.getOperand(3).getReg(); 863 Register VDstIn = MI.getOperand(4).getReg(); 864 865 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); 866 867 Optional<ValueAndVReg> ConstSelect = 868 getIConstantVRegValWithLookThrough(LaneSelect, *MRI); 869 if (ConstSelect) { 870 // The selector has to be an inline immediate, so we can use whatever for 871 // the other operands. 872 MIB.addReg(Val); 873 MIB.addImm(ConstSelect->Value.getSExtValue() & 874 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); 875 } else { 876 Optional<ValueAndVReg> ConstVal = 877 getIConstantVRegValWithLookThrough(Val, *MRI); 878 879 // If the value written is an inline immediate, we can get away without a 880 // copy to m0. 881 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(), 882 STI.hasInv2PiInlineImm())) { 883 MIB.addImm(ConstVal->Value.getSExtValue()); 884 MIB.addReg(LaneSelect); 885 } else { 886 MIB.addReg(Val); 887 888 // If the lane selector was originally in a VGPR and copied with 889 // readfirstlane, there's a hazard to read the same SGPR from the 890 // VALU. Constrain to a different SGPR to help avoid needing a nop later. 891 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); 892 893 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 894 .addReg(LaneSelect); 895 MIB.addReg(AMDGPU::M0); 896 } 897 } 898 899 MIB.addReg(VDstIn); 900 901 MI.eraseFromParent(); 902 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 903 } 904 905 // We need to handle this here because tablegen doesn't support matching 906 // instructions with multiple outputs. 907 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const { 908 Register Dst0 = MI.getOperand(0).getReg(); 909 Register Dst1 = MI.getOperand(1).getReg(); 910 911 LLT Ty = MRI->getType(Dst0); 912 unsigned Opc; 913 if (Ty == LLT::scalar(32)) 914 Opc = AMDGPU::V_DIV_SCALE_F32_e64; 915 else if (Ty == LLT::scalar(64)) 916 Opc = AMDGPU::V_DIV_SCALE_F64_e64; 917 else 918 return false; 919 920 // TODO: Match source modifiers. 921 922 const DebugLoc &DL = MI.getDebugLoc(); 923 MachineBasicBlock *MBB = MI.getParent(); 924 925 Register Numer = MI.getOperand(3).getReg(); 926 Register Denom = MI.getOperand(4).getReg(); 927 unsigned ChooseDenom = MI.getOperand(5).getImm(); 928 929 Register Src0 = ChooseDenom != 0 ? Numer : Denom; 930 931 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0) 932 .addDef(Dst1) 933 .addImm(0) // $src0_modifiers 934 .addUse(Src0) // $src0 935 .addImm(0) // $src1_modifiers 936 .addUse(Denom) // $src1 937 .addImm(0) // $src2_modifiers 938 .addUse(Numer) // $src2 939 .addImm(0) // $clamp 940 .addImm(0); // $omod 941 942 MI.eraseFromParent(); 943 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 944 } 945 946 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { 947 unsigned IntrinsicID = I.getIntrinsicID(); 948 switch (IntrinsicID) { 949 case Intrinsic::amdgcn_if_break: { 950 MachineBasicBlock *BB = I.getParent(); 951 952 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick 953 // SelectionDAG uses for wave32 vs wave64. 954 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK)) 955 .add(I.getOperand(0)) 956 .add(I.getOperand(2)) 957 .add(I.getOperand(3)); 958 959 Register DstReg = I.getOperand(0).getReg(); 960 Register Src0Reg = I.getOperand(2).getReg(); 961 Register Src1Reg = I.getOperand(3).getReg(); 962 963 I.eraseFromParent(); 964 965 for (Register Reg : { DstReg, Src0Reg, Src1Reg }) 966 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 967 968 return true; 969 } 970 case Intrinsic::amdgcn_interp_p1_f16: 971 return selectInterpP1F16(I); 972 case Intrinsic::amdgcn_wqm: 973 return constrainCopyLikeIntrin(I, AMDGPU::WQM); 974 case Intrinsic::amdgcn_softwqm: 975 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM); 976 case Intrinsic::amdgcn_strict_wwm: 977 case Intrinsic::amdgcn_wwm: 978 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM); 979 case Intrinsic::amdgcn_strict_wqm: 980 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM); 981 case Intrinsic::amdgcn_writelane: 982 return selectWritelane(I); 983 case Intrinsic::amdgcn_div_scale: 984 return selectDivScale(I); 985 case Intrinsic::amdgcn_icmp: 986 return selectIntrinsicIcmp(I); 987 case Intrinsic::amdgcn_ballot: 988 return selectBallot(I); 989 case Intrinsic::amdgcn_reloc_constant: 990 return selectRelocConstant(I); 991 case Intrinsic::amdgcn_groupstaticsize: 992 return selectGroupStaticSize(I); 993 case Intrinsic::returnaddress: 994 return selectReturnAddress(I); 995 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16: 996 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16: 997 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16: 998 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16: 999 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8: 1000 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8: 1001 return selectSMFMACIntrin(I); 1002 default: 1003 return selectImpl(I, *CoverageInfo); 1004 } 1005 } 1006 1007 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) { 1008 if (Size != 32 && Size != 64) 1009 return -1; 1010 switch (P) { 1011 default: 1012 llvm_unreachable("Unknown condition code!"); 1013 case CmpInst::ICMP_NE: 1014 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64; 1015 case CmpInst::ICMP_EQ: 1016 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64; 1017 case CmpInst::ICMP_SGT: 1018 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64; 1019 case CmpInst::ICMP_SGE: 1020 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64; 1021 case CmpInst::ICMP_SLT: 1022 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64; 1023 case CmpInst::ICMP_SLE: 1024 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64; 1025 case CmpInst::ICMP_UGT: 1026 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64; 1027 case CmpInst::ICMP_UGE: 1028 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64; 1029 case CmpInst::ICMP_ULT: 1030 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64; 1031 case CmpInst::ICMP_ULE: 1032 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64; 1033 } 1034 } 1035 1036 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P, 1037 unsigned Size) const { 1038 if (Size == 64) { 1039 if (!STI.hasScalarCompareEq64()) 1040 return -1; 1041 1042 switch (P) { 1043 case CmpInst::ICMP_NE: 1044 return AMDGPU::S_CMP_LG_U64; 1045 case CmpInst::ICMP_EQ: 1046 return AMDGPU::S_CMP_EQ_U64; 1047 default: 1048 return -1; 1049 } 1050 } 1051 1052 if (Size != 32) 1053 return -1; 1054 1055 switch (P) { 1056 case CmpInst::ICMP_NE: 1057 return AMDGPU::S_CMP_LG_U32; 1058 case CmpInst::ICMP_EQ: 1059 return AMDGPU::S_CMP_EQ_U32; 1060 case CmpInst::ICMP_SGT: 1061 return AMDGPU::S_CMP_GT_I32; 1062 case CmpInst::ICMP_SGE: 1063 return AMDGPU::S_CMP_GE_I32; 1064 case CmpInst::ICMP_SLT: 1065 return AMDGPU::S_CMP_LT_I32; 1066 case CmpInst::ICMP_SLE: 1067 return AMDGPU::S_CMP_LE_I32; 1068 case CmpInst::ICMP_UGT: 1069 return AMDGPU::S_CMP_GT_U32; 1070 case CmpInst::ICMP_UGE: 1071 return AMDGPU::S_CMP_GE_U32; 1072 case CmpInst::ICMP_ULT: 1073 return AMDGPU::S_CMP_LT_U32; 1074 case CmpInst::ICMP_ULE: 1075 return AMDGPU::S_CMP_LE_U32; 1076 default: 1077 llvm_unreachable("Unknown condition code!"); 1078 } 1079 } 1080 1081 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { 1082 MachineBasicBlock *BB = I.getParent(); 1083 const DebugLoc &DL = I.getDebugLoc(); 1084 1085 Register SrcReg = I.getOperand(2).getReg(); 1086 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1087 1088 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); 1089 1090 Register CCReg = I.getOperand(0).getReg(); 1091 if (!isVCC(CCReg, *MRI)) { 1092 int Opcode = getS_CMPOpcode(Pred, Size); 1093 if (Opcode == -1) 1094 return false; 1095 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode)) 1096 .add(I.getOperand(2)) 1097 .add(I.getOperand(3)); 1098 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg) 1099 .addReg(AMDGPU::SCC); 1100 bool Ret = 1101 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && 1102 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); 1103 I.eraseFromParent(); 1104 return Ret; 1105 } 1106 1107 int Opcode = getV_CMPOpcode(Pred, Size); 1108 if (Opcode == -1) 1109 return false; 1110 1111 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), 1112 I.getOperand(0).getReg()) 1113 .add(I.getOperand(2)) 1114 .add(I.getOperand(3)); 1115 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), 1116 *TRI.getBoolRC(), *MRI); 1117 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1118 I.eraseFromParent(); 1119 return Ret; 1120 } 1121 1122 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const { 1123 Register Dst = I.getOperand(0).getReg(); 1124 if (isVCC(Dst, *MRI)) 1125 return false; 1126 1127 if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize()) 1128 return false; 1129 1130 MachineBasicBlock *BB = I.getParent(); 1131 const DebugLoc &DL = I.getDebugLoc(); 1132 Register SrcReg = I.getOperand(2).getReg(); 1133 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1134 1135 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm()); 1136 if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) { 1137 MachineInstr *ICmp = 1138 BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst); 1139 1140 if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), 1141 *TRI.getBoolRC(), *MRI)) 1142 return false; 1143 I.eraseFromParent(); 1144 return true; 1145 } 1146 1147 int Opcode = getV_CMPOpcode(Pred, Size); 1148 if (Opcode == -1) 1149 return false; 1150 1151 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) 1152 .add(I.getOperand(2)) 1153 .add(I.getOperand(3)); 1154 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(), 1155 *MRI); 1156 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1157 I.eraseFromParent(); 1158 return Ret; 1159 } 1160 1161 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { 1162 MachineBasicBlock *BB = I.getParent(); 1163 const DebugLoc &DL = I.getDebugLoc(); 1164 Register DstReg = I.getOperand(0).getReg(); 1165 const unsigned Size = MRI->getType(DstReg).getSizeInBits(); 1166 const bool Is64 = Size == 64; 1167 1168 if (Size != STI.getWavefrontSize()) 1169 return false; 1170 1171 Optional<ValueAndVReg> Arg = 1172 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); 1173 1174 if (Arg.hasValue()) { 1175 const int64_t Value = Arg.getValue().Value.getSExtValue(); 1176 if (Value == 0) { 1177 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 1178 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); 1179 } else if (Value == -1) { // all ones 1180 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; 1181 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1182 } else 1183 return false; 1184 } else { 1185 Register SrcReg = I.getOperand(2).getReg(); 1186 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1187 } 1188 1189 I.eraseFromParent(); 1190 return true; 1191 } 1192 1193 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const { 1194 Register DstReg = I.getOperand(0).getReg(); 1195 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 1196 const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank); 1197 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 1198 return false; 1199 1200 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID; 1201 1202 Module *M = MF->getFunction().getParent(); 1203 const MDNode *Metadata = I.getOperand(2).getMetadata(); 1204 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); 1205 auto RelocSymbol = cast<GlobalVariable>( 1206 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); 1207 1208 MachineBasicBlock *BB = I.getParent(); 1209 BuildMI(*BB, &I, I.getDebugLoc(), 1210 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg) 1211 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO); 1212 1213 I.eraseFromParent(); 1214 return true; 1215 } 1216 1217 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const { 1218 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS(); 1219 1220 Register DstReg = I.getOperand(0).getReg(); 1221 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1222 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ? 1223 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1224 1225 MachineBasicBlock *MBB = I.getParent(); 1226 const DebugLoc &DL = I.getDebugLoc(); 1227 1228 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg); 1229 1230 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) { 1231 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1232 MIB.addImm(MFI->getLDSSize()); 1233 } else { 1234 Module *M = MF->getFunction().getParent(); 1235 const GlobalValue *GV 1236 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize); 1237 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO); 1238 } 1239 1240 I.eraseFromParent(); 1241 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1242 } 1243 1244 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const { 1245 MachineBasicBlock *MBB = I.getParent(); 1246 MachineFunction &MF = *MBB->getParent(); 1247 const DebugLoc &DL = I.getDebugLoc(); 1248 1249 MachineOperand &Dst = I.getOperand(0); 1250 Register DstReg = Dst.getReg(); 1251 unsigned Depth = I.getOperand(2).getImm(); 1252 1253 const TargetRegisterClass *RC 1254 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 1255 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) || 1256 !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 1257 return false; 1258 1259 // Check for kernel and shader functions 1260 if (Depth != 0 || 1261 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { 1262 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 1263 .addImm(0); 1264 I.eraseFromParent(); 1265 return true; 1266 } 1267 1268 MachineFrameInfo &MFI = MF.getFrameInfo(); 1269 // There is a call to @llvm.returnaddress in this function 1270 MFI.setReturnAddressIsTaken(true); 1271 1272 // Get the return address reg and mark it as an implicit live-in 1273 Register ReturnAddrReg = TRI.getReturnAddressReg(MF); 1274 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg, 1275 AMDGPU::SReg_64RegClass, DL); 1276 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg) 1277 .addReg(LiveIn); 1278 I.eraseFromParent(); 1279 return true; 1280 } 1281 1282 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const { 1283 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick 1284 // SelectionDAG uses for wave32 vs wave64. 1285 MachineBasicBlock *BB = MI.getParent(); 1286 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF)) 1287 .add(MI.getOperand(1)); 1288 1289 Register Reg = MI.getOperand(1).getReg(); 1290 MI.eraseFromParent(); 1291 1292 if (!MRI->getRegClassOrNull(Reg)) 1293 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 1294 return true; 1295 } 1296 1297 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic( 1298 MachineInstr &MI, Intrinsic::ID IntrID) const { 1299 MachineBasicBlock *MBB = MI.getParent(); 1300 MachineFunction *MF = MBB->getParent(); 1301 const DebugLoc &DL = MI.getDebugLoc(); 1302 1303 unsigned IndexOperand = MI.getOperand(7).getImm(); 1304 bool WaveRelease = MI.getOperand(8).getImm() != 0; 1305 bool WaveDone = MI.getOperand(9).getImm() != 0; 1306 1307 if (WaveDone && !WaveRelease) 1308 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 1309 1310 unsigned OrderedCountIndex = IndexOperand & 0x3f; 1311 IndexOperand &= ~0x3f; 1312 unsigned CountDw = 0; 1313 1314 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) { 1315 CountDw = (IndexOperand >> 24) & 0xf; 1316 IndexOperand &= ~(0xf << 24); 1317 1318 if (CountDw < 1 || CountDw > 4) { 1319 report_fatal_error( 1320 "ds_ordered_count: dword count must be between 1 and 4"); 1321 } 1322 } 1323 1324 if (IndexOperand) 1325 report_fatal_error("ds_ordered_count: bad index operand"); 1326 1327 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; 1328 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF); 1329 1330 unsigned Offset0 = OrderedCountIndex << 2; 1331 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | 1332 (Instruction << 4); 1333 1334 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) 1335 Offset1 |= (CountDw - 1) << 6; 1336 1337 unsigned Offset = Offset0 | (Offset1 << 8); 1338 1339 Register M0Val = MI.getOperand(2).getReg(); 1340 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1341 .addReg(M0Val); 1342 1343 Register DstReg = MI.getOperand(0).getReg(); 1344 Register ValReg = MI.getOperand(3).getReg(); 1345 MachineInstrBuilder DS = 1346 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg) 1347 .addReg(ValReg) 1348 .addImm(Offset) 1349 .cloneMemRefs(MI); 1350 1351 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) 1352 return false; 1353 1354 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); 1355 MI.eraseFromParent(); 1356 return Ret; 1357 } 1358 1359 static unsigned gwsIntrinToOpcode(unsigned IntrID) { 1360 switch (IntrID) { 1361 case Intrinsic::amdgcn_ds_gws_init: 1362 return AMDGPU::DS_GWS_INIT; 1363 case Intrinsic::amdgcn_ds_gws_barrier: 1364 return AMDGPU::DS_GWS_BARRIER; 1365 case Intrinsic::amdgcn_ds_gws_sema_v: 1366 return AMDGPU::DS_GWS_SEMA_V; 1367 case Intrinsic::amdgcn_ds_gws_sema_br: 1368 return AMDGPU::DS_GWS_SEMA_BR; 1369 case Intrinsic::amdgcn_ds_gws_sema_p: 1370 return AMDGPU::DS_GWS_SEMA_P; 1371 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1372 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; 1373 default: 1374 llvm_unreachable("not a gws intrinsic"); 1375 } 1376 } 1377 1378 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI, 1379 Intrinsic::ID IID) const { 1380 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all && 1381 !STI.hasGWSSemaReleaseAll()) 1382 return false; 1383 1384 // intrinsic ID, vsrc, offset 1385 const bool HasVSrc = MI.getNumOperands() == 3; 1386 assert(HasVSrc || MI.getNumOperands() == 2); 1387 1388 Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg(); 1389 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); 1390 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID) 1391 return false; 1392 1393 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1394 assert(OffsetDef); 1395 1396 unsigned ImmOffset; 1397 1398 MachineBasicBlock *MBB = MI.getParent(); 1399 const DebugLoc &DL = MI.getDebugLoc(); 1400 1401 MachineInstr *Readfirstlane = nullptr; 1402 1403 // If we legalized the VGPR input, strip out the readfirstlane to analyze the 1404 // incoming offset, in case there's an add of a constant. We'll have to put it 1405 // back later. 1406 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) { 1407 Readfirstlane = OffsetDef; 1408 BaseOffset = OffsetDef->getOperand(1).getReg(); 1409 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1410 } 1411 1412 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) { 1413 // If we have a constant offset, try to use the 0 in m0 as the base. 1414 // TODO: Look into changing the default m0 initialization value. If the 1415 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to 1416 // the immediate offset. 1417 1418 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue(); 1419 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1420 .addImm(0); 1421 } else { 1422 std::tie(BaseOffset, ImmOffset) = 1423 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); 1424 1425 if (Readfirstlane) { 1426 // We have the constant offset now, so put the readfirstlane back on the 1427 // variable component. 1428 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) 1429 return false; 1430 1431 Readfirstlane->getOperand(1).setReg(BaseOffset); 1432 BaseOffset = Readfirstlane->getOperand(0).getReg(); 1433 } else { 1434 if (!RBI.constrainGenericRegister(BaseOffset, 1435 AMDGPU::SReg_32RegClass, *MRI)) 1436 return false; 1437 } 1438 1439 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1440 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base) 1441 .addReg(BaseOffset) 1442 .addImm(16); 1443 1444 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1445 .addReg(M0Base); 1446 } 1447 1448 // The resource id offset is computed as (<isa opaque base> + M0[21:16] + 1449 // offset field) % 64. Some versions of the programming guide omit the m0 1450 // part, or claim it's from offset 0. 1451 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID))); 1452 1453 if (HasVSrc) { 1454 Register VSrc = MI.getOperand(1).getReg(); 1455 MIB.addReg(VSrc); 1456 1457 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) 1458 return false; 1459 } 1460 1461 MIB.addImm(ImmOffset) 1462 .cloneMemRefs(MI); 1463 1464 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0); 1465 1466 MI.eraseFromParent(); 1467 return true; 1468 } 1469 1470 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI, 1471 bool IsAppend) const { 1472 Register PtrBase = MI.getOperand(2).getReg(); 1473 LLT PtrTy = MRI->getType(PtrBase); 1474 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS; 1475 1476 unsigned Offset; 1477 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2)); 1478 1479 // TODO: Should this try to look through readfirstlane like GWS? 1480 if (!isDSOffsetLegal(PtrBase, Offset)) { 1481 PtrBase = MI.getOperand(2).getReg(); 1482 Offset = 0; 1483 } 1484 1485 MachineBasicBlock *MBB = MI.getParent(); 1486 const DebugLoc &DL = MI.getDebugLoc(); 1487 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; 1488 1489 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1490 .addReg(PtrBase); 1491 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) 1492 return false; 1493 1494 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg()) 1495 .addImm(Offset) 1496 .addImm(IsGDS ? -1 : 0) 1497 .cloneMemRefs(MI); 1498 MI.eraseFromParent(); 1499 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1500 } 1501 1502 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const { 1503 if (TM.getOptLevel() > CodeGenOpt::None) { 1504 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second; 1505 if (WGSize <= STI.getWavefrontSize()) { 1506 MachineBasicBlock *MBB = MI.getParent(); 1507 const DebugLoc &DL = MI.getDebugLoc(); 1508 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER)); 1509 MI.eraseFromParent(); 1510 return true; 1511 } 1512 } 1513 return selectImpl(MI, *CoverageInfo); 1514 } 1515 1516 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE, 1517 bool &IsTexFail) { 1518 if (TexFailCtrl) 1519 IsTexFail = true; 1520 1521 TFE = (TexFailCtrl & 0x1) ? true : false; 1522 TexFailCtrl &= ~(uint64_t)0x1; 1523 LWE = (TexFailCtrl & 0x2) ? true : false; 1524 TexFailCtrl &= ~(uint64_t)0x2; 1525 1526 return TexFailCtrl == 0; 1527 } 1528 1529 bool AMDGPUInstructionSelector::selectImageIntrinsic( 1530 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { 1531 MachineBasicBlock *MBB = MI.getParent(); 1532 const DebugLoc &DL = MI.getDebugLoc(); 1533 1534 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 1535 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 1536 1537 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 1538 unsigned IntrOpcode = Intr->BaseOpcode; 1539 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); 1540 const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI); 1541 1542 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; 1543 1544 Register VDataIn, VDataOut; 1545 LLT VDataTy; 1546 int NumVDataDwords = -1; 1547 bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 || 1548 MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16; 1549 1550 bool Unorm; 1551 if (!BaseOpcode->Sampler) 1552 Unorm = true; 1553 else 1554 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0; 1555 1556 bool TFE; 1557 bool LWE; 1558 bool IsTexFail = false; 1559 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(), 1560 TFE, LWE, IsTexFail)) 1561 return false; 1562 1563 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm(); 1564 const bool IsA16 = (Flags & 1) != 0; 1565 const bool IsG16 = (Flags & 2) != 0; 1566 1567 // A16 implies 16 bit gradients if subtarget doesn't support G16 1568 if (IsA16 && !STI.hasG16() && !IsG16) 1569 return false; 1570 1571 unsigned DMask = 0; 1572 unsigned DMaskLanes = 0; 1573 1574 if (BaseOpcode->Atomic) { 1575 VDataOut = MI.getOperand(0).getReg(); 1576 VDataIn = MI.getOperand(2).getReg(); 1577 LLT Ty = MRI->getType(VDataIn); 1578 1579 // Be careful to allow atomic swap on 16-bit element vectors. 1580 const bool Is64Bit = BaseOpcode->AtomicX2 ? 1581 Ty.getSizeInBits() == 128 : 1582 Ty.getSizeInBits() == 64; 1583 1584 if (BaseOpcode->AtomicX2) { 1585 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister); 1586 1587 DMask = Is64Bit ? 0xf : 0x3; 1588 NumVDataDwords = Is64Bit ? 4 : 2; 1589 } else { 1590 DMask = Is64Bit ? 0x3 : 0x1; 1591 NumVDataDwords = Is64Bit ? 2 : 1; 1592 } 1593 } else { 1594 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm(); 1595 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 1596 1597 if (BaseOpcode->Store) { 1598 VDataIn = MI.getOperand(1).getReg(); 1599 VDataTy = MRI->getType(VDataIn); 1600 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32; 1601 } else { 1602 VDataOut = MI.getOperand(0).getReg(); 1603 VDataTy = MRI->getType(VDataOut); 1604 NumVDataDwords = DMaskLanes; 1605 1606 if (IsD16 && !STI.hasUnpackedD16VMem()) 1607 NumVDataDwords = (DMaskLanes + 1) / 2; 1608 } 1609 } 1610 1611 // Set G16 opcode 1612 if (IsG16 && !IsA16) { 1613 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = 1614 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); 1615 assert(G16MappingInfo); 1616 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16 1617 } 1618 1619 // TODO: Check this in verifier. 1620 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"); 1621 1622 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); 1623 if (BaseOpcode->Atomic) 1624 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization 1625 if (CPol & ~AMDGPU::CPol::ALL) 1626 return false; 1627 1628 int NumVAddrRegs = 0; 1629 int NumVAddrDwords = 0; 1630 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) { 1631 // Skip the $noregs and 0s inserted during legalization. 1632 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I); 1633 if (!AddrOp.isReg()) 1634 continue; // XXX - Break? 1635 1636 Register Addr = AddrOp.getReg(); 1637 if (!Addr) 1638 break; 1639 1640 ++NumVAddrRegs; 1641 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32; 1642 } 1643 1644 // The legalizer preprocessed the intrinsic arguments. If we aren't using 1645 // NSA, these should have been packed into a single value in the first 1646 // address register 1647 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs; 1648 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) { 1649 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n"); 1650 return false; 1651 } 1652 1653 if (IsTexFail) 1654 ++NumVDataDwords; 1655 1656 int Opcode = -1; 1657 if (IsGFX11Plus) { 1658 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 1659 UseNSA ? AMDGPU::MIMGEncGfx11NSA 1660 : AMDGPU::MIMGEncGfx11Default, 1661 NumVDataDwords, NumVAddrDwords); 1662 } else if (IsGFX10Plus) { 1663 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 1664 UseNSA ? AMDGPU::MIMGEncGfx10NSA 1665 : AMDGPU::MIMGEncGfx10Default, 1666 NumVDataDwords, NumVAddrDwords); 1667 } else { 1668 if (Subtarget->hasGFX90AInsts()) { 1669 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a, 1670 NumVDataDwords, NumVAddrDwords); 1671 if (Opcode == -1) { 1672 LLVM_DEBUG( 1673 dbgs() 1674 << "requested image instruction is not supported on this GPU\n"); 1675 return false; 1676 } 1677 } 1678 if (Opcode == -1 && 1679 STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 1680 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 1681 NumVDataDwords, NumVAddrDwords); 1682 if (Opcode == -1) 1683 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 1684 NumVDataDwords, NumVAddrDwords); 1685 } 1686 assert(Opcode != -1); 1687 1688 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode)) 1689 .cloneMemRefs(MI); 1690 1691 if (VDataOut) { 1692 if (BaseOpcode->AtomicX2) { 1693 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64; 1694 1695 Register TmpReg = MRI->createVirtualRegister( 1696 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); 1697 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; 1698 1699 MIB.addDef(TmpReg); 1700 if (!MRI->use_empty(VDataOut)) { 1701 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) 1702 .addReg(TmpReg, RegState::Kill, SubReg); 1703 } 1704 1705 } else { 1706 MIB.addDef(VDataOut); // vdata output 1707 } 1708 } 1709 1710 if (VDataIn) 1711 MIB.addReg(VDataIn); // vdata input 1712 1713 for (int I = 0; I != NumVAddrRegs; ++I) { 1714 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); 1715 if (SrcOp.isReg()) { 1716 assert(SrcOp.getReg() != 0); 1717 MIB.addReg(SrcOp.getReg()); 1718 } 1719 } 1720 1721 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); 1722 if (BaseOpcode->Sampler) 1723 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); 1724 1725 MIB.addImm(DMask); // dmask 1726 1727 if (IsGFX10Plus) 1728 MIB.addImm(DimInfo->Encoding); 1729 MIB.addImm(Unorm); 1730 1731 MIB.addImm(CPol); 1732 MIB.addImm(IsA16 && // a16 or r128 1733 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); 1734 if (IsGFX10Plus) 1735 MIB.addImm(IsA16 ? -1 : 0); 1736 1737 if (!Subtarget->hasGFX90AInsts()) { 1738 MIB.addImm(TFE); // tfe 1739 } else if (TFE) { 1740 LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n"); 1741 return false; 1742 } 1743 1744 MIB.addImm(LWE); // lwe 1745 if (!IsGFX10Plus) 1746 MIB.addImm(DimInfo->DA ? -1 : 0); 1747 if (BaseOpcode->HasD16) 1748 MIB.addImm(IsD16 ? -1 : 0); 1749 1750 if (IsTexFail) { 1751 // An image load instruction with TFE/LWE only conditionally writes to its 1752 // result registers. Initialize them to zero so that we always get well 1753 // defined result values. 1754 assert(VDataOut && !VDataIn); 1755 Register Tied = MRI->cloneVirtualRegister(VDataOut); 1756 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1757 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero) 1758 .addImm(0); 1759 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4); 1760 if (STI.usePRTStrictNull()) { 1761 // With enable-prt-strict-null enabled, initialize all result registers to 1762 // zero. 1763 auto RegSeq = 1764 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1765 for (auto Sub : Parts) 1766 RegSeq.addReg(Zero).addImm(Sub); 1767 } else { 1768 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE 1769 // result register. 1770 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1771 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); 1772 auto RegSeq = 1773 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1774 for (auto Sub : Parts.drop_back(1)) 1775 RegSeq.addReg(Undef).addImm(Sub); 1776 RegSeq.addReg(Zero).addImm(Parts.back()); 1777 } 1778 MIB.addReg(Tied, RegState::Implicit); 1779 MIB->tieOperands(0, MIB->getNumOperands() - 1); 1780 } 1781 1782 MI.eraseFromParent(); 1783 constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1784 TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr); 1785 return true; 1786 } 1787 1788 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( 1789 MachineInstr &I) const { 1790 unsigned IntrinsicID = I.getIntrinsicID(); 1791 switch (IntrinsicID) { 1792 case Intrinsic::amdgcn_end_cf: 1793 return selectEndCfIntrinsic(I); 1794 case Intrinsic::amdgcn_ds_ordered_add: 1795 case Intrinsic::amdgcn_ds_ordered_swap: 1796 return selectDSOrderedIntrinsic(I, IntrinsicID); 1797 case Intrinsic::amdgcn_ds_gws_init: 1798 case Intrinsic::amdgcn_ds_gws_barrier: 1799 case Intrinsic::amdgcn_ds_gws_sema_v: 1800 case Intrinsic::amdgcn_ds_gws_sema_br: 1801 case Intrinsic::amdgcn_ds_gws_sema_p: 1802 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1803 return selectDSGWSIntrinsic(I, IntrinsicID); 1804 case Intrinsic::amdgcn_ds_append: 1805 return selectDSAppendConsume(I, true); 1806 case Intrinsic::amdgcn_ds_consume: 1807 return selectDSAppendConsume(I, false); 1808 case Intrinsic::amdgcn_s_barrier: 1809 return selectSBarrier(I); 1810 case Intrinsic::amdgcn_global_atomic_fadd: 1811 return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3)); 1812 case Intrinsic::amdgcn_raw_buffer_load_lds: 1813 case Intrinsic::amdgcn_struct_buffer_load_lds: 1814 return selectBufferLoadLds(I); 1815 case Intrinsic::amdgcn_global_load_lds: 1816 return selectGlobalLoadLds(I); 1817 default: { 1818 return selectImpl(I, *CoverageInfo); 1819 } 1820 } 1821 } 1822 1823 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { 1824 if (selectImpl(I, *CoverageInfo)) 1825 return true; 1826 1827 MachineBasicBlock *BB = I.getParent(); 1828 const DebugLoc &DL = I.getDebugLoc(); 1829 1830 Register DstReg = I.getOperand(0).getReg(); 1831 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 1832 assert(Size <= 32 || Size == 64); 1833 const MachineOperand &CCOp = I.getOperand(1); 1834 Register CCReg = CCOp.getReg(); 1835 if (!isVCC(CCReg, *MRI)) { 1836 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 : 1837 AMDGPU::S_CSELECT_B32; 1838 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 1839 .addReg(CCReg); 1840 1841 // The generic constrainSelectedInstRegOperands doesn't work for the scc register 1842 // bank, because it does not cover the register class that we used to represent 1843 // for it. So we need to manually set the register class here. 1844 if (!MRI->getRegClassOrNull(CCReg)) 1845 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI)); 1846 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg) 1847 .add(I.getOperand(2)) 1848 .add(I.getOperand(3)); 1849 1850 bool Ret = false; 1851 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1852 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); 1853 I.eraseFromParent(); 1854 return Ret; 1855 } 1856 1857 // Wide VGPR select should have been split in RegBankSelect. 1858 if (Size > 32) 1859 return false; 1860 1861 MachineInstr *Select = 1862 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1863 .addImm(0) 1864 .add(I.getOperand(3)) 1865 .addImm(0) 1866 .add(I.getOperand(2)) 1867 .add(I.getOperand(1)); 1868 1869 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1870 I.eraseFromParent(); 1871 return Ret; 1872 } 1873 1874 static int sizeToSubRegIndex(unsigned Size) { 1875 switch (Size) { 1876 case 32: 1877 return AMDGPU::sub0; 1878 case 64: 1879 return AMDGPU::sub0_sub1; 1880 case 96: 1881 return AMDGPU::sub0_sub1_sub2; 1882 case 128: 1883 return AMDGPU::sub0_sub1_sub2_sub3; 1884 case 256: 1885 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; 1886 default: 1887 if (Size < 32) 1888 return AMDGPU::sub0; 1889 if (Size > 256) 1890 return -1; 1891 return sizeToSubRegIndex(PowerOf2Ceil(Size)); 1892 } 1893 } 1894 1895 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { 1896 Register DstReg = I.getOperand(0).getReg(); 1897 Register SrcReg = I.getOperand(1).getReg(); 1898 const LLT DstTy = MRI->getType(DstReg); 1899 const LLT SrcTy = MRI->getType(SrcReg); 1900 const LLT S1 = LLT::scalar(1); 1901 1902 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 1903 const RegisterBank *DstRB; 1904 if (DstTy == S1) { 1905 // This is a special case. We don't treat s1 for legalization artifacts as 1906 // vcc booleans. 1907 DstRB = SrcRB; 1908 } else { 1909 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1910 if (SrcRB != DstRB) 1911 return false; 1912 } 1913 1914 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 1915 1916 unsigned DstSize = DstTy.getSizeInBits(); 1917 unsigned SrcSize = SrcTy.getSizeInBits(); 1918 1919 const TargetRegisterClass *SrcRC = 1920 TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB); 1921 const TargetRegisterClass *DstRC = 1922 TRI.getRegClassForSizeOnBank(DstSize, *DstRB); 1923 if (!SrcRC || !DstRC) 1924 return false; 1925 1926 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 1927 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { 1928 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); 1929 return false; 1930 } 1931 1932 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) { 1933 MachineBasicBlock *MBB = I.getParent(); 1934 const DebugLoc &DL = I.getDebugLoc(); 1935 1936 Register LoReg = MRI->createVirtualRegister(DstRC); 1937 Register HiReg = MRI->createVirtualRegister(DstRC); 1938 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) 1939 .addReg(SrcReg, 0, AMDGPU::sub0); 1940 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) 1941 .addReg(SrcReg, 0, AMDGPU::sub1); 1942 1943 if (IsVALU && STI.hasSDWA()) { 1944 // Write the low 16-bits of the high element into the high 16-bits of the 1945 // low element. 1946 MachineInstr *MovSDWA = 1947 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 1948 .addImm(0) // $src0_modifiers 1949 .addReg(HiReg) // $src0 1950 .addImm(0) // $clamp 1951 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 1952 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 1953 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 1954 .addReg(LoReg, RegState::Implicit); 1955 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 1956 } else { 1957 Register TmpReg0 = MRI->createVirtualRegister(DstRC); 1958 Register TmpReg1 = MRI->createVirtualRegister(DstRC); 1959 Register ImmReg = MRI->createVirtualRegister(DstRC); 1960 if (IsVALU) { 1961 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) 1962 .addImm(16) 1963 .addReg(HiReg); 1964 } else { 1965 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) 1966 .addReg(HiReg) 1967 .addImm(16); 1968 } 1969 1970 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1971 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 1972 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; 1973 1974 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) 1975 .addImm(0xffff); 1976 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) 1977 .addReg(LoReg) 1978 .addReg(ImmReg); 1979 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) 1980 .addReg(TmpReg0) 1981 .addReg(TmpReg1); 1982 } 1983 1984 I.eraseFromParent(); 1985 return true; 1986 } 1987 1988 if (!DstTy.isScalar()) 1989 return false; 1990 1991 if (SrcSize > 32) { 1992 int SubRegIdx = sizeToSubRegIndex(DstSize); 1993 if (SubRegIdx == -1) 1994 return false; 1995 1996 // Deal with weird cases where the class only partially supports the subreg 1997 // index. 1998 const TargetRegisterClass *SrcWithSubRC 1999 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); 2000 if (!SrcWithSubRC) 2001 return false; 2002 2003 if (SrcWithSubRC != SrcRC) { 2004 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) 2005 return false; 2006 } 2007 2008 I.getOperand(1).setSubReg(SubRegIdx); 2009 } 2010 2011 I.setDesc(TII.get(TargetOpcode::COPY)); 2012 return true; 2013 } 2014 2015 /// \returns true if a bitmask for \p Size bits will be an inline immediate. 2016 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { 2017 Mask = maskTrailingOnes<unsigned>(Size); 2018 int SignedMask = static_cast<int>(Mask); 2019 return SignedMask >= -16 && SignedMask <= 64; 2020 } 2021 2022 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. 2023 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( 2024 Register Reg, const MachineRegisterInfo &MRI, 2025 const TargetRegisterInfo &TRI) const { 2026 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 2027 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) 2028 return RB; 2029 2030 // Ignore the type, since we don't use vcc in artifacts. 2031 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) 2032 return &RBI.getRegBankFromRegClass(*RC, LLT()); 2033 return nullptr; 2034 } 2035 2036 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { 2037 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; 2038 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; 2039 const DebugLoc &DL = I.getDebugLoc(); 2040 MachineBasicBlock &MBB = *I.getParent(); 2041 const Register DstReg = I.getOperand(0).getReg(); 2042 const Register SrcReg = I.getOperand(1).getReg(); 2043 2044 const LLT DstTy = MRI->getType(DstReg); 2045 const LLT SrcTy = MRI->getType(SrcReg); 2046 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? 2047 I.getOperand(2).getImm() : SrcTy.getSizeInBits(); 2048 const unsigned DstSize = DstTy.getSizeInBits(); 2049 if (!DstTy.isScalar()) 2050 return false; 2051 2052 // Artifact casts should never use vcc. 2053 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); 2054 2055 // FIXME: This should probably be illegal and split earlier. 2056 if (I.getOpcode() == AMDGPU::G_ANYEXT) { 2057 if (DstSize <= 32) 2058 return selectCOPY(I); 2059 2060 const TargetRegisterClass *SrcRC = 2061 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank); 2062 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 2063 const TargetRegisterClass *DstRC = 2064 TRI.getRegClassForSizeOnBank(DstSize, *DstBank); 2065 2066 Register UndefReg = MRI->createVirtualRegister(SrcRC); 2067 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2068 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2069 .addReg(SrcReg) 2070 .addImm(AMDGPU::sub0) 2071 .addReg(UndefReg) 2072 .addImm(AMDGPU::sub1); 2073 I.eraseFromParent(); 2074 2075 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && 2076 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); 2077 } 2078 2079 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { 2080 // 64-bit should have been split up in RegBankSelect 2081 2082 // Try to use an and with a mask if it will save code size. 2083 unsigned Mask; 2084 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2085 MachineInstr *ExtI = 2086 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) 2087 .addImm(Mask) 2088 .addReg(SrcReg); 2089 I.eraseFromParent(); 2090 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2091 } 2092 2093 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 2094 MachineInstr *ExtI = 2095 BuildMI(MBB, I, DL, TII.get(BFE), DstReg) 2096 .addReg(SrcReg) 2097 .addImm(0) // Offset 2098 .addImm(SrcSize); // Width 2099 I.eraseFromParent(); 2100 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2101 } 2102 2103 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { 2104 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? 2105 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; 2106 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) 2107 return false; 2108 2109 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { 2110 const unsigned SextOpc = SrcSize == 8 ? 2111 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; 2112 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) 2113 .addReg(SrcReg); 2114 I.eraseFromParent(); 2115 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2116 } 2117 2118 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; 2119 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 2120 2121 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. 2122 if (DstSize > 32 && (SrcSize <= 32 || InReg)) { 2123 // We need a 64-bit register source, but the high bits don't matter. 2124 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); 2125 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2126 unsigned SubReg = InReg ? AMDGPU::sub0 : 0; 2127 2128 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2129 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) 2130 .addReg(SrcReg, 0, SubReg) 2131 .addImm(AMDGPU::sub0) 2132 .addReg(UndefReg) 2133 .addImm(AMDGPU::sub1); 2134 2135 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) 2136 .addReg(ExtReg) 2137 .addImm(SrcSize << 16); 2138 2139 I.eraseFromParent(); 2140 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); 2141 } 2142 2143 unsigned Mask; 2144 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2145 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) 2146 .addReg(SrcReg) 2147 .addImm(Mask); 2148 } else { 2149 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) 2150 .addReg(SrcReg) 2151 .addImm(SrcSize << 16); 2152 } 2153 2154 I.eraseFromParent(); 2155 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2156 } 2157 2158 return false; 2159 } 2160 2161 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { 2162 MachineBasicBlock *BB = I.getParent(); 2163 MachineOperand &ImmOp = I.getOperand(1); 2164 Register DstReg = I.getOperand(0).getReg(); 2165 unsigned Size = MRI->getType(DstReg).getSizeInBits(); 2166 2167 // The AMDGPU backend only supports Imm operands and not CImm or FPImm. 2168 if (ImmOp.isFPImm()) { 2169 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); 2170 ImmOp.ChangeToImmediate(Imm.getZExtValue()); 2171 } else if (ImmOp.isCImm()) { 2172 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); 2173 } else { 2174 llvm_unreachable("Not supported by g_constants"); 2175 } 2176 2177 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2178 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; 2179 2180 unsigned Opcode; 2181 if (DstRB->getID() == AMDGPU::VCCRegBankID) { 2182 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2183 } else { 2184 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 2185 2186 // We should never produce s1 values on banks other than VCC. If the user of 2187 // this already constrained the register, we may incorrectly think it's VCC 2188 // if it wasn't originally. 2189 if (Size == 1) 2190 return false; 2191 } 2192 2193 if (Size != 64) { 2194 I.setDesc(TII.get(Opcode)); 2195 I.addImplicitDefUseOperands(*MF); 2196 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 2197 } 2198 2199 const DebugLoc &DL = I.getDebugLoc(); 2200 2201 APInt Imm(Size, I.getOperand(1).getImm()); 2202 2203 MachineInstr *ResInst; 2204 if (IsSgpr && TII.isInlineConstant(Imm)) { 2205 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 2206 .addImm(I.getOperand(1).getImm()); 2207 } else { 2208 const TargetRegisterClass *RC = IsSgpr ? 2209 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; 2210 Register LoReg = MRI->createVirtualRegister(RC); 2211 Register HiReg = MRI->createVirtualRegister(RC); 2212 2213 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) 2214 .addImm(Imm.trunc(32).getZExtValue()); 2215 2216 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) 2217 .addImm(Imm.ashr(32).getZExtValue()); 2218 2219 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2220 .addReg(LoReg) 2221 .addImm(AMDGPU::sub0) 2222 .addReg(HiReg) 2223 .addImm(AMDGPU::sub1); 2224 } 2225 2226 // We can't call constrainSelectedInstRegOperands here, because it doesn't 2227 // work for target independent opcodes 2228 I.eraseFromParent(); 2229 const TargetRegisterClass *DstRC = 2230 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); 2231 if (!DstRC) 2232 return true; 2233 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); 2234 } 2235 2236 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { 2237 // Only manually handle the f64 SGPR case. 2238 // 2239 // FIXME: This is a workaround for 2.5 different tablegen problems. Because 2240 // the bit ops theoretically have a second result due to the implicit def of 2241 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing 2242 // that is easy by disabling the check. The result works, but uses a 2243 // nonsensical sreg32orlds_and_sreg_1 regclass. 2244 // 2245 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to 2246 // the variadic REG_SEQUENCE operands. 2247 2248 Register Dst = MI.getOperand(0).getReg(); 2249 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2250 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2251 MRI->getType(Dst) != LLT::scalar(64)) 2252 return false; 2253 2254 Register Src = MI.getOperand(1).getReg(); 2255 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); 2256 if (Fabs) 2257 Src = Fabs->getOperand(1).getReg(); 2258 2259 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2260 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2261 return false; 2262 2263 MachineBasicBlock *BB = MI.getParent(); 2264 const DebugLoc &DL = MI.getDebugLoc(); 2265 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2266 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2267 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2268 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2269 2270 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2271 .addReg(Src, 0, AMDGPU::sub0); 2272 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2273 .addReg(Src, 0, AMDGPU::sub1); 2274 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2275 .addImm(0x80000000); 2276 2277 // Set or toggle sign bit. 2278 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32; 2279 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg) 2280 .addReg(HiReg) 2281 .addReg(ConstReg); 2282 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 2283 .addReg(LoReg) 2284 .addImm(AMDGPU::sub0) 2285 .addReg(OpReg) 2286 .addImm(AMDGPU::sub1); 2287 MI.eraseFromParent(); 2288 return true; 2289 } 2290 2291 // FIXME: This is a workaround for the same tablegen problems as G_FNEG 2292 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const { 2293 Register Dst = MI.getOperand(0).getReg(); 2294 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2295 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2296 MRI->getType(Dst) != LLT::scalar(64)) 2297 return false; 2298 2299 Register Src = MI.getOperand(1).getReg(); 2300 MachineBasicBlock *BB = MI.getParent(); 2301 const DebugLoc &DL = MI.getDebugLoc(); 2302 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2303 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2304 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2305 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2306 2307 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2308 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2309 return false; 2310 2311 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2312 .addReg(Src, 0, AMDGPU::sub0); 2313 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2314 .addReg(Src, 0, AMDGPU::sub1); 2315 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2316 .addImm(0x7fffffff); 2317 2318 // Clear sign bit. 2319 // TODO: Should this used S_BITSET0_*? 2320 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg) 2321 .addReg(HiReg) 2322 .addReg(ConstReg); 2323 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 2324 .addReg(LoReg) 2325 .addImm(AMDGPU::sub0) 2326 .addReg(OpReg) 2327 .addImm(AMDGPU::sub1); 2328 2329 MI.eraseFromParent(); 2330 return true; 2331 } 2332 2333 static bool isConstant(const MachineInstr &MI) { 2334 return MI.getOpcode() == TargetOpcode::G_CONSTANT; 2335 } 2336 2337 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load, 2338 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const { 2339 2340 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg()); 2341 2342 assert(PtrMI); 2343 2344 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD) 2345 return; 2346 2347 GEPInfo GEPInfo(*PtrMI); 2348 2349 for (unsigned i = 1; i != 3; ++i) { 2350 const MachineOperand &GEPOp = PtrMI->getOperand(i); 2351 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg()); 2352 assert(OpDef); 2353 if (i == 2 && isConstant(*OpDef)) { 2354 // TODO: Could handle constant base + variable offset, but a combine 2355 // probably should have commuted it. 2356 assert(GEPInfo.Imm == 0); 2357 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue(); 2358 continue; 2359 } 2360 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); 2361 if (OpBank->getID() == AMDGPU::SGPRRegBankID) 2362 GEPInfo.SgprParts.push_back(GEPOp.getReg()); 2363 else 2364 GEPInfo.VgprParts.push_back(GEPOp.getReg()); 2365 } 2366 2367 AddrInfo.push_back(GEPInfo); 2368 getAddrModeInfo(*PtrMI, MRI, AddrInfo); 2369 } 2370 2371 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const { 2372 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; 2373 } 2374 2375 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { 2376 if (!MI.hasOneMemOperand()) 2377 return false; 2378 2379 const MachineMemOperand *MMO = *MI.memoperands_begin(); 2380 const Value *Ptr = MMO->getValue(); 2381 2382 // UndefValue means this is a load of a kernel input. These are uniform. 2383 // Sometimes LDS instructions have constant pointers. 2384 // If Ptr is null, then that means this mem operand contains a 2385 // PseudoSourceValue like GOT. 2386 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || 2387 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) 2388 return true; 2389 2390 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) 2391 return true; 2392 2393 const Instruction *I = dyn_cast<Instruction>(Ptr); 2394 return I && I->getMetadata("amdgpu.uniform"); 2395 } 2396 2397 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const { 2398 for (const GEPInfo &GEPInfo : AddrInfo) { 2399 if (!GEPInfo.VgprParts.empty()) 2400 return true; 2401 } 2402 return false; 2403 } 2404 2405 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const { 2406 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2407 unsigned AS = PtrTy.getAddressSpace(); 2408 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) && 2409 STI.ldsRequiresM0Init()) { 2410 MachineBasicBlock *BB = I.getParent(); 2411 2412 // If DS instructions require M0 initialization, insert it before selecting. 2413 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2414 .addImm(-1); 2415 } 2416 } 2417 2418 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW( 2419 MachineInstr &I) const { 2420 if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) { 2421 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2422 unsigned AS = PtrTy.getAddressSpace(); 2423 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 2424 return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2)); 2425 } 2426 2427 initM0(I); 2428 return selectImpl(I, *CoverageInfo); 2429 } 2430 2431 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) { 2432 if (Reg.isPhysical()) 2433 return false; 2434 2435 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg); 2436 const unsigned Opcode = MI.getOpcode(); 2437 2438 if (Opcode == AMDGPU::COPY) 2439 return isVCmpResult(MI.getOperand(1).getReg(), MRI); 2440 2441 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR || 2442 Opcode == AMDGPU::G_XOR) 2443 return isVCmpResult(MI.getOperand(1).getReg(), MRI) && 2444 isVCmpResult(MI.getOperand(2).getReg(), MRI); 2445 2446 if (Opcode == TargetOpcode::G_INTRINSIC) 2447 return MI.getIntrinsicID() == Intrinsic::amdgcn_class; 2448 2449 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP; 2450 } 2451 2452 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { 2453 MachineBasicBlock *BB = I.getParent(); 2454 MachineOperand &CondOp = I.getOperand(0); 2455 Register CondReg = CondOp.getReg(); 2456 const DebugLoc &DL = I.getDebugLoc(); 2457 2458 unsigned BrOpcode; 2459 Register CondPhysReg; 2460 const TargetRegisterClass *ConstrainRC; 2461 2462 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide 2463 // whether the branch is uniform when selecting the instruction. In 2464 // GlobalISel, we should push that decision into RegBankSelect. Assume for now 2465 // RegBankSelect knows what it's doing if the branch condition is scc, even 2466 // though it currently does not. 2467 if (!isVCC(CondReg, *MRI)) { 2468 if (MRI->getType(CondReg) != LLT::scalar(32)) 2469 return false; 2470 2471 CondPhysReg = AMDGPU::SCC; 2472 BrOpcode = AMDGPU::S_CBRANCH_SCC1; 2473 ConstrainRC = &AMDGPU::SReg_32RegClass; 2474 } else { 2475 // FIXME: Should scc->vcc copies and with exec? 2476 2477 // Unless the value of CondReg is a result of a V_CMP* instruction then we 2478 // need to insert an and with exec. 2479 if (!isVCmpResult(CondReg, *MRI)) { 2480 const bool Is64 = STI.isWave64(); 2481 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 2482 const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; 2483 2484 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC()); 2485 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg) 2486 .addReg(CondReg) 2487 .addReg(Exec); 2488 CondReg = TmpReg; 2489 } 2490 2491 CondPhysReg = TRI.getVCC(); 2492 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ; 2493 ConstrainRC = TRI.getBoolRC(); 2494 } 2495 2496 if (!MRI->getRegClassOrNull(CondReg)) 2497 MRI->setRegClass(CondReg, ConstrainRC); 2498 2499 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg) 2500 .addReg(CondReg); 2501 BuildMI(*BB, &I, DL, TII.get(BrOpcode)) 2502 .addMBB(I.getOperand(1).getMBB()); 2503 2504 I.eraseFromParent(); 2505 return true; 2506 } 2507 2508 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE( 2509 MachineInstr &I) const { 2510 Register DstReg = I.getOperand(0).getReg(); 2511 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2512 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2513 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32)); 2514 if (IsVGPR) 2515 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 2516 2517 return RBI.constrainGenericRegister( 2518 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); 2519 } 2520 2521 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { 2522 Register DstReg = I.getOperand(0).getReg(); 2523 Register SrcReg = I.getOperand(1).getReg(); 2524 Register MaskReg = I.getOperand(2).getReg(); 2525 LLT Ty = MRI->getType(DstReg); 2526 LLT MaskTy = MRI->getType(MaskReg); 2527 MachineBasicBlock *BB = I.getParent(); 2528 const DebugLoc &DL = I.getDebugLoc(); 2529 2530 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2531 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2532 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); 2533 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2534 if (DstRB != SrcRB) // Should only happen for hand written MIR. 2535 return false; 2536 2537 // Try to avoid emitting a bit operation when we only need to touch half of 2538 // the 64-bit pointer. 2539 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64); 2540 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); 2541 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); 2542 2543 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32; 2544 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32; 2545 2546 if (!IsVGPR && Ty.getSizeInBits() == 64 && 2547 !CanCopyLow32 && !CanCopyHi32) { 2548 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg) 2549 .addReg(SrcReg) 2550 .addReg(MaskReg); 2551 I.eraseFromParent(); 2552 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 2553 } 2554 2555 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 2556 const TargetRegisterClass &RegRC 2557 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2558 2559 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB); 2560 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB); 2561 const TargetRegisterClass *MaskRC = 2562 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB); 2563 2564 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2565 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2566 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) 2567 return false; 2568 2569 if (Ty.getSizeInBits() == 32) { 2570 assert(MaskTy.getSizeInBits() == 32 && 2571 "ptrmask should have been narrowed during legalize"); 2572 2573 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) 2574 .addReg(SrcReg) 2575 .addReg(MaskReg); 2576 I.eraseFromParent(); 2577 return true; 2578 } 2579 2580 Register HiReg = MRI->createVirtualRegister(&RegRC); 2581 Register LoReg = MRI->createVirtualRegister(&RegRC); 2582 2583 // Extract the subregisters from the source pointer. 2584 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) 2585 .addReg(SrcReg, 0, AMDGPU::sub0); 2586 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) 2587 .addReg(SrcReg, 0, AMDGPU::sub1); 2588 2589 Register MaskedLo, MaskedHi; 2590 2591 if (CanCopyLow32) { 2592 // If all the bits in the low half are 1, we only need a copy for it. 2593 MaskedLo = LoReg; 2594 } else { 2595 // Extract the mask subregister and apply the and. 2596 Register MaskLo = MRI->createVirtualRegister(&RegRC); 2597 MaskedLo = MRI->createVirtualRegister(&RegRC); 2598 2599 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) 2600 .addReg(MaskReg, 0, AMDGPU::sub0); 2601 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) 2602 .addReg(LoReg) 2603 .addReg(MaskLo); 2604 } 2605 2606 if (CanCopyHi32) { 2607 // If all the bits in the high half are 1, we only need a copy for it. 2608 MaskedHi = HiReg; 2609 } else { 2610 Register MaskHi = MRI->createVirtualRegister(&RegRC); 2611 MaskedHi = MRI->createVirtualRegister(&RegRC); 2612 2613 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) 2614 .addReg(MaskReg, 0, AMDGPU::sub1); 2615 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) 2616 .addReg(HiReg) 2617 .addReg(MaskHi); 2618 } 2619 2620 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2621 .addReg(MaskedLo) 2622 .addImm(AMDGPU::sub0) 2623 .addReg(MaskedHi) 2624 .addImm(AMDGPU::sub1); 2625 I.eraseFromParent(); 2626 return true; 2627 } 2628 2629 /// Return the register to use for the index value, and the subregister to use 2630 /// for the indirectly accessed register. 2631 static std::pair<Register, unsigned> 2632 computeIndirectRegIndex(MachineRegisterInfo &MRI, 2633 const SIRegisterInfo &TRI, 2634 const TargetRegisterClass *SuperRC, 2635 Register IdxReg, 2636 unsigned EltSize) { 2637 Register IdxBaseReg; 2638 int Offset; 2639 2640 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); 2641 if (IdxBaseReg == AMDGPU::NoRegister) { 2642 // This will happen if the index is a known constant. This should ordinarily 2643 // be legalized out, but handle it as a register just in case. 2644 assert(Offset == 0); 2645 IdxBaseReg = IdxReg; 2646 } 2647 2648 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); 2649 2650 // Skip out of bounds offsets, or else we would end up using an undefined 2651 // register. 2652 if (static_cast<unsigned>(Offset) >= SubRegs.size()) 2653 return std::make_pair(IdxReg, SubRegs[0]); 2654 return std::make_pair(IdxBaseReg, SubRegs[Offset]); 2655 } 2656 2657 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( 2658 MachineInstr &MI) const { 2659 Register DstReg = MI.getOperand(0).getReg(); 2660 Register SrcReg = MI.getOperand(1).getReg(); 2661 Register IdxReg = MI.getOperand(2).getReg(); 2662 2663 LLT DstTy = MRI->getType(DstReg); 2664 LLT SrcTy = MRI->getType(SrcReg); 2665 2666 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2667 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2668 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2669 2670 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2671 // into a waterfall loop. 2672 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2673 return false; 2674 2675 const TargetRegisterClass *SrcRC = 2676 TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB); 2677 const TargetRegisterClass *DstRC = 2678 TRI.getRegClassForTypeOnBank(DstTy, *DstRB); 2679 if (!SrcRC || !DstRC) 2680 return false; 2681 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2682 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2683 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2684 return false; 2685 2686 MachineBasicBlock *BB = MI.getParent(); 2687 const DebugLoc &DL = MI.getDebugLoc(); 2688 const bool Is64 = DstTy.getSizeInBits() == 64; 2689 2690 unsigned SubReg; 2691 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg, 2692 DstTy.getSizeInBits() / 8); 2693 2694 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { 2695 if (DstTy.getSizeInBits() != 32 && !Is64) 2696 return false; 2697 2698 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2699 .addReg(IdxReg); 2700 2701 unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32; 2702 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg) 2703 .addReg(SrcReg, 0, SubReg) 2704 .addReg(SrcReg, RegState::Implicit); 2705 MI.eraseFromParent(); 2706 return true; 2707 } 2708 2709 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32) 2710 return false; 2711 2712 if (!STI.useVGPRIndexMode()) { 2713 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2714 .addReg(IdxReg); 2715 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg) 2716 .addReg(SrcReg, 0, SubReg) 2717 .addReg(SrcReg, RegState::Implicit); 2718 MI.eraseFromParent(); 2719 return true; 2720 } 2721 2722 const MCInstrDesc &GPRIDXDesc = 2723 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true); 2724 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2725 .addReg(SrcReg) 2726 .addReg(IdxReg) 2727 .addImm(SubReg); 2728 2729 MI.eraseFromParent(); 2730 return true; 2731 } 2732 2733 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd 2734 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT( 2735 MachineInstr &MI) const { 2736 Register DstReg = MI.getOperand(0).getReg(); 2737 Register VecReg = MI.getOperand(1).getReg(); 2738 Register ValReg = MI.getOperand(2).getReg(); 2739 Register IdxReg = MI.getOperand(3).getReg(); 2740 2741 LLT VecTy = MRI->getType(DstReg); 2742 LLT ValTy = MRI->getType(ValReg); 2743 unsigned VecSize = VecTy.getSizeInBits(); 2744 unsigned ValSize = ValTy.getSizeInBits(); 2745 2746 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); 2747 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); 2748 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2749 2750 assert(VecTy.getElementType() == ValTy); 2751 2752 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2753 // into a waterfall loop. 2754 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2755 return false; 2756 2757 const TargetRegisterClass *VecRC = 2758 TRI.getRegClassForTypeOnBank(VecTy, *VecRB); 2759 const TargetRegisterClass *ValRC = 2760 TRI.getRegClassForTypeOnBank(ValTy, *ValRB); 2761 2762 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || 2763 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || 2764 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || 2765 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2766 return false; 2767 2768 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) 2769 return false; 2770 2771 unsigned SubReg; 2772 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, 2773 ValSize / 8); 2774 2775 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && 2776 STI.useVGPRIndexMode(); 2777 2778 MachineBasicBlock *BB = MI.getParent(); 2779 const DebugLoc &DL = MI.getDebugLoc(); 2780 2781 if (!IndexMode) { 2782 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2783 .addReg(IdxReg); 2784 2785 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo( 2786 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID); 2787 BuildMI(*BB, MI, DL, RegWriteOp, DstReg) 2788 .addReg(VecReg) 2789 .addReg(ValReg) 2790 .addImm(SubReg); 2791 MI.eraseFromParent(); 2792 return true; 2793 } 2794 2795 const MCInstrDesc &GPRIDXDesc = 2796 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 2797 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2798 .addReg(VecReg) 2799 .addReg(ValReg) 2800 .addReg(IdxReg) 2801 .addImm(SubReg); 2802 2803 MI.eraseFromParent(); 2804 return true; 2805 } 2806 2807 static bool isZeroOrUndef(int X) { 2808 return X == 0 || X == -1; 2809 } 2810 2811 static bool isOneOrUndef(int X) { 2812 return X == 1 || X == -1; 2813 } 2814 2815 static bool isZeroOrOneOrUndef(int X) { 2816 return X == 0 || X == 1 || X == -1; 2817 } 2818 2819 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single 2820 // 32-bit register. 2821 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1, 2822 ArrayRef<int> Mask) { 2823 NewMask[0] = Mask[0]; 2824 NewMask[1] = Mask[1]; 2825 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1])) 2826 return Src0; 2827 2828 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1); 2829 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1); 2830 2831 // Shift the mask inputs to be 0/1; 2832 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2; 2833 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2; 2834 return Src1; 2835 } 2836 2837 // This is only legal with VOP3P instructions as an aid to op_sel matching. 2838 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR( 2839 MachineInstr &MI) const { 2840 Register DstReg = MI.getOperand(0).getReg(); 2841 Register Src0Reg = MI.getOperand(1).getReg(); 2842 Register Src1Reg = MI.getOperand(2).getReg(); 2843 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask(); 2844 2845 const LLT V2S16 = LLT::fixed_vector(2, 16); 2846 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16) 2847 return false; 2848 2849 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask)) 2850 return false; 2851 2852 assert(ShufMask.size() == 2); 2853 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA"); 2854 2855 MachineBasicBlock *MBB = MI.getParent(); 2856 const DebugLoc &DL = MI.getDebugLoc(); 2857 2858 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2859 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 2860 const TargetRegisterClass &RC = IsVALU ? 2861 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2862 2863 // Handle the degenerate case which should have folded out. 2864 if (ShufMask[0] == -1 && ShufMask[1] == -1) { 2865 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg); 2866 2867 MI.eraseFromParent(); 2868 return RBI.constrainGenericRegister(DstReg, RC, *MRI); 2869 } 2870 2871 // A legal VOP3P mask only reads one of the sources. 2872 int Mask[2]; 2873 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask); 2874 2875 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) || 2876 !RBI.constrainGenericRegister(SrcVec, RC, *MRI)) 2877 return false; 2878 2879 // TODO: This also should have been folded out 2880 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) { 2881 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg) 2882 .addReg(SrcVec); 2883 2884 MI.eraseFromParent(); 2885 return true; 2886 } 2887 2888 if (Mask[0] == 1 && Mask[1] == -1) { 2889 if (IsVALU) { 2890 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 2891 .addImm(16) 2892 .addReg(SrcVec); 2893 } else { 2894 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 2895 .addReg(SrcVec) 2896 .addImm(16); 2897 } 2898 } else if (Mask[0] == -1 && Mask[1] == 0) { 2899 if (IsVALU) { 2900 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg) 2901 .addImm(16) 2902 .addReg(SrcVec); 2903 } else { 2904 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg) 2905 .addReg(SrcVec) 2906 .addImm(16); 2907 } 2908 } else if (Mask[0] == 0 && Mask[1] == 0) { 2909 if (IsVALU) { 2910 // Write low half of the register into the high half. 2911 MachineInstr *MovSDWA = 2912 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2913 .addImm(0) // $src0_modifiers 2914 .addReg(SrcVec) // $src0 2915 .addImm(0) // $clamp 2916 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 2917 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2918 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 2919 .addReg(SrcVec, RegState::Implicit); 2920 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2921 } else { 2922 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2923 .addReg(SrcVec) 2924 .addReg(SrcVec); 2925 } 2926 } else if (Mask[0] == 1 && Mask[1] == 1) { 2927 if (IsVALU) { 2928 // Write high half of the register into the low half. 2929 MachineInstr *MovSDWA = 2930 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2931 .addImm(0) // $src0_modifiers 2932 .addReg(SrcVec) // $src0 2933 .addImm(0) // $clamp 2934 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel 2935 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2936 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel 2937 .addReg(SrcVec, RegState::Implicit); 2938 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2939 } else { 2940 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg) 2941 .addReg(SrcVec) 2942 .addReg(SrcVec); 2943 } 2944 } else if (Mask[0] == 1 && Mask[1] == 0) { 2945 if (IsVALU) { 2946 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg) 2947 .addReg(SrcVec) 2948 .addReg(SrcVec) 2949 .addImm(16); 2950 } else { 2951 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2952 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg) 2953 .addReg(SrcVec) 2954 .addImm(16); 2955 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2956 .addReg(TmpReg) 2957 .addReg(SrcVec); 2958 } 2959 } else 2960 llvm_unreachable("all shuffle masks should be handled"); 2961 2962 MI.eraseFromParent(); 2963 return true; 2964 } 2965 2966 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD( 2967 MachineInstr &MI) const { 2968 if (STI.hasGFX90AInsts()) 2969 return selectImpl(MI, *CoverageInfo); 2970 2971 MachineBasicBlock *MBB = MI.getParent(); 2972 const DebugLoc &DL = MI.getDebugLoc(); 2973 2974 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2975 Function &F = MBB->getParent()->getFunction(); 2976 DiagnosticInfoUnsupported 2977 NoFpRet(F, "return versions of fp atomics not supported", 2978 MI.getDebugLoc(), DS_Error); 2979 F.getContext().diagnose(NoFpRet); 2980 return false; 2981 } 2982 2983 // FIXME: This is only needed because tablegen requires number of dst operands 2984 // in match and replace pattern to be the same. Otherwise patterns can be 2985 // exported from SDag path. 2986 MachineOperand &VDataIn = MI.getOperand(1); 2987 MachineOperand &VIndex = MI.getOperand(3); 2988 MachineOperand &VOffset = MI.getOperand(4); 2989 MachineOperand &SOffset = MI.getOperand(5); 2990 int16_t Offset = MI.getOperand(6).getImm(); 2991 2992 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI); 2993 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI); 2994 2995 unsigned Opcode; 2996 if (HasVOffset) { 2997 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN 2998 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN; 2999 } else { 3000 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN 3001 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET; 3002 } 3003 3004 if (MRI->getType(VDataIn.getReg()).isVector()) { 3005 switch (Opcode) { 3006 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN: 3007 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN; 3008 break; 3009 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN: 3010 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN; 3011 break; 3012 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN: 3013 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN; 3014 break; 3015 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET: 3016 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET; 3017 break; 3018 } 3019 } 3020 3021 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode)); 3022 I.add(VDataIn); 3023 3024 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN || 3025 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) { 3026 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); 3027 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 3028 .addReg(VIndex.getReg()) 3029 .addImm(AMDGPU::sub0) 3030 .addReg(VOffset.getReg()) 3031 .addImm(AMDGPU::sub1); 3032 3033 I.addReg(IdxReg); 3034 } else if (HasVIndex) { 3035 I.add(VIndex); 3036 } else if (HasVOffset) { 3037 I.add(VOffset); 3038 } 3039 3040 I.add(MI.getOperand(2)); // rsrc 3041 I.add(SOffset); 3042 I.addImm(Offset); 3043 I.addImm(MI.getOperand(7).getImm()); // cpol 3044 I.cloneMemRefs(MI); 3045 3046 MI.eraseFromParent(); 3047 3048 return true; 3049 } 3050 3051 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd( 3052 MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const { 3053 3054 if (STI.hasGFX90AInsts()) { 3055 // gfx90a adds return versions of the global atomic fadd instructions so no 3056 // special handling is required. 3057 return selectImpl(MI, *CoverageInfo); 3058 } 3059 3060 MachineBasicBlock *MBB = MI.getParent(); 3061 const DebugLoc &DL = MI.getDebugLoc(); 3062 3063 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 3064 Function &F = MBB->getParent()->getFunction(); 3065 DiagnosticInfoUnsupported 3066 NoFpRet(F, "return versions of fp atomics not supported", 3067 MI.getDebugLoc(), DS_Error); 3068 F.getContext().diagnose(NoFpRet); 3069 return false; 3070 } 3071 3072 // FIXME: This is only needed because tablegen requires number of dst operands 3073 // in match and replace pattern to be the same. Otherwise patterns can be 3074 // exported from SDag path. 3075 auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal); 3076 3077 Register Data = DataOp.getReg(); 3078 const unsigned Opc = MRI->getType(Data).isVector() ? 3079 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32; 3080 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3081 .addReg(Addr.first) 3082 .addReg(Data) 3083 .addImm(Addr.second) 3084 .addImm(0) // cpol 3085 .cloneMemRefs(MI); 3086 3087 MI.eraseFromParent(); 3088 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3089 } 3090 3091 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const { 3092 unsigned Opc; 3093 unsigned Size = MI.getOperand(3).getImm(); 3094 3095 // The struct intrinsic variants add one additional operand over raw. 3096 const bool HasVIndex = MI.getNumOperands() == 9; 3097 Register VIndex; 3098 int OpOffset = 0; 3099 if (HasVIndex) { 3100 VIndex = MI.getOperand(4).getReg(); 3101 OpOffset = 1; 3102 } 3103 3104 Register VOffset = MI.getOperand(4 + OpOffset).getReg(); 3105 Optional<ValueAndVReg> MaybeVOffset = 3106 getIConstantVRegValWithLookThrough(VOffset, *MRI); 3107 const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue(); 3108 3109 switch (Size) { 3110 default: 3111 return false; 3112 case 1: 3113 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN 3114 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN 3115 : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN 3116 : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET; 3117 break; 3118 case 2: 3119 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN 3120 : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN 3121 : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN 3122 : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET; 3123 break; 3124 case 4: 3125 Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN 3126 : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN 3127 : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN 3128 : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET; 3129 break; 3130 } 3131 3132 MachineBasicBlock *MBB = MI.getParent(); 3133 const DebugLoc &DL = MI.getDebugLoc(); 3134 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 3135 .add(MI.getOperand(2)); 3136 3137 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)); 3138 3139 if (HasVIndex && HasVOffset) { 3140 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); 3141 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 3142 .addReg(VIndex) 3143 .addImm(AMDGPU::sub0) 3144 .addReg(VOffset) 3145 .addImm(AMDGPU::sub1); 3146 3147 MIB.addReg(IdxReg); 3148 } else if (HasVIndex) { 3149 MIB.addReg(VIndex); 3150 } else if (HasVOffset) { 3151 MIB.addReg(VOffset); 3152 } 3153 3154 MIB.add(MI.getOperand(1)); // rsrc 3155 MIB.add(MI.getOperand(5 + OpOffset)); // soffset 3156 MIB.add(MI.getOperand(6 + OpOffset)); // imm offset 3157 unsigned Aux = MI.getOperand(7 + OpOffset).getImm(); 3158 MIB.addImm(Aux & AMDGPU::CPol::ALL); // cpol 3159 MIB.addImm((Aux >> 3) & 1); // swz 3160 3161 MachineMemOperand *LoadMMO = *MI.memoperands_begin(); 3162 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); 3163 LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm(); 3164 MachinePointerInfo StorePtrI = LoadPtrI; 3165 StorePtrI.V = nullptr; 3166 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; 3167 3168 auto F = LoadMMO->getFlags() & 3169 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); 3170 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, 3171 Size, LoadMMO->getBaseAlign()); 3172 3173 MachineMemOperand *StoreMMO = 3174 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, 3175 sizeof(int32_t), LoadMMO->getBaseAlign()); 3176 3177 MIB.setMemRefs({LoadMMO, StoreMMO}); 3178 3179 MI.eraseFromParent(); 3180 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3181 } 3182 3183 /// Match a zero extend from a 32-bit value to 64-bits. 3184 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) { 3185 Register ZExtSrc; 3186 if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc)))) 3187 return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register(); 3188 3189 // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0) 3190 const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI); 3191 if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES) 3192 return false; 3193 3194 if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) { 3195 return Def->getOperand(1).getReg(); 3196 } 3197 3198 return Register(); 3199 } 3200 3201 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{ 3202 unsigned Opc; 3203 unsigned Size = MI.getOperand(3).getImm(); 3204 3205 switch (Size) { 3206 default: 3207 return false; 3208 case 1: 3209 Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE; 3210 break; 3211 case 2: 3212 Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT; 3213 break; 3214 case 4: 3215 Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD; 3216 break; 3217 } 3218 3219 MachineBasicBlock *MBB = MI.getParent(); 3220 const DebugLoc &DL = MI.getDebugLoc(); 3221 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 3222 .add(MI.getOperand(2)); 3223 3224 Register Addr = MI.getOperand(1).getReg(); 3225 Register VOffset; 3226 // Try to split SAddr and VOffset. Global and LDS pointers share the same 3227 // immediate offset, so we cannot use a regular SelectGlobalSAddr(). 3228 if (!isSGPR(Addr)) { 3229 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3230 if (isSGPR(AddrDef->Reg)) { 3231 Addr = AddrDef->Reg; 3232 } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 3233 Register SAddr = 3234 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); 3235 if (SAddr && isSGPR(SAddr)) { 3236 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); 3237 if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { 3238 Addr = SAddr; 3239 VOffset = Off; 3240 } 3241 } 3242 } 3243 } 3244 3245 if (isSGPR(Addr)) { 3246 Opc = AMDGPU::getGlobalSaddrOp(Opc); 3247 if (!VOffset) { 3248 VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3249 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset) 3250 .addImm(0); 3251 } 3252 } 3253 3254 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3255 .addReg(Addr); 3256 3257 if (isSGPR(Addr)) 3258 MIB.addReg(VOffset); 3259 3260 MIB.add(MI.getOperand(4)) // offset 3261 .add(MI.getOperand(5)); // cpol 3262 3263 MachineMemOperand *LoadMMO = *MI.memoperands_begin(); 3264 MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo(); 3265 LoadPtrI.Offset = MI.getOperand(4).getImm(); 3266 MachinePointerInfo StorePtrI = LoadPtrI; 3267 LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS; 3268 StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS; 3269 auto F = LoadMMO->getFlags() & 3270 ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad); 3271 LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad, 3272 Size, LoadMMO->getBaseAlign()); 3273 MachineMemOperand *StoreMMO = 3274 MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore, 3275 sizeof(int32_t), Align(4)); 3276 3277 MIB.setMemRefs({LoadMMO, StoreMMO}); 3278 3279 MI.eraseFromParent(); 3280 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3281 } 3282 3283 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{ 3284 MI.setDesc(TII.get(MI.getOperand(1).getImm())); 3285 MI.removeOperand(1); 3286 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3287 return true; 3288 } 3289 3290 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const { 3291 unsigned Opc; 3292 switch (MI.getIntrinsicID()) { 3293 case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16: 3294 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64; 3295 break; 3296 case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16: 3297 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64; 3298 break; 3299 case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16: 3300 Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64; 3301 break; 3302 case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16: 3303 Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64; 3304 break; 3305 case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8: 3306 Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64; 3307 break; 3308 case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8: 3309 Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64; 3310 break; 3311 default: 3312 llvm_unreachable("unhandled smfmac intrinsic"); 3313 } 3314 3315 auto VDst_In = MI.getOperand(4); 3316 3317 MI.setDesc(TII.get(Opc)); 3318 MI.removeOperand(4); // VDst_In 3319 MI.removeOperand(1); // Intrinsic ID 3320 MI.addOperand(VDst_In); // Readd VDst_In to the end 3321 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3322 return true; 3323 } 3324 3325 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const { 3326 Register DstReg = MI.getOperand(0).getReg(); 3327 Register SrcReg = MI.getOperand(1).getReg(); 3328 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 3329 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 3330 MachineBasicBlock *MBB = MI.getParent(); 3331 const DebugLoc &DL = MI.getDebugLoc(); 3332 3333 if (IsVALU) { 3334 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 3335 .addImm(Subtarget->getWavefrontSizeLog2()) 3336 .addReg(SrcReg); 3337 } else { 3338 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 3339 .addReg(SrcReg) 3340 .addImm(Subtarget->getWavefrontSizeLog2()); 3341 } 3342 3343 const TargetRegisterClass &RC = 3344 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 3345 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 3346 return false; 3347 3348 MI.eraseFromParent(); 3349 return true; 3350 } 3351 3352 bool AMDGPUInstructionSelector::select(MachineInstr &I) { 3353 if (I.isPHI()) 3354 return selectPHI(I); 3355 3356 if (!I.isPreISelOpcode()) { 3357 if (I.isCopy()) 3358 return selectCOPY(I); 3359 return true; 3360 } 3361 3362 switch (I.getOpcode()) { 3363 case TargetOpcode::G_AND: 3364 case TargetOpcode::G_OR: 3365 case TargetOpcode::G_XOR: 3366 if (selectImpl(I, *CoverageInfo)) 3367 return true; 3368 return selectG_AND_OR_XOR(I); 3369 case TargetOpcode::G_ADD: 3370 case TargetOpcode::G_SUB: 3371 if (selectImpl(I, *CoverageInfo)) 3372 return true; 3373 return selectG_ADD_SUB(I); 3374 case TargetOpcode::G_UADDO: 3375 case TargetOpcode::G_USUBO: 3376 case TargetOpcode::G_UADDE: 3377 case TargetOpcode::G_USUBE: 3378 return selectG_UADDO_USUBO_UADDE_USUBE(I); 3379 case AMDGPU::G_AMDGPU_MAD_U64_U32: 3380 case AMDGPU::G_AMDGPU_MAD_I64_I32: 3381 return selectG_AMDGPU_MAD_64_32(I); 3382 case TargetOpcode::G_INTTOPTR: 3383 case TargetOpcode::G_BITCAST: 3384 case TargetOpcode::G_PTRTOINT: 3385 return selectCOPY(I); 3386 case TargetOpcode::G_CONSTANT: 3387 case TargetOpcode::G_FCONSTANT: 3388 return selectG_CONSTANT(I); 3389 case TargetOpcode::G_FNEG: 3390 if (selectImpl(I, *CoverageInfo)) 3391 return true; 3392 return selectG_FNEG(I); 3393 case TargetOpcode::G_FABS: 3394 if (selectImpl(I, *CoverageInfo)) 3395 return true; 3396 return selectG_FABS(I); 3397 case TargetOpcode::G_EXTRACT: 3398 return selectG_EXTRACT(I); 3399 case TargetOpcode::G_MERGE_VALUES: 3400 case TargetOpcode::G_BUILD_VECTOR: 3401 case TargetOpcode::G_CONCAT_VECTORS: 3402 return selectG_MERGE_VALUES(I); 3403 case TargetOpcode::G_UNMERGE_VALUES: 3404 return selectG_UNMERGE_VALUES(I); 3405 case TargetOpcode::G_BUILD_VECTOR_TRUNC: 3406 return selectG_BUILD_VECTOR_TRUNC(I); 3407 case TargetOpcode::G_PTR_ADD: 3408 return selectG_PTR_ADD(I); 3409 case TargetOpcode::G_IMPLICIT_DEF: 3410 return selectG_IMPLICIT_DEF(I); 3411 case TargetOpcode::G_FREEZE: 3412 return selectCOPY(I); 3413 case TargetOpcode::G_INSERT: 3414 return selectG_INSERT(I); 3415 case TargetOpcode::G_INTRINSIC: 3416 return selectG_INTRINSIC(I); 3417 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 3418 return selectG_INTRINSIC_W_SIDE_EFFECTS(I); 3419 case TargetOpcode::G_ICMP: 3420 if (selectG_ICMP(I)) 3421 return true; 3422 return selectImpl(I, *CoverageInfo); 3423 case TargetOpcode::G_LOAD: 3424 case TargetOpcode::G_STORE: 3425 case TargetOpcode::G_ATOMIC_CMPXCHG: 3426 case TargetOpcode::G_ATOMICRMW_XCHG: 3427 case TargetOpcode::G_ATOMICRMW_ADD: 3428 case TargetOpcode::G_ATOMICRMW_SUB: 3429 case TargetOpcode::G_ATOMICRMW_AND: 3430 case TargetOpcode::G_ATOMICRMW_OR: 3431 case TargetOpcode::G_ATOMICRMW_XOR: 3432 case TargetOpcode::G_ATOMICRMW_MIN: 3433 case TargetOpcode::G_ATOMICRMW_MAX: 3434 case TargetOpcode::G_ATOMICRMW_UMIN: 3435 case TargetOpcode::G_ATOMICRMW_UMAX: 3436 case TargetOpcode::G_ATOMICRMW_FADD: 3437 case AMDGPU::G_AMDGPU_ATOMIC_INC: 3438 case AMDGPU::G_AMDGPU_ATOMIC_DEC: 3439 case AMDGPU::G_AMDGPU_ATOMIC_FMIN: 3440 case AMDGPU::G_AMDGPU_ATOMIC_FMAX: 3441 return selectG_LOAD_STORE_ATOMICRMW(I); 3442 case TargetOpcode::G_SELECT: 3443 return selectG_SELECT(I); 3444 case TargetOpcode::G_TRUNC: 3445 return selectG_TRUNC(I); 3446 case TargetOpcode::G_SEXT: 3447 case TargetOpcode::G_ZEXT: 3448 case TargetOpcode::G_ANYEXT: 3449 case TargetOpcode::G_SEXT_INREG: 3450 if (selectImpl(I, *CoverageInfo)) 3451 return true; 3452 return selectG_SZA_EXT(I); 3453 case TargetOpcode::G_BRCOND: 3454 return selectG_BRCOND(I); 3455 case TargetOpcode::G_GLOBAL_VALUE: 3456 return selectG_GLOBAL_VALUE(I); 3457 case TargetOpcode::G_PTRMASK: 3458 return selectG_PTRMASK(I); 3459 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 3460 return selectG_EXTRACT_VECTOR_ELT(I); 3461 case TargetOpcode::G_INSERT_VECTOR_ELT: 3462 return selectG_INSERT_VECTOR_ELT(I); 3463 case TargetOpcode::G_SHUFFLE_VECTOR: 3464 return selectG_SHUFFLE_VECTOR(I); 3465 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: 3466 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: 3467 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: 3468 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { 3469 const AMDGPU::ImageDimIntrinsicInfo *Intr 3470 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); 3471 assert(Intr && "not an image intrinsic with image pseudo"); 3472 return selectImageIntrinsic(I, Intr); 3473 } 3474 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: 3475 return selectBVHIntrinsic(I); 3476 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: 3477 return selectAMDGPU_BUFFER_ATOMIC_FADD(I); 3478 case AMDGPU::G_SBFX: 3479 case AMDGPU::G_UBFX: 3480 return selectG_SBFX_UBFX(I); 3481 case AMDGPU::G_SI_CALL: 3482 I.setDesc(TII.get(AMDGPU::SI_CALL)); 3483 return true; 3484 case AMDGPU::G_AMDGPU_WAVE_ADDRESS: 3485 return selectWaveAddress(I); 3486 default: 3487 return selectImpl(I, *CoverageInfo); 3488 } 3489 return false; 3490 } 3491 3492 InstructionSelector::ComplexRendererFns 3493 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { 3494 return {{ 3495 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3496 }}; 3497 3498 } 3499 3500 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl( 3501 MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const { 3502 Register Src = Root.getReg(); 3503 Register OrigSrc = Src; 3504 unsigned Mods = 0; 3505 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); 3506 3507 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) { 3508 Src = MI->getOperand(1).getReg(); 3509 Mods |= SISrcMods::NEG; 3510 MI = getDefIgnoringCopies(Src, *MRI); 3511 } 3512 3513 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) { 3514 Src = MI->getOperand(1).getReg(); 3515 Mods |= SISrcMods::ABS; 3516 } 3517 3518 if (OpSel) 3519 Mods |= SISrcMods::OP_SEL_0; 3520 3521 if ((Mods != 0 || ForceVGPR) && 3522 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { 3523 MachineInstr *UseMI = Root.getParent(); 3524 3525 // If we looked through copies to find source modifiers on an SGPR operand, 3526 // we now have an SGPR register source. To avoid potentially violating the 3527 // constant bus restriction, we need to insert a copy to a VGPR. 3528 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc); 3529 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(), 3530 TII.get(AMDGPU::COPY), VGPRSrc) 3531 .addReg(Src); 3532 Src = VGPRSrc; 3533 } 3534 3535 return std::make_pair(Src, Mods); 3536 } 3537 3538 /// 3539 /// This will select either an SGPR or VGPR operand and will save us from 3540 /// having to write an extra tablegen pattern. 3541 InstructionSelector::ComplexRendererFns 3542 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { 3543 return {{ 3544 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3545 }}; 3546 } 3547 3548 InstructionSelector::ComplexRendererFns 3549 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { 3550 Register Src; 3551 unsigned Mods; 3552 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3553 3554 return {{ 3555 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3556 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3557 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3558 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3559 }}; 3560 } 3561 3562 InstructionSelector::ComplexRendererFns 3563 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { 3564 Register Src; 3565 unsigned Mods; 3566 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3567 3568 return {{ 3569 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3570 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3571 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3572 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3573 }}; 3574 } 3575 3576 InstructionSelector::ComplexRendererFns 3577 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { 3578 return {{ 3579 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, 3580 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3581 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3582 }}; 3583 } 3584 3585 InstructionSelector::ComplexRendererFns 3586 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { 3587 Register Src; 3588 unsigned Mods; 3589 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3590 3591 return {{ 3592 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3593 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3594 }}; 3595 } 3596 3597 InstructionSelector::ComplexRendererFns 3598 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { 3599 Register Src; 3600 unsigned Mods; 3601 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3602 3603 return {{ 3604 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3605 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3606 }}; 3607 } 3608 3609 InstructionSelector::ComplexRendererFns 3610 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { 3611 Register Reg = Root.getReg(); 3612 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); 3613 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG || 3614 Def->getOpcode() == AMDGPU::G_FABS)) 3615 return {}; 3616 return {{ 3617 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3618 }}; 3619 } 3620 3621 std::pair<Register, unsigned> 3622 AMDGPUInstructionSelector::selectVOP3PModsImpl( 3623 Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const { 3624 unsigned Mods = 0; 3625 MachineInstr *MI = MRI.getVRegDef(Src); 3626 3627 if (MI && MI->getOpcode() == AMDGPU::G_FNEG && 3628 // It's possible to see an f32 fneg here, but unlikely. 3629 // TODO: Treat f32 fneg as only high bit. 3630 MRI.getType(Src) == LLT::fixed_vector(2, 16)) { 3631 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 3632 Src = MI->getOperand(1).getReg(); 3633 MI = MRI.getVRegDef(Src); 3634 } 3635 3636 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. 3637 (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard() 3638 3639 // Packed instructions do not have abs modifiers. 3640 Mods |= SISrcMods::OP_SEL_1; 3641 3642 return std::make_pair(Src, Mods); 3643 } 3644 3645 InstructionSelector::ComplexRendererFns 3646 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const { 3647 MachineRegisterInfo &MRI 3648 = Root.getParent()->getParent()->getParent()->getRegInfo(); 3649 3650 Register Src; 3651 unsigned Mods; 3652 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI); 3653 3654 return {{ 3655 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3656 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3657 }}; 3658 } 3659 3660 InstructionSelector::ComplexRendererFns 3661 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const { 3662 MachineRegisterInfo &MRI 3663 = Root.getParent()->getParent()->getParent()->getRegInfo(); 3664 3665 Register Src; 3666 unsigned Mods; 3667 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true); 3668 3669 return {{ 3670 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3671 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3672 }}; 3673 } 3674 3675 InstructionSelector::ComplexRendererFns 3676 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const { 3677 // Literal i1 value set in intrinsic, represents SrcMods for the next operand. 3678 // Value is in Imm operand as i1 sign extended to int64_t. 3679 // 1(-1) promotes packed values to signed, 0 treats them as unsigned. 3680 assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) && 3681 "expected i1 value"); 3682 unsigned Mods = SISrcMods::OP_SEL_1; 3683 if (Root.getImm() == -1) 3684 Mods ^= SISrcMods::NEG; 3685 return {{ 3686 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3687 }}; 3688 } 3689 3690 InstructionSelector::ComplexRendererFns 3691 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const { 3692 Register Src; 3693 unsigned Mods; 3694 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3695 if (!isKnownNeverNaN(Src, *MRI)) 3696 return None; 3697 3698 return {{ 3699 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3700 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3701 }}; 3702 } 3703 3704 InstructionSelector::ComplexRendererFns 3705 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const { 3706 // FIXME: Handle op_sel 3707 return {{ 3708 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }, 3709 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods 3710 }}; 3711 } 3712 3713 InstructionSelector::ComplexRendererFns 3714 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const { 3715 Register Src; 3716 unsigned Mods; 3717 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, 3718 /* AllowAbs */ false, 3719 /* OpSel */ false, 3720 /* ForceVGPR */ true); 3721 3722 return {{ 3723 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3724 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3725 }}; 3726 } 3727 3728 InstructionSelector::ComplexRendererFns 3729 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const { 3730 Register Src; 3731 unsigned Mods; 3732 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, 3733 /* AllowAbs */ false, 3734 /* OpSel */ true, 3735 /* ForceVGPR */ true); 3736 3737 return {{ 3738 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3739 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3740 }}; 3741 } 3742 3743 InstructionSelector::ComplexRendererFns 3744 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const { 3745 SmallVector<GEPInfo, 4> AddrInfo; 3746 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); 3747 3748 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3749 return None; 3750 3751 const GEPInfo &GEPInfo = AddrInfo[0]; 3752 Optional<int64_t> EncodedImm = 3753 AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false); 3754 if (!EncodedImm) 3755 return None; 3756 3757 unsigned PtrReg = GEPInfo.SgprParts[0]; 3758 return {{ 3759 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3760 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } 3761 }}; 3762 } 3763 3764 InstructionSelector::ComplexRendererFns 3765 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { 3766 SmallVector<GEPInfo, 4> AddrInfo; 3767 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); 3768 3769 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3770 return None; 3771 3772 const GEPInfo &GEPInfo = AddrInfo[0]; 3773 Register PtrReg = GEPInfo.SgprParts[0]; 3774 Optional<int64_t> EncodedImm = 3775 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm); 3776 if (!EncodedImm) 3777 return None; 3778 3779 return {{ 3780 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3781 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } 3782 }}; 3783 } 3784 3785 InstructionSelector::ComplexRendererFns 3786 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { 3787 MachineInstr *MI = Root.getParent(); 3788 MachineBasicBlock *MBB = MI->getParent(); 3789 3790 SmallVector<GEPInfo, 4> AddrInfo; 3791 getAddrModeInfo(*MI, *MRI, AddrInfo); 3792 3793 // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits, 3794 // then we can select all ptr + 32-bit offsets not just immediate offsets. 3795 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3796 return None; 3797 3798 const GEPInfo &GEPInfo = AddrInfo[0]; 3799 // SGPR offset is unsigned. 3800 if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm)) 3801 return None; 3802 3803 // If we make it this far we have a load with an 32-bit immediate offset. 3804 // It is OK to select this using a sgpr offset, because we have already 3805 // failed trying to select this load into one of the _IMM variants since 3806 // the _IMM Patterns are considered before the _SGPR patterns. 3807 Register PtrReg = GEPInfo.SgprParts[0]; 3808 Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 3809 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg) 3810 .addImm(GEPInfo.Imm); 3811 return {{ 3812 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3813 [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); } 3814 }}; 3815 } 3816 3817 std::pair<Register, int> 3818 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root, 3819 uint64_t FlatVariant) const { 3820 MachineInstr *MI = Root.getParent(); 3821 3822 auto Default = std::make_pair(Root.getReg(), 0); 3823 3824 if (!STI.hasFlatInstOffsets()) 3825 return Default; 3826 3827 Register PtrBase; 3828 int64_t ConstOffset; 3829 std::tie(PtrBase, ConstOffset) = 3830 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 3831 if (ConstOffset == 0) 3832 return Default; 3833 3834 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace(); 3835 if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant)) 3836 return Default; 3837 3838 return std::make_pair(PtrBase, ConstOffset); 3839 } 3840 3841 InstructionSelector::ComplexRendererFns 3842 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const { 3843 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT); 3844 3845 return {{ 3846 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3847 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3848 }}; 3849 } 3850 3851 InstructionSelector::ComplexRendererFns 3852 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const { 3853 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal); 3854 3855 return {{ 3856 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3857 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3858 }}; 3859 } 3860 3861 InstructionSelector::ComplexRendererFns 3862 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const { 3863 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch); 3864 3865 return {{ 3866 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3867 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3868 }}; 3869 } 3870 3871 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset) 3872 InstructionSelector::ComplexRendererFns 3873 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const { 3874 Register Addr = Root.getReg(); 3875 Register PtrBase; 3876 int64_t ConstOffset; 3877 int64_t ImmOffset = 0; 3878 3879 // Match the immediate offset first, which canonically is moved as low as 3880 // possible. 3881 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 3882 3883 if (ConstOffset != 0) { 3884 if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, 3885 SIInstrFlags::FlatGlobal)) { 3886 Addr = PtrBase; 3887 ImmOffset = ConstOffset; 3888 } else { 3889 auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI); 3890 if (isSGPR(PtrBaseDef->Reg)) { 3891 if (ConstOffset > 0) { 3892 // Offset is too large. 3893 // 3894 // saddr + large_offset -> saddr + 3895 // (voffset = large_offset & ~MaxOffset) + 3896 // (large_offset & MaxOffset); 3897 int64_t SplitImmOffset, RemainderOffset; 3898 std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset( 3899 ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal); 3900 3901 if (isUInt<32>(RemainderOffset)) { 3902 MachineInstr *MI = Root.getParent(); 3903 MachineBasicBlock *MBB = MI->getParent(); 3904 Register HighBits = 3905 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3906 3907 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), 3908 HighBits) 3909 .addImm(RemainderOffset); 3910 3911 return {{ 3912 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr 3913 [=](MachineInstrBuilder &MIB) { 3914 MIB.addReg(HighBits); 3915 }, // voffset 3916 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); }, 3917 }}; 3918 } 3919 } 3920 3921 // We are adding a 64 bit SGPR and a constant. If constant bus limit 3922 // is 1 we would need to perform 1 or 2 extra moves for each half of 3923 // the constant and it is better to do a scalar add and then issue a 3924 // single VALU instruction to materialize zero. Otherwise it is less 3925 // instructions to perform VALU adds with immediates or inline literals. 3926 unsigned NumLiterals = 3927 !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) + 3928 !TII.isInlineConstant(APInt(32, ConstOffset >> 32)); 3929 if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals) 3930 return None; 3931 } 3932 } 3933 } 3934 3935 // Match the variable offset. 3936 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3937 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 3938 // Look through the SGPR->VGPR copy. 3939 Register SAddr = 3940 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); 3941 3942 if (SAddr && isSGPR(SAddr)) { 3943 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); 3944 3945 // It's possible voffset is an SGPR here, but the copy to VGPR will be 3946 // inserted later. 3947 if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { 3948 return {{[=](MachineInstrBuilder &MIB) { // saddr 3949 MIB.addReg(SAddr); 3950 }, 3951 [=](MachineInstrBuilder &MIB) { // voffset 3952 MIB.addReg(VOffset); 3953 }, 3954 [=](MachineInstrBuilder &MIB) { // offset 3955 MIB.addImm(ImmOffset); 3956 }}}; 3957 } 3958 } 3959 } 3960 3961 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and 3962 // drop this. 3963 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF || 3964 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg)) 3965 return None; 3966 3967 // It's cheaper to materialize a single 32-bit zero for vaddr than the two 3968 // moves required to copy a 64-bit SGPR to VGPR. 3969 MachineInstr *MI = Root.getParent(); 3970 MachineBasicBlock *MBB = MI->getParent(); 3971 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3972 3973 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset) 3974 .addImm(0); 3975 3976 return {{ 3977 [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr 3978 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset 3979 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3980 }}; 3981 } 3982 3983 InstructionSelector::ComplexRendererFns 3984 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const { 3985 Register Addr = Root.getReg(); 3986 Register PtrBase; 3987 int64_t ConstOffset; 3988 int64_t ImmOffset = 0; 3989 3990 // Match the immediate offset first, which canonically is moved as low as 3991 // possible. 3992 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 3993 3994 if (ConstOffset != 0 && 3995 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, 3996 SIInstrFlags::FlatScratch)) { 3997 Addr = PtrBase; 3998 ImmOffset = ConstOffset; 3999 } 4000 4001 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 4002 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { 4003 int FI = AddrDef->MI->getOperand(1).getIndex(); 4004 return {{ 4005 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr 4006 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 4007 }}; 4008 } 4009 4010 Register SAddr = AddrDef->Reg; 4011 4012 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 4013 Register LHS = AddrDef->MI->getOperand(1).getReg(); 4014 Register RHS = AddrDef->MI->getOperand(2).getReg(); 4015 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); 4016 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI); 4017 4018 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX && 4019 isSGPR(RHSDef->Reg)) { 4020 int FI = LHSDef->MI->getOperand(1).getIndex(); 4021 MachineInstr &I = *Root.getParent(); 4022 MachineBasicBlock *BB = I.getParent(); 4023 const DebugLoc &DL = I.getDebugLoc(); 4024 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 4025 4026 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr) 4027 .addFrameIndex(FI) 4028 .addReg(RHSDef->Reg); 4029 } 4030 } 4031 4032 if (!isSGPR(SAddr)) 4033 return None; 4034 4035 return {{ 4036 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr 4037 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 4038 }}; 4039 } 4040 4041 // Check whether the flat scratch SVS swizzle bug affects this access. 4042 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug( 4043 Register VAddr, Register SAddr, uint64_t ImmOffset) const { 4044 if (!Subtarget->hasFlatScratchSVSSwizzleBug()) 4045 return false; 4046 4047 // The bug affects the swizzling of SVS accesses if there is any carry out 4048 // from the two low order bits (i.e. from bit 1 into bit 2) when adding 4049 // voffset to (soffset + inst_offset). 4050 auto VKnown = KnownBits->getKnownBits(VAddr); 4051 auto SKnown = KnownBits::computeForAddSub( 4052 true, false, KnownBits->getKnownBits(SAddr), 4053 KnownBits::makeConstant(APInt(32, ImmOffset))); 4054 uint64_t VMax = VKnown.getMaxValue().getZExtValue(); 4055 uint64_t SMax = SKnown.getMaxValue().getZExtValue(); 4056 return (VMax & 3) + (SMax & 3) >= 4; 4057 } 4058 4059 InstructionSelector::ComplexRendererFns 4060 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const { 4061 Register Addr = Root.getReg(); 4062 Register PtrBase; 4063 int64_t ConstOffset; 4064 int64_t ImmOffset = 0; 4065 4066 // Match the immediate offset first, which canonically is moved as low as 4067 // possible. 4068 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 4069 4070 if (ConstOffset != 0 && 4071 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 4072 Addr = PtrBase; 4073 ImmOffset = ConstOffset; 4074 } 4075 4076 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 4077 if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) 4078 return None; 4079 4080 Register RHS = AddrDef->MI->getOperand(2).getReg(); 4081 if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) 4082 return None; 4083 4084 Register LHS = AddrDef->MI->getOperand(1).getReg(); 4085 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); 4086 4087 if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset)) 4088 return None; 4089 4090 if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { 4091 int FI = LHSDef->MI->getOperand(1).getIndex(); 4092 return {{ 4093 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr 4094 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr 4095 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 4096 }}; 4097 } 4098 4099 if (!isSGPR(LHS)) 4100 return None; 4101 4102 return {{ 4103 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr 4104 [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr 4105 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 4106 }}; 4107 } 4108 4109 InstructionSelector::ComplexRendererFns 4110 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const { 4111 MachineInstr *MI = Root.getParent(); 4112 MachineBasicBlock *MBB = MI->getParent(); 4113 MachineFunction *MF = MBB->getParent(); 4114 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 4115 4116 int64_t Offset = 0; 4117 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) && 4118 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) { 4119 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 4120 4121 // TODO: Should this be inside the render function? The iterator seems to 4122 // move. 4123 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), 4124 HighBits) 4125 .addImm(Offset & ~4095); 4126 4127 return {{[=](MachineInstrBuilder &MIB) { // rsrc 4128 MIB.addReg(Info->getScratchRSrcReg()); 4129 }, 4130 [=](MachineInstrBuilder &MIB) { // vaddr 4131 MIB.addReg(HighBits); 4132 }, 4133 [=](MachineInstrBuilder &MIB) { // soffset 4134 // Use constant zero for soffset and rely on eliminateFrameIndex 4135 // to choose the appropriate frame register if need be. 4136 MIB.addImm(0); 4137 }, 4138 [=](MachineInstrBuilder &MIB) { // offset 4139 MIB.addImm(Offset & 4095); 4140 }}}; 4141 } 4142 4143 assert(Offset == 0 || Offset == -1); 4144 4145 // Try to fold a frame index directly into the MUBUF vaddr field, and any 4146 // offsets. 4147 Optional<int> FI; 4148 Register VAddr = Root.getReg(); 4149 if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) { 4150 Register PtrBase; 4151 int64_t ConstOffset; 4152 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI); 4153 if (ConstOffset != 0) { 4154 if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) && 4155 (!STI.privateMemoryResourceIsRangeChecked() || 4156 KnownBits->signBitIsZero(PtrBase))) { 4157 const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase); 4158 if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX) 4159 FI = PtrBaseDef->getOperand(1).getIndex(); 4160 else 4161 VAddr = PtrBase; 4162 Offset = ConstOffset; 4163 } 4164 } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) { 4165 FI = RootDef->getOperand(1).getIndex(); 4166 } 4167 } 4168 4169 return {{[=](MachineInstrBuilder &MIB) { // rsrc 4170 MIB.addReg(Info->getScratchRSrcReg()); 4171 }, 4172 [=](MachineInstrBuilder &MIB) { // vaddr 4173 if (FI.hasValue()) 4174 MIB.addFrameIndex(FI.getValue()); 4175 else 4176 MIB.addReg(VAddr); 4177 }, 4178 [=](MachineInstrBuilder &MIB) { // soffset 4179 // Use constant zero for soffset and rely on eliminateFrameIndex 4180 // to choose the appropriate frame register if need be. 4181 MIB.addImm(0); 4182 }, 4183 [=](MachineInstrBuilder &MIB) { // offset 4184 MIB.addImm(Offset); 4185 }}}; 4186 } 4187 4188 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base, 4189 int64_t Offset) const { 4190 if (!isUInt<16>(Offset)) 4191 return false; 4192 4193 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled()) 4194 return true; 4195 4196 // On Southern Islands instruction with a negative base value and an offset 4197 // don't seem to work. 4198 return KnownBits->signBitIsZero(Base); 4199 } 4200 4201 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0, 4202 int64_t Offset1, 4203 unsigned Size) const { 4204 if (Offset0 % Size != 0 || Offset1 % Size != 0) 4205 return false; 4206 if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size)) 4207 return false; 4208 4209 if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled()) 4210 return true; 4211 4212 // On Southern Islands instruction with a negative base value and an offset 4213 // don't seem to work. 4214 return KnownBits->signBitIsZero(Base); 4215 } 4216 4217 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI, 4218 unsigned ShAmtBits) const { 4219 assert(MI.getOpcode() == TargetOpcode::G_AND); 4220 4221 Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI); 4222 if (!RHS) 4223 return false; 4224 4225 if (RHS->countTrailingOnes() >= ShAmtBits) 4226 return true; 4227 4228 const APInt &LHSKnownZeros = 4229 KnownBits->getKnownZeroes(MI.getOperand(1).getReg()); 4230 return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits; 4231 } 4232 4233 // Return the wave level SGPR base address if this is a wave address. 4234 static Register getWaveAddress(const MachineInstr *Def) { 4235 return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS 4236 ? Def->getOperand(1).getReg() 4237 : Register(); 4238 } 4239 4240 InstructionSelector::ComplexRendererFns 4241 AMDGPUInstructionSelector::selectMUBUFScratchOffset( 4242 MachineOperand &Root) const { 4243 Register Reg = Root.getReg(); 4244 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 4245 4246 const MachineInstr *Def = MRI->getVRegDef(Reg); 4247 if (Register WaveBase = getWaveAddress(Def)) { 4248 return {{ 4249 [=](MachineInstrBuilder &MIB) { // rsrc 4250 MIB.addReg(Info->getScratchRSrcReg()); 4251 }, 4252 [=](MachineInstrBuilder &MIB) { // soffset 4253 MIB.addReg(WaveBase); 4254 }, 4255 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset 4256 }}; 4257 } 4258 4259 int64_t Offset = 0; 4260 4261 // FIXME: Copy check is a hack 4262 Register BasePtr; 4263 if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) { 4264 if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset)) 4265 return {}; 4266 const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr); 4267 Register WaveBase = getWaveAddress(BasePtrDef); 4268 if (!WaveBase) 4269 return {}; 4270 4271 return {{ 4272 [=](MachineInstrBuilder &MIB) { // rsrc 4273 MIB.addReg(Info->getScratchRSrcReg()); 4274 }, 4275 [=](MachineInstrBuilder &MIB) { // soffset 4276 MIB.addReg(WaveBase); 4277 }, 4278 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset 4279 }}; 4280 } 4281 4282 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) || 4283 !SIInstrInfo::isLegalMUBUFImmOffset(Offset)) 4284 return {}; 4285 4286 return {{ 4287 [=](MachineInstrBuilder &MIB) { // rsrc 4288 MIB.addReg(Info->getScratchRSrcReg()); 4289 }, 4290 [=](MachineInstrBuilder &MIB) { // soffset 4291 MIB.addImm(0); 4292 }, 4293 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset 4294 }}; 4295 } 4296 4297 std::pair<Register, unsigned> 4298 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const { 4299 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 4300 if (!RootDef) 4301 return std::make_pair(Root.getReg(), 0); 4302 4303 int64_t ConstAddr = 0; 4304 4305 Register PtrBase; 4306 int64_t Offset; 4307 std::tie(PtrBase, Offset) = 4308 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 4309 4310 if (Offset) { 4311 if (isDSOffsetLegal(PtrBase, Offset)) { 4312 // (add n0, c0) 4313 return std::make_pair(PtrBase, Offset); 4314 } 4315 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 4316 // TODO 4317 4318 4319 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 4320 // TODO 4321 4322 } 4323 4324 return std::make_pair(Root.getReg(), 0); 4325 } 4326 4327 InstructionSelector::ComplexRendererFns 4328 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const { 4329 Register Reg; 4330 unsigned Offset; 4331 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root); 4332 return {{ 4333 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 4334 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } 4335 }}; 4336 } 4337 4338 InstructionSelector::ComplexRendererFns 4339 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const { 4340 return selectDSReadWrite2(Root, 4); 4341 } 4342 4343 InstructionSelector::ComplexRendererFns 4344 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const { 4345 return selectDSReadWrite2(Root, 8); 4346 } 4347 4348 InstructionSelector::ComplexRendererFns 4349 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root, 4350 unsigned Size) const { 4351 Register Reg; 4352 unsigned Offset; 4353 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size); 4354 return {{ 4355 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 4356 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, 4357 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); } 4358 }}; 4359 } 4360 4361 std::pair<Register, unsigned> 4362 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root, 4363 unsigned Size) const { 4364 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 4365 if (!RootDef) 4366 return std::make_pair(Root.getReg(), 0); 4367 4368 int64_t ConstAddr = 0; 4369 4370 Register PtrBase; 4371 int64_t Offset; 4372 std::tie(PtrBase, Offset) = 4373 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 4374 4375 if (Offset) { 4376 int64_t OffsetValue0 = Offset; 4377 int64_t OffsetValue1 = Offset + Size; 4378 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) { 4379 // (add n0, c0) 4380 return std::make_pair(PtrBase, OffsetValue0 / Size); 4381 } 4382 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 4383 // TODO 4384 4385 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 4386 // TODO 4387 4388 } 4389 4390 return std::make_pair(Root.getReg(), 0); 4391 } 4392 4393 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return 4394 /// the base value with the constant offset. There may be intervening copies 4395 /// between \p Root and the identified constant. Returns \p Root, 0 if this does 4396 /// not match the pattern. 4397 std::pair<Register, int64_t> 4398 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset( 4399 Register Root, const MachineRegisterInfo &MRI) const { 4400 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI); 4401 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) 4402 return {Root, 0}; 4403 4404 MachineOperand &RHS = RootI->getOperand(2); 4405 Optional<ValueAndVReg> MaybeOffset = 4406 getIConstantVRegValWithLookThrough(RHS.getReg(), MRI); 4407 if (!MaybeOffset) 4408 return {Root, 0}; 4409 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()}; 4410 } 4411 4412 static void addZeroImm(MachineInstrBuilder &MIB) { 4413 MIB.addImm(0); 4414 } 4415 4416 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p 4417 /// BasePtr is not valid, a null base pointer will be used. 4418 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4419 uint32_t FormatLo, uint32_t FormatHi, 4420 Register BasePtr) { 4421 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4422 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4423 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4424 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4425 4426 B.buildInstr(AMDGPU::S_MOV_B32) 4427 .addDef(RSrc2) 4428 .addImm(FormatLo); 4429 B.buildInstr(AMDGPU::S_MOV_B32) 4430 .addDef(RSrc3) 4431 .addImm(FormatHi); 4432 4433 // Build the half of the subregister with the constants before building the 4434 // full 128-bit register. If we are building multiple resource descriptors, 4435 // this will allow CSEing of the 2-component register. 4436 B.buildInstr(AMDGPU::REG_SEQUENCE) 4437 .addDef(RSrcHi) 4438 .addReg(RSrc2) 4439 .addImm(AMDGPU::sub0) 4440 .addReg(RSrc3) 4441 .addImm(AMDGPU::sub1); 4442 4443 Register RSrcLo = BasePtr; 4444 if (!BasePtr) { 4445 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4446 B.buildInstr(AMDGPU::S_MOV_B64) 4447 .addDef(RSrcLo) 4448 .addImm(0); 4449 } 4450 4451 B.buildInstr(AMDGPU::REG_SEQUENCE) 4452 .addDef(RSrc) 4453 .addReg(RSrcLo) 4454 .addImm(AMDGPU::sub0_sub1) 4455 .addReg(RSrcHi) 4456 .addImm(AMDGPU::sub2_sub3); 4457 4458 return RSrc; 4459 } 4460 4461 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4462 const SIInstrInfo &TII, Register BasePtr) { 4463 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); 4464 4465 // FIXME: Why are half the "default" bits ignored based on the addressing 4466 // mode? 4467 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr); 4468 } 4469 4470 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4471 const SIInstrInfo &TII, Register BasePtr) { 4472 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); 4473 4474 // FIXME: Why are half the "default" bits ignored based on the addressing 4475 // mode? 4476 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr); 4477 } 4478 4479 AMDGPUInstructionSelector::MUBUFAddressData 4480 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const { 4481 MUBUFAddressData Data; 4482 Data.N0 = Src; 4483 4484 Register PtrBase; 4485 int64_t Offset; 4486 4487 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI); 4488 if (isUInt<32>(Offset)) { 4489 Data.N0 = PtrBase; 4490 Data.Offset = Offset; 4491 } 4492 4493 if (MachineInstr *InputAdd 4494 = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) { 4495 Data.N2 = InputAdd->getOperand(1).getReg(); 4496 Data.N3 = InputAdd->getOperand(2).getReg(); 4497 4498 // FIXME: Need to fix extra SGPR->VGPRcopies inserted 4499 // FIXME: Don't know this was defined by operand 0 4500 // 4501 // TODO: Remove this when we have copy folding optimizations after 4502 // RegBankSelect. 4503 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg(); 4504 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg(); 4505 } 4506 4507 return Data; 4508 } 4509 4510 /// Return if the addr64 mubuf mode should be used for the given address. 4511 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const { 4512 // (ptr_add N2, N3) -> addr64, or 4513 // (ptr_add (ptr_add N2, N3), C1) -> addr64 4514 if (Addr.N2) 4515 return true; 4516 4517 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI); 4518 return N0Bank->getID() == AMDGPU::VGPRRegBankID; 4519 } 4520 4521 /// Split an immediate offset \p ImmOffset depending on whether it fits in the 4522 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable 4523 /// component. 4524 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset( 4525 MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const { 4526 if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset)) 4527 return; 4528 4529 // Illegal offset, store it in soffset. 4530 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 4531 B.buildInstr(AMDGPU::S_MOV_B32) 4532 .addDef(SOffset) 4533 .addImm(ImmOffset); 4534 ImmOffset = 0; 4535 } 4536 4537 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl( 4538 MachineOperand &Root, Register &VAddr, Register &RSrcReg, 4539 Register &SOffset, int64_t &Offset) const { 4540 // FIXME: Predicates should stop this from reaching here. 4541 // addr64 bit was removed for volcanic islands. 4542 if (!STI.hasAddr64() || STI.useFlatForGlobal()) 4543 return false; 4544 4545 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4546 if (!shouldUseAddr64(AddrData)) 4547 return false; 4548 4549 Register N0 = AddrData.N0; 4550 Register N2 = AddrData.N2; 4551 Register N3 = AddrData.N3; 4552 Offset = AddrData.Offset; 4553 4554 // Base pointer for the SRD. 4555 Register SRDPtr; 4556 4557 if (N2) { 4558 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4559 assert(N3); 4560 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4561 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the 4562 // addr64, and construct the default resource from a 0 address. 4563 VAddr = N0; 4564 } else { 4565 SRDPtr = N3; 4566 VAddr = N2; 4567 } 4568 } else { 4569 // N2 is not divergent. 4570 SRDPtr = N2; 4571 VAddr = N3; 4572 } 4573 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4574 // Use the default null pointer in the resource 4575 VAddr = N0; 4576 } else { 4577 // N0 -> offset, or 4578 // (N0 + C1) -> offset 4579 SRDPtr = N0; 4580 } 4581 4582 MachineIRBuilder B(*Root.getParent()); 4583 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr); 4584 splitIllegalMUBUFOffset(B, SOffset, Offset); 4585 return true; 4586 } 4587 4588 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl( 4589 MachineOperand &Root, Register &RSrcReg, Register &SOffset, 4590 int64_t &Offset) const { 4591 4592 // FIXME: Pattern should not reach here. 4593 if (STI.useFlatForGlobal()) 4594 return false; 4595 4596 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4597 if (shouldUseAddr64(AddrData)) 4598 return false; 4599 4600 // N0 -> offset, or 4601 // (N0 + C1) -> offset 4602 Register SRDPtr = AddrData.N0; 4603 Offset = AddrData.Offset; 4604 4605 // TODO: Look through extensions for 32-bit soffset. 4606 MachineIRBuilder B(*Root.getParent()); 4607 4608 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr); 4609 splitIllegalMUBUFOffset(B, SOffset, Offset); 4610 return true; 4611 } 4612 4613 InstructionSelector::ComplexRendererFns 4614 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const { 4615 Register VAddr; 4616 Register RSrcReg; 4617 Register SOffset; 4618 int64_t Offset = 0; 4619 4620 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4621 return {}; 4622 4623 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4624 // pattern. 4625 return {{ 4626 [=](MachineInstrBuilder &MIB) { // rsrc 4627 MIB.addReg(RSrcReg); 4628 }, 4629 [=](MachineInstrBuilder &MIB) { // vaddr 4630 MIB.addReg(VAddr); 4631 }, 4632 [=](MachineInstrBuilder &MIB) { // soffset 4633 if (SOffset) 4634 MIB.addReg(SOffset); 4635 else 4636 MIB.addImm(0); 4637 }, 4638 [=](MachineInstrBuilder &MIB) { // offset 4639 MIB.addImm(Offset); 4640 }, 4641 addZeroImm, // cpol 4642 addZeroImm, // tfe 4643 addZeroImm // swz 4644 }}; 4645 } 4646 4647 InstructionSelector::ComplexRendererFns 4648 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const { 4649 Register RSrcReg; 4650 Register SOffset; 4651 int64_t Offset = 0; 4652 4653 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4654 return {}; 4655 4656 return {{ 4657 [=](MachineInstrBuilder &MIB) { // rsrc 4658 MIB.addReg(RSrcReg); 4659 }, 4660 [=](MachineInstrBuilder &MIB) { // soffset 4661 if (SOffset) 4662 MIB.addReg(SOffset); 4663 else 4664 MIB.addImm(0); 4665 }, 4666 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4667 addZeroImm, // cpol 4668 addZeroImm, // tfe 4669 addZeroImm, // swz 4670 }}; 4671 } 4672 4673 InstructionSelector::ComplexRendererFns 4674 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const { 4675 Register VAddr; 4676 Register RSrcReg; 4677 Register SOffset; 4678 int64_t Offset = 0; 4679 4680 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4681 return {}; 4682 4683 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4684 // pattern. 4685 return {{ 4686 [=](MachineInstrBuilder &MIB) { // rsrc 4687 MIB.addReg(RSrcReg); 4688 }, 4689 [=](MachineInstrBuilder &MIB) { // vaddr 4690 MIB.addReg(VAddr); 4691 }, 4692 [=](MachineInstrBuilder &MIB) { // soffset 4693 if (SOffset) 4694 MIB.addReg(SOffset); 4695 else 4696 MIB.addImm(0); 4697 }, 4698 [=](MachineInstrBuilder &MIB) { // offset 4699 MIB.addImm(Offset); 4700 }, 4701 [=](MachineInstrBuilder &MIB) { 4702 MIB.addImm(AMDGPU::CPol::GLC); // cpol 4703 } 4704 }}; 4705 } 4706 4707 InstructionSelector::ComplexRendererFns 4708 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const { 4709 Register RSrcReg; 4710 Register SOffset; 4711 int64_t Offset = 0; 4712 4713 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4714 return {}; 4715 4716 return {{ 4717 [=](MachineInstrBuilder &MIB) { // rsrc 4718 MIB.addReg(RSrcReg); 4719 }, 4720 [=](MachineInstrBuilder &MIB) { // soffset 4721 if (SOffset) 4722 MIB.addReg(SOffset); 4723 else 4724 MIB.addImm(0); 4725 }, 4726 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4727 [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol 4728 }}; 4729 } 4730 4731 /// Get an immediate that must be 32-bits, and treated as zero extended. 4732 static Optional<uint64_t> getConstantZext32Val(Register Reg, 4733 const MachineRegisterInfo &MRI) { 4734 // getIConstantVRegVal sexts any values, so see if that matters. 4735 Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); 4736 if (!OffsetVal || !isInt<32>(*OffsetVal)) 4737 return None; 4738 return Lo_32(*OffsetVal); 4739 } 4740 4741 InstructionSelector::ComplexRendererFns 4742 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const { 4743 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4744 if (!OffsetVal) 4745 return {}; 4746 4747 Optional<int64_t> EncodedImm = 4748 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true); 4749 if (!EncodedImm) 4750 return {}; 4751 4752 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4753 } 4754 4755 InstructionSelector::ComplexRendererFns 4756 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const { 4757 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); 4758 4759 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4760 if (!OffsetVal) 4761 return {}; 4762 4763 Optional<int64_t> EncodedImm 4764 = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); 4765 if (!EncodedImm) 4766 return {}; 4767 4768 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4769 } 4770 4771 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB, 4772 const MachineInstr &MI, 4773 int OpIdx) const { 4774 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4775 "Expected G_CONSTANT"); 4776 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue()); 4777 } 4778 4779 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB, 4780 const MachineInstr &MI, 4781 int OpIdx) const { 4782 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4783 "Expected G_CONSTANT"); 4784 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue()); 4785 } 4786 4787 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB, 4788 const MachineInstr &MI, 4789 int OpIdx) const { 4790 assert(OpIdx == -1); 4791 4792 const MachineOperand &Op = MI.getOperand(1); 4793 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) 4794 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); 4795 else { 4796 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"); 4797 MIB.addImm(Op.getCImm()->getSExtValue()); 4798 } 4799 } 4800 4801 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB, 4802 const MachineInstr &MI, 4803 int OpIdx) const { 4804 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4805 "Expected G_CONSTANT"); 4806 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation()); 4807 } 4808 4809 /// This only really exists to satisfy DAG type checking machinery, so is a 4810 /// no-op here. 4811 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB, 4812 const MachineInstr &MI, 4813 int OpIdx) const { 4814 MIB.addImm(MI.getOperand(OpIdx).getImm()); 4815 } 4816 4817 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB, 4818 const MachineInstr &MI, 4819 int OpIdx) const { 4820 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4821 MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL); 4822 } 4823 4824 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB, 4825 const MachineInstr &MI, 4826 int OpIdx) const { 4827 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4828 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1); 4829 } 4830 4831 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB, 4832 const MachineInstr &MI, 4833 int OpIdx) const { 4834 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4835 MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC); 4836 } 4837 4838 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB, 4839 const MachineInstr &MI, 4840 int OpIdx) const { 4841 MIB.addFrameIndex((MI.getOperand(1).getIndex())); 4842 } 4843 4844 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const { 4845 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm()); 4846 } 4847 4848 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const { 4849 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm()); 4850 } 4851 4852 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const { 4853 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm()); 4854 } 4855 4856 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const { 4857 return TII.isInlineConstant(Imm); 4858 } 4859