//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
                                        CodeGenCoverage &CoverageInfo) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo);
}

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
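  // Register classes and banks are only tracked for virtual registers, so
  // physical register uses cannot be classified here.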
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Register MaskedReg = MRI->createVirtualRegister(SrcRC);

      // We can't trust the high bits at this point, so clear them.

      // TODO: Skip masking high bits if def is known boolean.

      unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
        AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
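      // Emit (SrcReg & 1) != 0: the AND clears the untrusted high bits and the
      // compare regenerates a full wave mask in DstReg.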
      BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
        .addImm(1)
        .addReg(SrcReg);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
        .addImm(0)
        .addReg(MaskedReg);

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (e.g. VGPR to SGPR copy)

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
      .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}
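
// Select the scalar form of G_AND/G_OR/G_XOR. Only uniform (SGPR bank) and
// wave-mask (VCC bank) results are handled here; VCC-bank booleans are
// wave-sized, so they take the 64-bit opcodes on wave64 targets.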
bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
        .add(I.getOperand(1))
        .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
      .addDef(UnusedCarry, RegState::Dead)
      .add(I.getOperand(1))
      .add(I.getOperand(2))
      .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");
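
  // 64-bit case: split each operand into 32-bit halves, add them with a carry
  // chain, and recombine the two halves with a REG_SEQUENCE.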
  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);


  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
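
  // The scalar forms write their carry-out to SCC (and S_ADDC/S_SUBB also
  // read it), so the second result is copied out of SCC after the operation.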
  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to use only 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}
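
// Merge the sources into the destination register with a REG_SEQUENCE;
// sources narrower than 32 bits are left to the imported tablegen patterns.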
bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
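
// A v2s16 G_BUILD_VECTOR_TRUNC on the SGPR bank is selected to the S_PACK_*
// instructions, or folded away outright for constant and undef inputs.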
bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 = getConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 = getConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
      uint32_t Lo16 = static_cast<uint32_t>(ConstSrc0->Value) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(ConstSrc1->Value) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;
  int64_t ShiftAmt;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  // FIXME: This is an inconvenient way to check a specific value
  bool Shift0 = mi_match(
    Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
    ShiftAmt == 16;

  bool Shift1 = mi_match(
    Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
    ShiftAmt == 16;

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}
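
// Select G_INSERT as an INSERT_SUBREG when the inserted value lines up with a
// whole, 32-bit-aligned subregister of the destination.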
bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
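// (The fixup below keeps within that rule by routing the lane select through
// m0 whenever the written value itself needs the SGPR slot.)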
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
    getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
      getConstantVRegValWithLookThrough(Val, *MRI, true, true);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value,
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value);
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();
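
  // src0 of v_div_scale must match one of the other source operands; the
  // constant selector picks whether the numerator or denominator is scaled.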
  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::WWM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}
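
// Scalar compares write SCC rather than a wave mask; only 32-bit operands
// (plus 64-bit equality, where supported) have usable S_CMP forms.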
int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(2))
      .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
      constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
      RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
                               I.getOperand(0).getReg())
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}
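
// amdgcn.ballot over a constant condition folds to an all-zeros mask or a
// copy of exec; a non-constant argument is already a wave mask and is copied.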
bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

  Optional<ValueAndVReg> Arg =
      getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true);

  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value;
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}
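
// On AMDHSA and AMDPAL the LDS size is known by now and is emitted as an
// immediate; other OSes get a relocatable symbol resolved later.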
bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                     (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);
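
  // A constant offset can live entirely in the instruction's offset field
  // with m0 zeroed; a variable offset must be shifted into m0[21:16] instead.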
  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();
    MIB.addReg(VSrc);
    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }
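
  // The counter's base pointer goes in m0; the byte offset, when legal, is
  // folded into the instruction's immediate field.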
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
    .addImm(Offset)
    .addImm(IsGDS ? -1 : 0)
    .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
  if (TM.getOptLevel() > CodeGenOpt::None) {
    unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
    if (WGSize <= STI.getWavefrontSize()) {
      MachineBasicBlock *MBB = MI.getParent();
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
      MI.eraseFromParent();
      return true;
    }
  }
  return selectImpl(MI, *CoverageInfo);
}

static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {
  if (TexFailCtrl)
    IsTexFail = true;

  TFE = (TexFailCtrl & 0x1) ? 1 : 0;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) ? 1 : 0;
  TexFailCtrl &= ~(uint64_t)0x2;

  return TexFailCtrl == 0;
}

static bool parseCachePolicy(uint64_t Value,
                             bool *GLC, bool *SLC, bool *DLC) {
  if (GLC) {
    *GLC = (Value & 0x1) ? 1 : 0;
    Value &= ~(uint64_t)0x1;
  }
  if (SLC) {
    *SLC = (Value & 0x2) ? 1 : 0;
    Value &= ~(uint64_t)0x2;
  }
  if (DLC) {
    *DLC = (Value & 0x4) ? 1 : 0;
    Value &= ~(uint64_t)0x4;
  }

  return Value == 0;
}

bool AMDGPUInstructionSelector::selectImageIntrinsic(
  MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
      AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
      AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
      AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
  unsigned IntrOpcode = Intr->BaseOpcode;
  const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;

  const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;

  Register VDataIn, VDataOut;
  LLT VDataTy;
  int NumVDataDwords = -1;
  bool IsD16 = false;

  bool Unorm;
  if (!BaseOpcode->Sampler)
    Unorm = true;
  else
    Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;

  bool TFE;
  bool LWE;
  bool IsTexFail = false;
  if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
                    TFE, LWE, IsTexFail))
    return false;

  const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
  const bool IsA16 = (Flags & 1) != 0;
  const bool IsG16 = (Flags & 2) != 0;

  // A16 implies 16 bit gradients
  if (IsA16 && !IsG16)
    return false;

  unsigned DMask = 0;
  unsigned DMaskLanes = 0;

  if (BaseOpcode->Atomic) {
    VDataOut = MI.getOperand(0).getReg();
    VDataIn = MI.getOperand(2).getReg();
    LLT Ty = MRI->getType(VDataIn);
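
    // Atomics return the previous value, so dmask and the vdata dword count
    // are derived from the data size (doubled for the cmpswap X2 variants).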
    // Be careful to allow atomic swap on 16-bit element vectors.
    const bool Is64Bit = BaseOpcode->AtomicX2 ?
      Ty.getSizeInBits() == 128 :
      Ty.getSizeInBits() == 64;

    if (BaseOpcode->AtomicX2) {
      assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);

      DMask = Is64Bit ? 0xf : 0x3;
      NumVDataDwords = Is64Bit ? 4 : 2;
    } else {
      DMask = Is64Bit ? 0x3 : 0x1;
      NumVDataDwords = Is64Bit ? 2 : 1;
    }
  } else {
    DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);

    // One memoperand is mandatory, except for getresinfo.
    // FIXME: Check this in verifier.
    if (!MI.memoperands_empty()) {
      const MachineMemOperand *MMO = *MI.memoperands_begin();

      // Infer d16 from the memory size, as the register type will be mangled by
      // unpacked subtargets, or by TFE.
      IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
    }

    if (BaseOpcode->Store) {
      VDataIn = MI.getOperand(1).getReg();
      VDataTy = MRI->getType(VDataIn);
      NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
    } else {
      VDataOut = MI.getOperand(0).getReg();
      VDataTy = MRI->getType(VDataOut);
      NumVDataDwords = DMaskLanes;

      if (IsD16 && !STI.hasUnpackedD16VMem())
        NumVDataDwords = (DMaskLanes + 1) / 2;
    }
  }

  // Optimize _L to _LZ when _L is zero
  if (LZMappingInfo) {
    // The legalizer replaced the register with an immediate 0 if we need to
    // change the opcode.
    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
    }
  }

  // Optimize _mip away, when 'lod' is zero
  if (MIPMappingInfo) {
    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
    }
  }

  // Set G16 opcode
  if (IsG16 && !IsA16) {
    const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
        AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
    assert(G16MappingInfo);
    IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
  }

  // TODO: Check this in verifier.
  assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");

  bool GLC = false;
  bool SLC = false;
  bool DLC = false;
  if (BaseOpcode->Atomic) {
    GLC = true; // TODO no-return optimization
    if (!parseCachePolicy(
            MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), nullptr,
            &SLC, IsGFX10 ? &DLC : nullptr))
      return false;
  } else {
    if (!parseCachePolicy(
            MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(), &GLC,
            &SLC, IsGFX10 ? &DLC : nullptr))
      return false;
  }

  int NumVAddrRegs = 0;
  int NumVAddrDwords = 0;
  for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
    // Skip the $noregs and 0s inserted during legalization.
    MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
    if (!AddrOp.isReg())
      continue; // XXX - Break?

    Register Addr = AddrOp.getReg();
    if (!Addr)
      break;

    ++NumVAddrRegs;
    NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
  }
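
  // NSA (non-sequential address) encodes each address component in its own
  // VGPR, so it only applies when every component occupies exactly one
  // register.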
If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register.
  const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
  if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
    LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
    return false;
  }

  if (IsTexFail)
    ++NumVDataDwords;

  int Opcode = -1;
  if (IsGFX10) {
    Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
                                   UseNSA ? AMDGPU::MIMGEncGfx10NSA
                                          : AMDGPU::MIMGEncGfx10Default,
                                   NumVDataDwords, NumVAddrDwords);
  } else {
    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
                                     NumVDataDwords, NumVAddrDwords);
    if (Opcode == -1)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
                                     NumVDataDwords, NumVAddrDwords);
  }
  assert(Opcode != -1);

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
    .cloneMemRefs(MI);

  if (VDataOut) {
    if (BaseOpcode->AtomicX2) {
      const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;

      Register TmpReg = MRI->createVirtualRegister(
        Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
      unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;

      MIB.addDef(TmpReg);
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
        .addReg(TmpReg, RegState::Kill, SubReg);

    } else {
      MIB.addDef(VDataOut); // vdata output
    }
  }

  if (VDataIn)
    MIB.addReg(VDataIn); // vdata input

  for (int I = 0; I != NumVAddrRegs; ++I) {
    MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
    if (SrcOp.isReg()) {
      assert(SrcOp.getReg() != 0);
      MIB.addReg(SrcOp.getReg());
    }
  }

  MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
  if (BaseOpcode->Sampler)
    MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());

  MIB.addImm(DMask); // dmask

  if (IsGFX10)
    MIB.addImm(DimInfo->Encoding);
  MIB.addImm(Unorm);
  if (IsGFX10)
    MIB.addImm(DLC);

  MIB.addImm(GLC);
  MIB.addImm(SLC);
  MIB.addImm(IsA16 && // a16 or r128
             STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
  if (IsGFX10)
    MIB.addImm(IsA16 ? -1 : 0);

  MIB.addImm(TFE); // tfe
  MIB.addImm(LWE); // lwe
  if (!IsGFX10)
    MIB.addImm(DimInfo->DA ? -1 : 0);
  if (BaseOpcode->HasD16)
    MIB.addImm(IsD16 ? -1 : 0);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_end_cf:
    return selectEndCfIntrinsic(I);
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
    return selectDSOrderedIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return selectDSGWSIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_append:
    return selectDSAppendConsume(I, true);
  case Intrinsic::amdgcn_ds_consume:
    return selectDSAppendConsume(I, false);
  case Intrinsic::amdgcn_s_barrier:
    return selectSBarrier(I);
  case Intrinsic::amdgcn_global_atomic_fadd:
    return selectGlobalAtomicFaddIntrinsic(I);
  default: {
    return selectImpl(I, *CoverageInfo);
  }
  }
}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  Register CCReg = CCOp.getReg();
  if (!isVCC(CCReg, *MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class we use to
    // represent it. Manually set the register class here instead.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
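  // V_CNDMASK_B32 only selects between 32-bit values, so a wider VGPR select
  // surviving to this point means the expected split did not happen; failing
  // here surfaces that as a selection error rather than emitting a truncated
  // select.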
1788 if (Size > 32) 1789 return false; 1790 1791 MachineInstr *Select = 1792 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1793 .addImm(0) 1794 .add(I.getOperand(3)) 1795 .addImm(0) 1796 .add(I.getOperand(2)) 1797 .add(I.getOperand(1)); 1798 1799 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1800 I.eraseFromParent(); 1801 return Ret; 1802 } 1803 1804 static int sizeToSubRegIndex(unsigned Size) { 1805 switch (Size) { 1806 case 32: 1807 return AMDGPU::sub0; 1808 case 64: 1809 return AMDGPU::sub0_sub1; 1810 case 96: 1811 return AMDGPU::sub0_sub1_sub2; 1812 case 128: 1813 return AMDGPU::sub0_sub1_sub2_sub3; 1814 case 256: 1815 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; 1816 default: 1817 if (Size < 32) 1818 return AMDGPU::sub0; 1819 if (Size > 256) 1820 return -1; 1821 return sizeToSubRegIndex(PowerOf2Ceil(Size)); 1822 } 1823 } 1824 1825 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { 1826 Register DstReg = I.getOperand(0).getReg(); 1827 Register SrcReg = I.getOperand(1).getReg(); 1828 const LLT DstTy = MRI->getType(DstReg); 1829 const LLT SrcTy = MRI->getType(SrcReg); 1830 const LLT S1 = LLT::scalar(1); 1831 1832 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 1833 const RegisterBank *DstRB; 1834 if (DstTy == S1) { 1835 // This is a special case. We don't treat s1 for legalization artifacts as 1836 // vcc booleans. 1837 DstRB = SrcRB; 1838 } else { 1839 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1840 if (SrcRB != DstRB) 1841 return false; 1842 } 1843 1844 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 1845 1846 unsigned DstSize = DstTy.getSizeInBits(); 1847 unsigned SrcSize = SrcTy.getSizeInBits(); 1848 1849 const TargetRegisterClass *SrcRC 1850 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI); 1851 const TargetRegisterClass *DstRC 1852 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI); 1853 if (!SrcRC || !DstRC) 1854 return false; 1855 1856 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 1857 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { 1858 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); 1859 return false; 1860 } 1861 1862 if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) { 1863 MachineBasicBlock *MBB = I.getParent(); 1864 const DebugLoc &DL = I.getDebugLoc(); 1865 1866 Register LoReg = MRI->createVirtualRegister(DstRC); 1867 Register HiReg = MRI->createVirtualRegister(DstRC); 1868 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) 1869 .addReg(SrcReg, 0, AMDGPU::sub0); 1870 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) 1871 .addReg(SrcReg, 0, AMDGPU::sub1); 1872 1873 if (IsVALU && STI.hasSDWA()) { 1874 // Write the low 16-bits of the high element into the high 16-bits of the 1875 // low element. 
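      // The SDWA mov reads WORD_0 of HiReg and writes WORD_1 of the
      // destination with dst_unused PRESERVE, so the destination's low word
      // (tied to the implicit LoReg use) passes through unchanged.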
1876 MachineInstr *MovSDWA = 1877 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 1878 .addImm(0) // $src0_modifiers 1879 .addReg(HiReg) // $src0 1880 .addImm(0) // $clamp 1881 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 1882 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 1883 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 1884 .addReg(LoReg, RegState::Implicit); 1885 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 1886 } else { 1887 Register TmpReg0 = MRI->createVirtualRegister(DstRC); 1888 Register TmpReg1 = MRI->createVirtualRegister(DstRC); 1889 Register ImmReg = MRI->createVirtualRegister(DstRC); 1890 if (IsVALU) { 1891 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) 1892 .addImm(16) 1893 .addReg(HiReg); 1894 } else { 1895 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) 1896 .addReg(HiReg) 1897 .addImm(16); 1898 } 1899 1900 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1901 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 1902 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; 1903 1904 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) 1905 .addImm(0xffff); 1906 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) 1907 .addReg(LoReg) 1908 .addReg(ImmReg); 1909 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) 1910 .addReg(TmpReg0) 1911 .addReg(TmpReg1); 1912 } 1913 1914 I.eraseFromParent(); 1915 return true; 1916 } 1917 1918 if (!DstTy.isScalar()) 1919 return false; 1920 1921 if (SrcSize > 32) { 1922 int SubRegIdx = sizeToSubRegIndex(DstSize); 1923 if (SubRegIdx == -1) 1924 return false; 1925 1926 // Deal with weird cases where the class only partially supports the subreg 1927 // index. 1928 const TargetRegisterClass *SrcWithSubRC 1929 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); 1930 if (!SrcWithSubRC) 1931 return false; 1932 1933 if (SrcWithSubRC != SrcRC) { 1934 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) 1935 return false; 1936 } 1937 1938 I.getOperand(1).setSubReg(SubRegIdx); 1939 } 1940 1941 I.setDesc(TII.get(TargetOpcode::COPY)); 1942 return true; 1943 } 1944 1945 /// \returns true if a bitmask for \p Size bits will be an inline immediate. 1946 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { 1947 Mask = maskTrailingOnes<unsigned>(Size); 1948 int SignedMask = static_cast<int>(Mask); 1949 return SignedMask >= -16 && SignedMask <= 64; 1950 } 1951 1952 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. 1953 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( 1954 Register Reg, const MachineRegisterInfo &MRI, 1955 const TargetRegisterInfo &TRI) const { 1956 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 1957 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) 1958 return RB; 1959 1960 // Ignore the type, since we don't use vcc in artifacts. 
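  // Note: the invalid LLT is deliberate; without an s1 type,
  // getRegBankFromRegClass maps scalar boolean classes to the SGPR bank
  // rather than vcc.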
1961 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) 1962 return &RBI.getRegBankFromRegClass(*RC, LLT()); 1963 return nullptr; 1964 } 1965 1966 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { 1967 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; 1968 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; 1969 const DebugLoc &DL = I.getDebugLoc(); 1970 MachineBasicBlock &MBB = *I.getParent(); 1971 const Register DstReg = I.getOperand(0).getReg(); 1972 const Register SrcReg = I.getOperand(1).getReg(); 1973 1974 const LLT DstTy = MRI->getType(DstReg); 1975 const LLT SrcTy = MRI->getType(SrcReg); 1976 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? 1977 I.getOperand(2).getImm() : SrcTy.getSizeInBits(); 1978 const unsigned DstSize = DstTy.getSizeInBits(); 1979 if (!DstTy.isScalar()) 1980 return false; 1981 1982 // Artifact casts should never use vcc. 1983 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); 1984 1985 // FIXME: This should probably be illegal and split earlier. 1986 if (I.getOpcode() == AMDGPU::G_ANYEXT) { 1987 if (DstSize <= 32) 1988 return selectCOPY(I); 1989 1990 const TargetRegisterClass *SrcRC = 1991 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI); 1992 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 1993 const TargetRegisterClass *DstRC = 1994 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 1995 1996 Register UndefReg = MRI->createVirtualRegister(SrcRC); 1997 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 1998 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 1999 .addReg(SrcReg) 2000 .addImm(AMDGPU::sub0) 2001 .addReg(UndefReg) 2002 .addImm(AMDGPU::sub1); 2003 I.eraseFromParent(); 2004 2005 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && 2006 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); 2007 } 2008 2009 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { 2010 // 64-bit should have been split up in RegBankSelect 2011 2012 // Try to use an and with a mask if it will save code size. 2013 unsigned Mask; 2014 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2015 MachineInstr *ExtI = 2016 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) 2017 .addImm(Mask) 2018 .addReg(SrcReg); 2019 I.eraseFromParent(); 2020 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2021 } 2022 2023 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32; 2024 MachineInstr *ExtI = 2025 BuildMI(MBB, I, DL, TII.get(BFE), DstReg) 2026 .addReg(SrcReg) 2027 .addImm(0) // Offset 2028 .addImm(SrcSize); // Width 2029 I.eraseFromParent(); 2030 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2031 } 2032 2033 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { 2034 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? 2035 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; 2036 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) 2037 return false; 2038 2039 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { 2040 const unsigned SextOpc = SrcSize == 8 ? 2041 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; 2042 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) 2043 .addReg(SrcReg); 2044 I.eraseFromParent(); 2045 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2046 } 2047 2048 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; 2049 const unsigned BFE32 = Signed ? 
AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 2050 2051 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. 2052 if (DstSize > 32 && (SrcSize <= 32 || InReg)) { 2053 // We need a 64-bit register source, but the high bits don't matter. 2054 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); 2055 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2056 unsigned SubReg = InReg ? AMDGPU::sub0 : 0; 2057 2058 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2059 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) 2060 .addReg(SrcReg, 0, SubReg) 2061 .addImm(AMDGPU::sub0) 2062 .addReg(UndefReg) 2063 .addImm(AMDGPU::sub1); 2064 2065 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) 2066 .addReg(ExtReg) 2067 .addImm(SrcSize << 16); 2068 2069 I.eraseFromParent(); 2070 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); 2071 } 2072 2073 unsigned Mask; 2074 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2075 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) 2076 .addReg(SrcReg) 2077 .addImm(Mask); 2078 } else { 2079 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) 2080 .addReg(SrcReg) 2081 .addImm(SrcSize << 16); 2082 } 2083 2084 I.eraseFromParent(); 2085 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2086 } 2087 2088 return false; 2089 } 2090 2091 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { 2092 MachineBasicBlock *BB = I.getParent(); 2093 MachineOperand &ImmOp = I.getOperand(1); 2094 Register DstReg = I.getOperand(0).getReg(); 2095 unsigned Size = MRI->getType(DstReg).getSizeInBits(); 2096 2097 // The AMDGPU backend only supports Imm operands and not CImm or FPImm. 2098 if (ImmOp.isFPImm()) { 2099 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); 2100 ImmOp.ChangeToImmediate(Imm.getZExtValue()); 2101 } else if (ImmOp.isCImm()) { 2102 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); 2103 } else { 2104 llvm_unreachable("Not supported by g_constants"); 2105 } 2106 2107 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2108 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; 2109 2110 unsigned Opcode; 2111 if (DstRB->getID() == AMDGPU::VCCRegBankID) { 2112 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2113 } else { 2114 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 2115 2116 // We should never produce s1 values on banks other than VCC. If the user of 2117 // this already constrained the register, we may incorrectly think it's VCC 2118 // if it wasn't originally. 2119 if (Size == 1) 2120 return false; 2121 } 2122 2123 if (Size != 64) { 2124 I.setDesc(TII.get(Opcode)); 2125 I.addImplicitDefUseOperands(*MF); 2126 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 2127 } 2128 2129 const DebugLoc &DL = I.getDebugLoc(); 2130 2131 APInt Imm(Size, I.getOperand(1).getImm()); 2132 2133 MachineInstr *ResInst; 2134 if (IsSgpr && TII.isInlineConstant(Imm)) { 2135 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 2136 .addImm(I.getOperand(1).getImm()); 2137 } else { 2138 const TargetRegisterClass *RC = IsSgpr ? 
2139 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; 2140 Register LoReg = MRI->createVirtualRegister(RC); 2141 Register HiReg = MRI->createVirtualRegister(RC); 2142 2143 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) 2144 .addImm(Imm.trunc(32).getZExtValue()); 2145 2146 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) 2147 .addImm(Imm.ashr(32).getZExtValue()); 2148 2149 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2150 .addReg(LoReg) 2151 .addImm(AMDGPU::sub0) 2152 .addReg(HiReg) 2153 .addImm(AMDGPU::sub1); 2154 } 2155 2156 // We can't call constrainSelectedInstRegOperands here, because it doesn't 2157 // work for target independent opcodes 2158 I.eraseFromParent(); 2159 const TargetRegisterClass *DstRC = 2160 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); 2161 if (!DstRC) 2162 return true; 2163 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); 2164 } 2165 2166 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { 2167 // Only manually handle the f64 SGPR case. 2168 // 2169 // FIXME: This is a workaround for 2.5 different tablegen problems. Because 2170 // the bit ops theoretically have a second result due to the implicit def of 2171 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing 2172 // that is easy by disabling the check. The result works, but uses a 2173 // nonsensical sreg32orlds_and_sreg_1 regclass. 2174 // 2175 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to 2176 // the variadic REG_SEQUENCE operands. 2177 2178 Register Dst = MI.getOperand(0).getReg(); 2179 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2180 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2181 MRI->getType(Dst) != LLT::scalar(64)) 2182 return false; 2183 2184 Register Src = MI.getOperand(1).getReg(); 2185 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); 2186 if (Fabs) 2187 Src = Fabs->getOperand(1).getReg(); 2188 2189 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2190 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2191 return false; 2192 2193 MachineBasicBlock *BB = MI.getParent(); 2194 const DebugLoc &DL = MI.getDebugLoc(); 2195 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2196 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2197 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2198 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2199 2200 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2201 .addReg(Src, 0, AMDGPU::sub0); 2202 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2203 .addReg(Src, 0, AMDGPU::sub1); 2204 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2205 .addImm(0x80000000); 2206 2207 // Set or toggle sign bit. 2208 unsigned Opc = Fabs ? 
AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
  BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);
  MI.eraseFromParent();
  return true;
}

// FIXME: This is a workaround for the same tablegen problems as G_FNEG
bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x7fffffff);

  // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return true;
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1; i != 3; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (i == 2 && isConstant(*OpDef)) {
      // TODO: Could handle constant base + variable offset, but a combine
      // probably should have commuted it.
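      // Each G_PTR_ADD contributes at most one constant offset, and this is
      // the only operand that can set it, so the immediate must still be
      // unset here.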
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
  unsigned AS = PtrTy.getAddressSpace();
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
      STI.ldsRequiresM0Init()) {
    MachineBasicBlock *BB = I.getParent();

    // If DS instructions require M0 initialization, insert it before selecting.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(-1);
  }
}

bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
  MachineInstr &I) const {
  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

// TODO: No rtn optimization.
bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
  MachineInstr &MI) const {
  Register PtrReg = MI.getOperand(1).getReg();
  const LLT PtrTy = MRI->getType(PtrReg);
  if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
      STI.useFlatForGlobal())
    return selectImpl(MI, *CoverageInfo);

  Register DstReg = MI.getOperand(0).getReg();
  const LLT Ty = MRI->getType(DstReg);
  const bool Is64 = Ty.getSizeInBits() == 64;
  const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  Register TmpReg = MRI->createVirtualRegister(
    Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  Register VAddr, RSrcReg, SOffset;
  int64_t Offset = 0;

  unsigned Opcode;
  if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
  } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
                                   RSrcReg, SOffset, Offset)) {
    Opcode = Is64 ?
AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
                    AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
  } else
    return selectImpl(MI, *CoverageInfo);

  auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
    .addReg(MI.getOperand(2).getReg());

  if (VAddr)
    MIB.addReg(VAddr);

  MIB.addReg(RSrcReg);
  if (SOffset)
    MIB.addReg(SOffset);
  else
    MIB.addImm(0);

  MIB.addImm(Offset);
  MIB.addImm(1); // glc
  MIB.addImm(0); // slc
  MIB.cloneMemRefs(MI);

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(TmpReg, RegState::Kill, SubReg);

  MI.eraseFromParent();

  MRI->setRegClass(
    DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineOperand &CondOp = I.getOperand(0);
  Register CondReg = CondOp.getReg();
  const DebugLoc &DL = I.getDebugLoc();

  unsigned BrOpcode;
  Register CondPhysReg;
  const TargetRegisterClass *ConstrainRC;

  // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
  // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // RegBankSelect knows what it's doing if the branch condition is scc, even
  // though it currently does not.
  if (!isVCC(CondReg, *MRI)) {
    if (MRI->getType(CondReg) != LLT::scalar(32))
      return false;

    CondPhysReg = AMDGPU::SCC;
    BrOpcode = AMDGPU::S_CBRANCH_SCC1;
    ConstrainRC = &AMDGPU::SReg_32RegClass;
  } else {
    // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // Based on the register bank, we sort of know that a VCC producer ands
    // inactive lanes with 0. What if there was a logical operation with vcc
    // producers in different blocks/with different exec masks?
    // FIXME: Should scc->vcc copies and with exec?
    CondPhysReg = TRI.getVCC();
    BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
    ConstrainRC = TRI.getBoolRC();
  }

  if (!MRI->getRegClassOrNull(CondReg))
    MRI->setRegClass(CondReg, ConstrainRC);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
    .addReg(CondReg);
  BuildMI(*BB, &I, DL, TII.get(BrOpcode))
    .addMBB(I.getOperand(1).getMBB());

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
  MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
  I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
  if (IsVGPR)
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  return RBI.constrainGenericRegister(
    DstReg, IsVGPR ?
AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); 2466 } 2467 2468 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { 2469 Register DstReg = I.getOperand(0).getReg(); 2470 Register SrcReg = I.getOperand(1).getReg(); 2471 Register MaskReg = I.getOperand(2).getReg(); 2472 LLT Ty = MRI->getType(DstReg); 2473 LLT MaskTy = MRI->getType(MaskReg); 2474 2475 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2476 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2477 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); 2478 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2479 if (DstRB != SrcRB) // Should only happen for hand written MIR. 2480 return false; 2481 2482 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 2483 const TargetRegisterClass &RegRC 2484 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2485 2486 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB, 2487 *MRI); 2488 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB, 2489 *MRI); 2490 const TargetRegisterClass *MaskRC = 2491 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI); 2492 2493 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2494 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2495 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) 2496 return false; 2497 2498 MachineBasicBlock *BB = I.getParent(); 2499 const DebugLoc &DL = I.getDebugLoc(); 2500 if (Ty.getSizeInBits() == 32) { 2501 assert(MaskTy.getSizeInBits() == 32 && 2502 "ptrmask should have been narrowed during legalize"); 2503 2504 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) 2505 .addReg(SrcReg) 2506 .addReg(MaskReg); 2507 I.eraseFromParent(); 2508 return true; 2509 } 2510 2511 Register HiReg = MRI->createVirtualRegister(&RegRC); 2512 Register LoReg = MRI->createVirtualRegister(&RegRC); 2513 2514 // Extract the subregisters from the source pointer. 2515 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) 2516 .addReg(SrcReg, 0, AMDGPU::sub0); 2517 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) 2518 .addReg(SrcReg, 0, AMDGPU::sub1); 2519 2520 Register MaskedLo, MaskedHi; 2521 2522 // Try to avoid emitting a bit operation when we only need to touch half of 2523 // the 64-bit pointer. 2524 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64); 2525 2526 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); 2527 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); 2528 if ((MaskOnes & MaskLo32) == MaskLo32) { 2529 // If all the bits in the low half are 1, we only need a copy for it. 2530 MaskedLo = LoReg; 2531 } else { 2532 // Extract the mask subregister and apply the and. 2533 Register MaskLo = MRI->createVirtualRegister(&RegRC); 2534 MaskedLo = MRI->createVirtualRegister(&RegRC); 2535 2536 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) 2537 .addReg(MaskReg, 0, AMDGPU::sub0); 2538 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) 2539 .addReg(LoReg) 2540 .addReg(MaskLo); 2541 } 2542 2543 if ((MaskOnes & MaskHi32) == MaskHi32) { 2544 // If all the bits in the high half are 1, we only need a copy for it. 
2545 MaskedHi = HiReg; 2546 } else { 2547 Register MaskHi = MRI->createVirtualRegister(&RegRC); 2548 MaskedHi = MRI->createVirtualRegister(&RegRC); 2549 2550 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) 2551 .addReg(MaskReg, 0, AMDGPU::sub1); 2552 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) 2553 .addReg(HiReg) 2554 .addReg(MaskHi); 2555 } 2556 2557 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2558 .addReg(MaskedLo) 2559 .addImm(AMDGPU::sub0) 2560 .addReg(MaskedHi) 2561 .addImm(AMDGPU::sub1); 2562 I.eraseFromParent(); 2563 return true; 2564 } 2565 2566 /// Return the register to use for the index value, and the subregister to use 2567 /// for the indirectly accessed register. 2568 static std::pair<Register, unsigned> 2569 computeIndirectRegIndex(MachineRegisterInfo &MRI, 2570 const SIRegisterInfo &TRI, 2571 const TargetRegisterClass *SuperRC, 2572 Register IdxReg, 2573 unsigned EltSize) { 2574 Register IdxBaseReg; 2575 int Offset; 2576 2577 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); 2578 if (IdxBaseReg == AMDGPU::NoRegister) { 2579 // This will happen if the index is a known constant. This should ordinarily 2580 // be legalized out, but handle it as a register just in case. 2581 assert(Offset == 0); 2582 IdxBaseReg = IdxReg; 2583 } 2584 2585 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); 2586 2587 // Skip out of bounds offsets, or else we would end up using an undefined 2588 // register. 2589 if (static_cast<unsigned>(Offset) >= SubRegs.size()) 2590 return std::make_pair(IdxReg, SubRegs[0]); 2591 return std::make_pair(IdxBaseReg, SubRegs[Offset]); 2592 } 2593 2594 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( 2595 MachineInstr &MI) const { 2596 Register DstReg = MI.getOperand(0).getReg(); 2597 Register SrcReg = MI.getOperand(1).getReg(); 2598 Register IdxReg = MI.getOperand(2).getReg(); 2599 2600 LLT DstTy = MRI->getType(DstReg); 2601 LLT SrcTy = MRI->getType(SrcReg); 2602 2603 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2604 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2605 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2606 2607 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2608 // into a waterfall loop. 2609 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2610 return false; 2611 2612 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB, 2613 *MRI); 2614 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB, 2615 *MRI); 2616 if (!SrcRC || !DstRC) 2617 return false; 2618 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2619 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2620 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2621 return false; 2622 2623 MachineBasicBlock *BB = MI.getParent(); 2624 const DebugLoc &DL = MI.getDebugLoc(); 2625 const bool Is64 = DstTy.getSizeInBits() == 64; 2626 2627 unsigned SubReg; 2628 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg, 2629 DstTy.getSizeInBits() / 8); 2630 2631 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { 2632 if (DstTy.getSizeInBits() != 32 && !Is64) 2633 return false; 2634 2635 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2636 .addReg(IdxReg); 2637 2638 unsigned Opc = Is64 ? 
AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
    BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
      .addReg(SrcReg, 0, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
    return false;

  if (!STI.useVGPRIndexMode()) {
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(IdxReg);
    BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
      .addReg(SrcReg, 0, SubReg)
      .addReg(SrcReg, RegState::Implicit);
    MI.eraseFromParent();
    return true;
  }

  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
    .addReg(IdxReg)
    .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
    .addReg(SrcReg, 0, SubReg)
    .addReg(SrcReg, RegState::Implicit)
    .addReg(AMDGPU::M0, RegState::Implicit);
  BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));

  MI.eraseFromParent();
  return true;
}

// TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
  MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register VecReg = MI.getOperand(1).getReg();
  Register ValReg = MI.getOperand(2).getReg();
  Register IdxReg = MI.getOperand(3).getReg();

  LLT VecTy = MRI->getType(DstReg);
  LLT ValTy = MRI->getType(ValReg);
  unsigned VecSize = VecTy.getSizeInBits();
  unsigned ValSize = ValTy.getSizeInBits();

  const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
  const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
  const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);

  assert(VecTy.getElementType() == ValTy);

  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
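  // A divergent index surviving to this point would be a RegBankSelect bug;
  // the waterfall loop it inserts reads the index as a uniform value in each
  // iteration.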
2693 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2694 return false; 2695 2696 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB, 2697 *MRI); 2698 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB, 2699 *MRI); 2700 2701 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || 2702 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || 2703 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || 2704 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2705 return false; 2706 2707 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) 2708 return false; 2709 2710 unsigned SubReg; 2711 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, 2712 ValSize / 8); 2713 2714 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && 2715 STI.useVGPRIndexMode(); 2716 2717 MachineBasicBlock *BB = MI.getParent(); 2718 const DebugLoc &DL = MI.getDebugLoc(); 2719 2720 if (IndexMode) { 2721 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON)) 2722 .addReg(IdxReg) 2723 .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE); 2724 } else { 2725 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2726 .addReg(IdxReg); 2727 } 2728 2729 const MCInstrDesc &RegWriteOp 2730 = TII.getIndirectRegWritePseudo(VecSize, ValSize, 2731 VecRB->getID() == AMDGPU::SGPRRegBankID); 2732 BuildMI(*BB, MI, DL, RegWriteOp, DstReg) 2733 .addReg(VecReg) 2734 .addReg(ValReg) 2735 .addImm(SubReg); 2736 2737 if (IndexMode) 2738 BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF)); 2739 2740 MI.eraseFromParent(); 2741 return true; 2742 } 2743 2744 static bool isZeroOrUndef(int X) { 2745 return X == 0 || X == -1; 2746 } 2747 2748 static bool isOneOrUndef(int X) { 2749 return X == 1 || X == -1; 2750 } 2751 2752 static bool isZeroOrOneOrUndef(int X) { 2753 return X == 0 || X == 1 || X == -1; 2754 } 2755 2756 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single 2757 // 32-bit register. 2758 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1, 2759 ArrayRef<int> Mask) { 2760 NewMask[0] = Mask[0]; 2761 NewMask[1] = Mask[1]; 2762 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1])) 2763 return Src0; 2764 2765 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1); 2766 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1); 2767 2768 // Shift the mask inputs to be 0/1; 2769 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2; 2770 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2; 2771 return Src1; 2772 } 2773 2774 // This is only legal with VOP3P instructions as an aid to op_sel matching. 
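// Every mask accepted by isLegalVOP3PShuffleMask lowers below to at most one
// shift, pack, or SDWA mov (the scalar swap case needs a shift plus a pack).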
2775 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR( 2776 MachineInstr &MI) const { 2777 Register DstReg = MI.getOperand(0).getReg(); 2778 Register Src0Reg = MI.getOperand(1).getReg(); 2779 Register Src1Reg = MI.getOperand(2).getReg(); 2780 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask(); 2781 2782 const LLT V2S16 = LLT::vector(2, 16); 2783 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16) 2784 return false; 2785 2786 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask)) 2787 return false; 2788 2789 assert(ShufMask.size() == 2); 2790 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA"); 2791 2792 MachineBasicBlock *MBB = MI.getParent(); 2793 const DebugLoc &DL = MI.getDebugLoc(); 2794 2795 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2796 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 2797 const TargetRegisterClass &RC = IsVALU ? 2798 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2799 2800 // Handle the degenerate case which should have folded out. 2801 if (ShufMask[0] == -1 && ShufMask[1] == -1) { 2802 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg); 2803 2804 MI.eraseFromParent(); 2805 return RBI.constrainGenericRegister(DstReg, RC, *MRI); 2806 } 2807 2808 // A legal VOP3P mask only reads one of the sources. 2809 int Mask[2]; 2810 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask); 2811 2812 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) || 2813 !RBI.constrainGenericRegister(SrcVec, RC, *MRI)) 2814 return false; 2815 2816 // TODO: This also should have been folded out 2817 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) { 2818 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg) 2819 .addReg(SrcVec); 2820 2821 MI.eraseFromParent(); 2822 return true; 2823 } 2824 2825 if (Mask[0] == 1 && Mask[1] == -1) { 2826 if (IsVALU) { 2827 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 2828 .addImm(16) 2829 .addReg(SrcVec); 2830 } else { 2831 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 2832 .addReg(SrcVec) 2833 .addImm(16); 2834 } 2835 } else if (Mask[0] == -1 && Mask[1] == 0) { 2836 if (IsVALU) { 2837 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg) 2838 .addImm(16) 2839 .addReg(SrcVec); 2840 } else { 2841 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg) 2842 .addReg(SrcVec) 2843 .addImm(16); 2844 } 2845 } else if (Mask[0] == 0 && Mask[1] == 0) { 2846 if (IsVALU) { 2847 // Write low half of the register into the high half. 2848 MachineInstr *MovSDWA = 2849 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2850 .addImm(0) // $src0_modifiers 2851 .addReg(SrcVec) // $src0 2852 .addImm(0) // $clamp 2853 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 2854 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2855 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 2856 .addReg(SrcVec, RegState::Implicit); 2857 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2858 } else { 2859 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2860 .addReg(SrcVec) 2861 .addReg(SrcVec); 2862 } 2863 } else if (Mask[0] == 1 && Mask[1] == 1) { 2864 if (IsVALU) { 2865 // Write high half of the register into the low half. 
2866 MachineInstr *MovSDWA = 2867 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2868 .addImm(0) // $src0_modifiers 2869 .addReg(SrcVec) // $src0 2870 .addImm(0) // $clamp 2871 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel 2872 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2873 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel 2874 .addReg(SrcVec, RegState::Implicit); 2875 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2876 } else { 2877 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg) 2878 .addReg(SrcVec) 2879 .addReg(SrcVec); 2880 } 2881 } else if (Mask[0] == 1 && Mask[1] == 0) { 2882 if (IsVALU) { 2883 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg) 2884 .addReg(SrcVec) 2885 .addReg(SrcVec) 2886 .addImm(16); 2887 } else { 2888 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2889 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg) 2890 .addReg(SrcVec) 2891 .addImm(16); 2892 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2893 .addReg(TmpReg) 2894 .addReg(SrcVec); 2895 } 2896 } else 2897 llvm_unreachable("all shuffle masks should be handled"); 2898 2899 MI.eraseFromParent(); 2900 return true; 2901 } 2902 2903 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD( 2904 MachineInstr &MI) const { 2905 2906 MachineBasicBlock *MBB = MI.getParent(); 2907 const DebugLoc &DL = MI.getDebugLoc(); 2908 2909 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2910 Function &F = MBB->getParent()->getFunction(); 2911 DiagnosticInfoUnsupported 2912 NoFpRet(F, "return versions of fp atomics not supported", 2913 MI.getDebugLoc(), DS_Error); 2914 F.getContext().diagnose(NoFpRet); 2915 return false; 2916 } 2917 2918 // FIXME: This is only needed because tablegen requires number of dst operands 2919 // in match and replace pattern to be the same. Otherwise patterns can be 2920 // exported from SDag path. 2921 MachineOperand &VDataIn = MI.getOperand(1); 2922 MachineOperand &VIndex = MI.getOperand(3); 2923 MachineOperand &VOffset = MI.getOperand(4); 2924 MachineOperand &SOffset = MI.getOperand(5); 2925 int16_t Offset = MI.getOperand(6).getImm(); 2926 2927 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI); 2928 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI); 2929 2930 unsigned Opcode; 2931 if (HasVOffset) { 2932 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN 2933 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN; 2934 } else { 2935 Opcode = HasVIndex ? 
AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN 2936 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET; 2937 } 2938 2939 if (MRI->getType(VDataIn.getReg()).isVector()) { 2940 switch (Opcode) { 2941 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN: 2942 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN; 2943 break; 2944 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN: 2945 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN; 2946 break; 2947 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN: 2948 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN; 2949 break; 2950 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET: 2951 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET; 2952 break; 2953 } 2954 } 2955 2956 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode)); 2957 I.add(VDataIn); 2958 2959 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN || 2960 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) { 2961 Register IdxReg = MRI->createVirtualRegister(&AMDGPU::VReg_64RegClass); 2962 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 2963 .addReg(VIndex.getReg()) 2964 .addImm(AMDGPU::sub0) 2965 .addReg(VOffset.getReg()) 2966 .addImm(AMDGPU::sub1); 2967 2968 I.addReg(IdxReg); 2969 } else if (HasVIndex) { 2970 I.add(VIndex); 2971 } else if (HasVOffset) { 2972 I.add(VOffset); 2973 } 2974 2975 I.add(MI.getOperand(2)); // rsrc 2976 I.add(SOffset); 2977 I.addImm(Offset); 2978 renderExtractSLC(I, MI, 7); 2979 I.cloneMemRefs(MI); 2980 2981 MI.eraseFromParent(); 2982 2983 return true; 2984 } 2985 2986 bool AMDGPUInstructionSelector::selectGlobalAtomicFaddIntrinsic( 2987 MachineInstr &MI) const{ 2988 2989 MachineBasicBlock *MBB = MI.getParent(); 2990 const DebugLoc &DL = MI.getDebugLoc(); 2991 2992 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2993 Function &F = MBB->getParent()->getFunction(); 2994 DiagnosticInfoUnsupported 2995 NoFpRet(F, "return versions of fp atomics not supported", 2996 MI.getDebugLoc(), DS_Error); 2997 F.getContext().diagnose(NoFpRet); 2998 return false; 2999 } 3000 3001 // FIXME: This is only needed because tablegen requires number of dst operands 3002 // in match and replace pattern to be the same. Otherwise patterns can be 3003 // exported from SDag path. 3004 auto Addr = selectFlatOffsetImpl<true>(MI.getOperand(2)); 3005 3006 Register Data = MI.getOperand(3).getReg(); 3007 const unsigned Opc = MRI->getType(Data).isVector() ? 
3008 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32; 3009 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3010 .addReg(Addr.first) 3011 .addReg(Data) 3012 .addImm(Addr.second) 3013 .addImm(0) // SLC 3014 .cloneMemRefs(MI); 3015 3016 MI.eraseFromParent(); 3017 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3018 } 3019 3020 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{ 3021 MI.setDesc(TII.get(MI.getOperand(1).getImm())); 3022 MI.RemoveOperand(1); 3023 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3024 return true; 3025 } 3026 3027 bool AMDGPUInstructionSelector::select(MachineInstr &I) { 3028 if (I.isPHI()) 3029 return selectPHI(I); 3030 3031 if (!I.isPreISelOpcode()) { 3032 if (I.isCopy()) 3033 return selectCOPY(I); 3034 return true; 3035 } 3036 3037 switch (I.getOpcode()) { 3038 case TargetOpcode::G_AND: 3039 case TargetOpcode::G_OR: 3040 case TargetOpcode::G_XOR: 3041 if (selectImpl(I, *CoverageInfo)) 3042 return true; 3043 return selectG_AND_OR_XOR(I); 3044 case TargetOpcode::G_ADD: 3045 case TargetOpcode::G_SUB: 3046 if (selectImpl(I, *CoverageInfo)) 3047 return true; 3048 return selectG_ADD_SUB(I); 3049 case TargetOpcode::G_UADDO: 3050 case TargetOpcode::G_USUBO: 3051 case TargetOpcode::G_UADDE: 3052 case TargetOpcode::G_USUBE: 3053 return selectG_UADDO_USUBO_UADDE_USUBE(I); 3054 case TargetOpcode::G_INTTOPTR: 3055 case TargetOpcode::G_BITCAST: 3056 case TargetOpcode::G_PTRTOINT: 3057 return selectCOPY(I); 3058 case TargetOpcode::G_CONSTANT: 3059 case TargetOpcode::G_FCONSTANT: 3060 return selectG_CONSTANT(I); 3061 case TargetOpcode::G_FNEG: 3062 if (selectImpl(I, *CoverageInfo)) 3063 return true; 3064 return selectG_FNEG(I); 3065 case TargetOpcode::G_FABS: 3066 if (selectImpl(I, *CoverageInfo)) 3067 return true; 3068 return selectG_FABS(I); 3069 case TargetOpcode::G_EXTRACT: 3070 return selectG_EXTRACT(I); 3071 case TargetOpcode::G_MERGE_VALUES: 3072 case TargetOpcode::G_BUILD_VECTOR: 3073 case TargetOpcode::G_CONCAT_VECTORS: 3074 return selectG_MERGE_VALUES(I); 3075 case TargetOpcode::G_UNMERGE_VALUES: 3076 return selectG_UNMERGE_VALUES(I); 3077 case TargetOpcode::G_BUILD_VECTOR_TRUNC: 3078 return selectG_BUILD_VECTOR_TRUNC(I); 3079 case TargetOpcode::G_PTR_ADD: 3080 return selectG_PTR_ADD(I); 3081 case TargetOpcode::G_IMPLICIT_DEF: 3082 return selectG_IMPLICIT_DEF(I); 3083 case TargetOpcode::G_FREEZE: 3084 return selectCOPY(I); 3085 case TargetOpcode::G_INSERT: 3086 return selectG_INSERT(I); 3087 case TargetOpcode::G_INTRINSIC: 3088 return selectG_INTRINSIC(I); 3089 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 3090 return selectG_INTRINSIC_W_SIDE_EFFECTS(I); 3091 case TargetOpcode::G_ICMP: 3092 if (selectG_ICMP(I)) 3093 return true; 3094 return selectImpl(I, *CoverageInfo); 3095 case TargetOpcode::G_LOAD: 3096 case TargetOpcode::G_STORE: 3097 case TargetOpcode::G_ATOMIC_CMPXCHG: 3098 case TargetOpcode::G_ATOMICRMW_XCHG: 3099 case TargetOpcode::G_ATOMICRMW_ADD: 3100 case TargetOpcode::G_ATOMICRMW_SUB: 3101 case TargetOpcode::G_ATOMICRMW_AND: 3102 case TargetOpcode::G_ATOMICRMW_OR: 3103 case TargetOpcode::G_ATOMICRMW_XOR: 3104 case TargetOpcode::G_ATOMICRMW_MIN: 3105 case TargetOpcode::G_ATOMICRMW_MAX: 3106 case TargetOpcode::G_ATOMICRMW_UMIN: 3107 case TargetOpcode::G_ATOMICRMW_UMAX: 3108 case TargetOpcode::G_ATOMICRMW_FADD: 3109 case AMDGPU::G_AMDGPU_ATOMIC_INC: 3110 case AMDGPU::G_AMDGPU_ATOMIC_DEC: 3111 case AMDGPU::G_AMDGPU_ATOMIC_FMIN: 3112 case AMDGPU::G_AMDGPU_ATOMIC_FMAX: 3113 return 
selectG_LOAD_STORE_ATOMICRMW(I); 3114 case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: 3115 return selectG_AMDGPU_ATOMIC_CMPXCHG(I); 3116 case TargetOpcode::G_SELECT: 3117 return selectG_SELECT(I); 3118 case TargetOpcode::G_TRUNC: 3119 return selectG_TRUNC(I); 3120 case TargetOpcode::G_SEXT: 3121 case TargetOpcode::G_ZEXT: 3122 case TargetOpcode::G_ANYEXT: 3123 case TargetOpcode::G_SEXT_INREG: 3124 if (selectImpl(I, *CoverageInfo)) 3125 return true; 3126 return selectG_SZA_EXT(I); 3127 case TargetOpcode::G_BRCOND: 3128 return selectG_BRCOND(I); 3129 case TargetOpcode::G_GLOBAL_VALUE: 3130 return selectG_GLOBAL_VALUE(I); 3131 case TargetOpcode::G_PTRMASK: 3132 return selectG_PTRMASK(I); 3133 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 3134 return selectG_EXTRACT_VECTOR_ELT(I); 3135 case TargetOpcode::G_INSERT_VECTOR_ELT: 3136 return selectG_INSERT_VECTOR_ELT(I); 3137 case TargetOpcode::G_SHUFFLE_VECTOR: 3138 return selectG_SHUFFLE_VECTOR(I); 3139 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: 3140 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: { 3141 const AMDGPU::ImageDimIntrinsicInfo *Intr 3142 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); 3143 assert(Intr && "not an image intrinsic with image pseudo"); 3144 return selectImageIntrinsic(I, Intr); 3145 } 3146 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: 3147 return selectBVHIntrinsic(I); 3148 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: 3149 return selectAMDGPU_BUFFER_ATOMIC_FADD(I); 3150 default: 3151 return selectImpl(I, *CoverageInfo); 3152 } 3153 return false; 3154 } 3155 3156 InstructionSelector::ComplexRendererFns 3157 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { 3158 return {{ 3159 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3160 }}; 3161 3162 } 3163 3164 std::pair<Register, unsigned> 3165 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root, 3166 bool AllowAbs) const { 3167 Register Src = Root.getReg(); 3168 Register OrigSrc = Src; 3169 unsigned Mods = 0; 3170 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); 3171 3172 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) { 3173 Src = MI->getOperand(1).getReg(); 3174 Mods |= SISrcMods::NEG; 3175 MI = getDefIgnoringCopies(Src, *MRI); 3176 } 3177 3178 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) { 3179 Src = MI->getOperand(1).getReg(); 3180 Mods |= SISrcMods::ABS; 3181 } 3182 3183 if (Mods != 0 && 3184 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { 3185 MachineInstr *UseMI = Root.getParent(); 3186 3187 // If we looked through copies to find source modifiers on an SGPR operand, 3188 // we now have an SGPR register source. To avoid potentially violating the 3189 // constant bus restriction, we need to insert a copy to a VGPR. 3190 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc); 3191 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(), 3192 TII.get(AMDGPU::COPY), VGPRSrc) 3193 .addReg(Src); 3194 Src = VGPRSrc; 3195 } 3196 3197 return std::make_pair(Src, Mods); 3198 } 3199 3200 /// 3201 /// This will select either an SGPR or VGPR operand and will save us from 3202 /// having to write an extra tablegen pattern. 
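/// The operand is rendered back unchanged, so either bank is acceptable;
/// register class requirements are enforced later when the selected
/// instruction's operands are constrained.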
3203 InstructionSelector::ComplexRendererFns 3204 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { 3205 return {{ 3206 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3207 }}; 3208 } 3209 3210 InstructionSelector::ComplexRendererFns 3211 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { 3212 Register Src; 3213 unsigned Mods; 3214 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3215 3216 return {{ 3217 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3218 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3219 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3220 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3221 }}; 3222 } 3223 3224 InstructionSelector::ComplexRendererFns 3225 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { 3226 Register Src; 3227 unsigned Mods; 3228 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3229 3230 return {{ 3231 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3232 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3233 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3234 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3235 }}; 3236 } 3237 3238 InstructionSelector::ComplexRendererFns 3239 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { 3240 return {{ 3241 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, 3242 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3243 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3244 }}; 3245 } 3246 3247 InstructionSelector::ComplexRendererFns 3248 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { 3249 Register Src; 3250 unsigned Mods; 3251 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3252 3253 return {{ 3254 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3255 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3256 }}; 3257 } 3258 3259 InstructionSelector::ComplexRendererFns 3260 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { 3261 Register Src; 3262 unsigned Mods; 3263 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3264 3265 return {{ 3266 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3267 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3268 }}; 3269 } 3270 3271 InstructionSelector::ComplexRendererFns 3272 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { 3273 Register Reg = Root.getReg(); 3274 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); 3275 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG || 3276 Def->getOpcode() == AMDGPU::G_FABS)) 3277 return {}; 3278 return {{ 3279 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3280 }}; 3281 } 3282 3283 std::pair<Register, unsigned> 3284 AMDGPUInstructionSelector::selectVOP3PModsImpl( 3285 Register Src, const MachineRegisterInfo &MRI) const { 3286 unsigned Mods = 0; 3287 MachineInstr *MI = MRI.getVRegDef(Src); 3288 3289 if (MI && MI->getOpcode() == AMDGPU::G_FNEG && 3290 // It's possible to see an f32 fneg here, but unlikely. 3291 // TODO: Treat f32 fneg as only high bit. 3292 MRI.getType(Src) == LLT::vector(2, 16)) { 3293 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 3294 Src = MI->getOperand(1).getReg(); 3295 MI = MRI.getVRegDef(Src); 3296 } 3297 3298 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. 3299 3300 // Packed instructions do not have abs modifiers. 
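  // op_sel_hi defaults to set: each packed source supplies its high half to
  // the high half of the result, which is the neutral encoding for packed
  // math.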
  Mods |= SISrcMods::OP_SEL_1;

  return std::make_pair(Src, Mods);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  // Walk from the operand to its function: MI -> MBB -> MF.
  MachineRegisterInfo &MRI
      = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
  if (!isKnownNeverNaN(Src, *MRI))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
  if (!EncodedImm)
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Register PtrReg = GEPInfo.SgprParts[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
  if (!EncodedImm)
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

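// Illustrative example (hypothetical MIR): for an SMRD load whose address is
//   %c:sgpr(s64) = G_CONSTANT i64 16
//   %ptr:sgpr(p4) = G_PTR_ADD %base, %c
// the _IMM forms above apply when 16 has a valid encoded immediate offset for
// the subtarget; selectSmrdSgpr below is the fallback that materializes the
// offset into an SGPR instead.
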
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, *MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  // SGPR offset is unsigned.
  if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
  Register PtrReg = GEPInfo.SgprParts[0];
  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}

template <bool Signed>
std::pair<Register, int>
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();

  auto Default = std::make_pair(Root.getReg(), 0);

  if (!STI.hasFlatInstOffsets())
    return Default;

  const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
  if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
    return Default;

  Optional<int64_t> Offset =
      getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
  if (!Offset.hasValue())
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
    return Default;

  Register BasePtr = OpDef->getOperand(1).getReg();

  return std::make_pair(BasePtr, Offset.getValue());
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
  }};
}

/// Match a zero extend from a 32-bit value to 64-bits.
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
  Register ZExtSrc;
  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
    return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();

  // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();

  int64_t MergeRHS;
  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(MergeRHS)) &&
      MergeRHS == 0) {
    return Def->getOperand(1).getReg();
  }

  return Register();
}

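// Illustrative example (hypothetical MIR): matchZeroExtendFromS32 recognizes
// both
//   %zext:_(s64) = G_ZEXT %x:_(s32)
// and the legalized form
//   %zero:_(s32) = G_CONSTANT i32 0
//   %zext:_(s64) = G_MERGE_VALUES %x:_(s32), %zero:_(s32)
// returning %x in either case.
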
// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
  Register PtrBase;
  int64_t ImmOffset;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ImmOffset) = getPtrBaseWithConstantOffset(Root.getReg(),
                                                              *MRI);

  // TODO: Could split larger constant into VGPR offset.
  if (ImmOffset != 0 &&
      !TII.isLegalFLATOffset(ImmOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
    PtrBase = Root.getReg();
    ImmOffset = 0;
  }

  // Match the variable offset.
  const MachineInstr *PtrBaseDef = getDefIgnoringCopies(PtrBase, *MRI);
  if (PtrBaseDef->getOpcode() != AMDGPU::G_PTR_ADD)
    return None;

  // Look through the SGPR->VGPR copy.
  Register PtrBaseSrc =
      getSrcRegIgnoringCopies(PtrBaseDef->getOperand(1).getReg(), *MRI);
  if (!PtrBaseSrc)
    return None;

  const RegisterBank *BaseRB = RBI.getRegBank(PtrBaseSrc, *MRI, TRI);
  if (BaseRB->getID() != AMDGPU::SGPRRegBankID)
    return None;

  Register SAddr = PtrBaseSrc;
  Register PtrBaseOffset = PtrBaseDef->getOperand(2).getReg();

  // It's possible voffset is an SGPR here, but the copy to VGPR will be
  // inserted later.
  Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
  if (!VOffset)
    return None;

  return {{[=](MachineInstrBuilder &MIB) { // saddr
             MIB.addReg(SAddr);
           },
           [=](MachineInstrBuilder &MIB) { // voffset
             MIB.addReg(VOffset);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(ImmOffset);
           }}};
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
      Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
    Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
        .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               const MachineMemOperand *MMO = *MI->memoperands_begin();
               const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

               if (isStackPtrRelative(PtrInfo))
                 MIB.addReg(Info->getStackPtrOffsetReg());
               else
                 MIB.addImm(0);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0 || Offset == -1);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
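  // Illustrative example (hypothetical MIR): for
  //   %fi:vgpr(p5) = G_FRAME_INDEX %stack.0
  //   %ptr:vgpr(p5) = G_PTR_ADD %fi, %c
  // the frame index can go in the vaddr field and, if %c is a legal MUBUF
  // immediate, the constant can go in the offset field.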
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    if (isBaseWithConstantOffset(Root, *MRI)) {
      const MachineOperand &LHS = RootDef->getOperand(1);
      const MachineOperand &RHS = RootDef->getOperand(2);
      const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
      const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
      if (LHSDef && RHSDef) {
        int64_t PossibleOffset =
            RHSDef->getOperand(1).getCImm()->getSExtValue();
        if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
            (!STI.privateMemoryResourceIsRangeChecked() ||
             KnownBits->signBitIsZero(LHS.getReg()))) {
          if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
            FI = LHSDef->getOperand(1).getIndex();
          else
            VAddr = LHS.getReg();
          Offset = PossibleOffset;
        }
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // If we don't know this private access is a local stack object,
             // it needs to be relative to the entry point's scratch wave
             // offset.
             // TODO: Should split large offsets that don't fit like above.
             // TODO: Don't use scratch wave offset just because the offset
             // didn't fit.
             if (!Info->isEntryFunction() && FI.hasValue())
               MIB.addReg(Info->getStackPtrOffsetReg());
             else
               MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an
  // offset don't seem to work.
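  // Conservatively require the base to be known non-negative, i.e. its sign
  // bit must be provably zero.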
  return KnownBits->signBitIsZero(Base);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  int64_t Offset = 0;
  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  const MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  const MachineMemOperand *MMO = *MI->memoperands_begin();
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (isStackPtrRelative(PtrInfo))
          MIB.addReg(Info->getStackPtrOffsetReg());
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
  }

  return std::make_pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 4);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 8);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset + 1); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

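  // Illustrative example (assuming Size == 4): a base %p with a byte offset
  // of 8 yields encoded offsets offset0 = 2 and offset1 = 3, since the
  // ds_read2/ds_write2 offsets are encoded in units of the element size and
  // the renderer emits {Offset, Offset + 1}.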
  if (Offset) {
    int64_t OffsetValue0 = Offset;
    int64_t OffsetValue1 = Offset + Size;
    if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, OffsetValue0 / Size);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO
  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO
  }

  return std::make_pair(Root.getReg(), 0);
}

/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this
/// does not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
    Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  Optional<ValueAndVReg> MaybeOffset
      = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
}

static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If
/// \p BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
      .addDef(RSrc2)
      .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
      .addDef(RSrc3)
      .addImm(FormatHi);

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
      .addDef(RSrcHi)
      .addReg(RSrc2)
      .addImm(AMDGPU::sub0)
      .addReg(RSrc3)
      .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
        .addDef(RSrcLo)
        .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
      .addDef(RSrc)
      .addReg(RSrcLo)
      .addImm(AMDGPU::sub0_sub1)
      .addReg(RSrcHi)
      .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

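// Layout of the descriptor built above (for reference):
//   sub0_sub1 = BasePtr (or 0 if no base pointer was given)
//   sub2      = FormatLo
//   sub3      = FormatHi
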
static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: This assumes the values are defined by operand 0 of their defs,
    // which is not guaranteed.
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}

/// Return true if the addr64 mubuf mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
    MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
      .addDef(SOffset)
      .addImm(ImmOffset);
  ImmOffset = 0;
}

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
    MachineOperand &Root, Register &VAddr, Register &RSrcReg,
    Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // addr64 bit was removed for volcanic islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
    MachineOperand &Root, Register &RSrcReg, Register &SOffset,
    int64_t &Offset) const {
  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, // glc
      addZeroImm, // slc
      addZeroImm, // tfe
      addZeroImm, // dlc
      addZeroImm  // swz
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, // glc
      addZeroImm, // slc
      addZeroImm, // tfe
      addZeroImm, // dlc
      addZeroImm  // swz
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm // slc
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm // slc
  }};
}

/// Get an immediate that must be 32-bits, and treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getConstantVRegVal sexts any values, so see if that matters.
  Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm
      = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

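// Illustrative example: an f32 G_FCONSTANT of 1.0 renders as the immediate
// 0x3f800000, i.e. the raw bit pattern of the float.
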
G_CONSTANT"); 4198 MIB.addImm(Op.getCImm()->getSExtValue()); 4199 } 4200 } 4201 4202 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB, 4203 const MachineInstr &MI, 4204 int OpIdx) const { 4205 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4206 "Expected G_CONSTANT"); 4207 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation()); 4208 } 4209 4210 /// This only really exists to satisfy DAG type checking machinery, so is a 4211 /// no-op here. 4212 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB, 4213 const MachineInstr &MI, 4214 int OpIdx) const { 4215 MIB.addImm(MI.getOperand(OpIdx).getImm()); 4216 } 4217 4218 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB, 4219 const MachineInstr &MI, 4220 int OpIdx) const { 4221 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4222 MIB.addImm(MI.getOperand(OpIdx).getImm() & 1); 4223 } 4224 4225 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB, 4226 const MachineInstr &MI, 4227 int OpIdx) const { 4228 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4229 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1); 4230 } 4231 4232 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB, 4233 const MachineInstr &MI, 4234 int OpIdx) const { 4235 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4236 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1); 4237 } 4238 4239 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB, 4240 const MachineInstr &MI, 4241 int OpIdx) const { 4242 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4243 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1); 4244 } 4245 4246 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB, 4247 const MachineInstr &MI, 4248 int OpIdx) const { 4249 MIB.addFrameIndex((MI.getOperand(1).getIndex())); 4250 } 4251 4252 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const { 4253 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm()); 4254 } 4255 4256 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const { 4257 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm()); 4258 } 4259 4260 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const { 4261 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm()); 4262 } 4263 4264 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const { 4265 return TII.isInlineConstant(Imm); 4266 } 4267