1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the targeting of the InstructionSelector class for 10 /// AMDGPU. 11 /// \todo This should be generated by TableGen. 12 //===----------------------------------------------------------------------===// 13 14 #include "AMDGPUInstructionSelector.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUGlobalISelUtils.h" 17 #include "AMDGPUInstrInfo.h" 18 #include "AMDGPURegisterBankInfo.h" 19 #include "AMDGPUTargetMachine.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 22 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" 23 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 24 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 25 #include "llvm/IR/DiagnosticInfo.h" 26 27 #define DEBUG_TYPE "amdgpu-isel" 28 29 using namespace llvm; 30 using namespace MIPatternMatch; 31 32 static cl::opt<bool> AllowRiskySelect( 33 "amdgpu-global-isel-risky-select", 34 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"), 35 cl::init(false), 36 cl::ReallyHidden); 37 38 #define GET_GLOBALISEL_IMPL 39 #define AMDGPUSubtarget GCNSubtarget 40 #include "AMDGPUGenGlobalISel.inc" 41 #undef GET_GLOBALISEL_IMPL 42 #undef AMDGPUSubtarget 43 44 AMDGPUInstructionSelector::AMDGPUInstructionSelector( 45 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, 46 const AMDGPUTargetMachine &TM) 47 : InstructionSelector(), TII(*STI.getInstrInfo()), 48 TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), 49 STI(STI), 50 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG), 51 #define GET_GLOBALISEL_PREDICATES_INIT 52 #include "AMDGPUGenGlobalISel.inc" 53 #undef GET_GLOBALISEL_PREDICATES_INIT 54 #define GET_GLOBALISEL_TEMPORARIES_INIT 55 #include "AMDGPUGenGlobalISel.inc" 56 #undef GET_GLOBALISEL_TEMPORARIES_INIT 57 { 58 } 59 60 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; } 61 62 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, 63 CodeGenCoverage &CoverageInfo, 64 ProfileSummaryInfo *PSI, 65 BlockFrequencyInfo *BFI) { 66 MRI = &MF.getRegInfo(); 67 Subtarget = &MF.getSubtarget<GCNSubtarget>(); 68 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); 69 } 70 71 bool AMDGPUInstructionSelector::isVCC(Register Reg, 72 const MachineRegisterInfo &MRI) const { 73 // The verifier is oblivious to s1 being a valid value for wavesize registers. 74 if (Reg.isPhysical()) 75 return false; 76 77 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 78 const TargetRegisterClass *RC = 79 RegClassOrBank.dyn_cast<const TargetRegisterClass*>(); 80 if (RC) { 81 const LLT Ty = MRI.getType(Reg); 82 return RC->hasSuperClassEq(TRI.getBoolRC()) && 83 Ty.isValid() && Ty.getSizeInBits() == 1; 84 } 85 86 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); 87 return RB->getID() == AMDGPU::VCCRegBankID; 88 } 89 90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI, 91 unsigned NewOpc) const { 92 MI.setDesc(TII.get(NewOpc)); 93 MI.RemoveOperand(1); // Remove intrinsic ID. 
94 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 95 96 MachineOperand &Dst = MI.getOperand(0); 97 MachineOperand &Src = MI.getOperand(1); 98 99 // TODO: This should be legalized to s32 if needed 100 if (MRI->getType(Dst.getReg()) == LLT::scalar(1)) 101 return false; 102 103 const TargetRegisterClass *DstRC 104 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 105 const TargetRegisterClass *SrcRC 106 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 107 if (!DstRC || DstRC != SrcRC) 108 return false; 109 110 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && 111 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); 112 } 113 114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { 115 const DebugLoc &DL = I.getDebugLoc(); 116 MachineBasicBlock *BB = I.getParent(); 117 I.setDesc(TII.get(TargetOpcode::COPY)); 118 119 const MachineOperand &Src = I.getOperand(1); 120 MachineOperand &Dst = I.getOperand(0); 121 Register DstReg = Dst.getReg(); 122 Register SrcReg = Src.getReg(); 123 124 if (isVCC(DstReg, *MRI)) { 125 if (SrcReg == AMDGPU::SCC) { 126 const TargetRegisterClass *RC 127 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 128 if (!RC) 129 return true; 130 return RBI.constrainGenericRegister(DstReg, *RC, *MRI); 131 } 132 133 if (!isVCC(SrcReg, *MRI)) { 134 // TODO: Should probably leave the copy and let copyPhysReg expand it. 135 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) 136 return false; 137 138 const TargetRegisterClass *SrcRC 139 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 140 141 Optional<ValueAndVReg> ConstVal = 142 getConstantVRegValWithLookThrough(SrcReg, *MRI, true, true); 143 if (ConstVal) { 144 unsigned MovOpc = 145 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 146 BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg) 147 .addImm(ConstVal->Value.getBoolValue() ? -1 : 0); 148 } else { 149 Register MaskedReg = MRI->createVirtualRegister(SrcRC); 150 151 // We can't trust the high bits at this point, so clear them. 152 153 // TODO: Skip masking high bits if def is known boolean. 154 155 unsigned AndOpc = 156 TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; 157 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg) 158 .addImm(1) 159 .addReg(SrcReg); 160 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) 161 .addImm(0) 162 .addReg(MaskedReg); 163 } 164 165 if (!MRI->getRegClassOrNull(SrcReg)) 166 MRI->setRegClass(SrcReg, SrcRC); 167 I.eraseFromParent(); 168 return true; 169 } 170 171 const TargetRegisterClass *RC = 172 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 173 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 174 return false; 175 176 return true; 177 } 178 179 for (const MachineOperand &MO : I.operands()) { 180 if (MO.getReg().isPhysical()) 181 continue; 182 183 const TargetRegisterClass *RC = 184 TRI.getConstrainedRegClassForOperand(MO, *MRI); 185 if (!RC) 186 continue; 187 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); 188 } 189 return true; 190 } 191 192 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const { 193 const Register DefReg = I.getOperand(0).getReg(); 194 const LLT DefTy = MRI->getType(DefReg); 195 if (DefTy == LLT::scalar(1)) { 196 if (!AllowRiskySelect) { 197 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n"); 198 return false; 199 } 200 201 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n"); 202 } 203 204 // TODO: Verify this doesn't have insane operands (i.e. 
VGPR to SGPR copy) 205 206 const RegClassOrRegBank &RegClassOrBank = 207 MRI->getRegClassOrRegBank(DefReg); 208 209 const TargetRegisterClass *DefRC 210 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); 211 if (!DefRC) { 212 if (!DefTy.isValid()) { 213 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); 214 return false; 215 } 216 217 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); 218 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI); 219 if (!DefRC) { 220 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); 221 return false; 222 } 223 } 224 225 // TODO: Verify that all registers have the same bank 226 I.setDesc(TII.get(TargetOpcode::PHI)); 227 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); 228 } 229 230 MachineOperand 231 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, 232 const TargetRegisterClass &SubRC, 233 unsigned SubIdx) const { 234 235 MachineInstr *MI = MO.getParent(); 236 MachineBasicBlock *BB = MO.getParent()->getParent(); 237 Register DstReg = MRI->createVirtualRegister(&SubRC); 238 239 if (MO.isReg()) { 240 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); 241 Register Reg = MO.getReg(); 242 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) 243 .addReg(Reg, 0, ComposedSubIdx); 244 245 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(), 246 MO.isKill(), MO.isDead(), MO.isUndef(), 247 MO.isEarlyClobber(), 0, MO.isDebug(), 248 MO.isInternalRead()); 249 } 250 251 assert(MO.isImm()); 252 253 APInt Imm(64, MO.getImm()); 254 255 switch (SubIdx) { 256 default: 257 llvm_unreachable("do not know to split immediate with this sub index."); 258 case AMDGPU::sub0: 259 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue()); 260 case AMDGPU::sub1: 261 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue()); 262 } 263 } 264 265 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) { 266 switch (Opc) { 267 case AMDGPU::G_AND: 268 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 269 case AMDGPU::G_OR: 270 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32; 271 case AMDGPU::G_XOR: 272 return Is64 ? 
AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32; 273 default: 274 llvm_unreachable("not a bit op"); 275 } 276 } 277 278 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const { 279 Register DstReg = I.getOperand(0).getReg(); 280 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 281 282 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 283 if (DstRB->getID() != AMDGPU::SGPRRegBankID && 284 DstRB->getID() != AMDGPU::VCCRegBankID) 285 return false; 286 287 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID && 288 STI.isWave64()); 289 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64))); 290 291 // Dead implicit-def of scc 292 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef 293 true, // isImp 294 false, // isKill 295 true)); // isDead 296 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 297 } 298 299 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const { 300 MachineBasicBlock *BB = I.getParent(); 301 MachineFunction *MF = BB->getParent(); 302 Register DstReg = I.getOperand(0).getReg(); 303 const DebugLoc &DL = I.getDebugLoc(); 304 LLT Ty = MRI->getType(DstReg); 305 if (Ty.isVector()) 306 return false; 307 308 unsigned Size = Ty.getSizeInBits(); 309 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 310 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID; 311 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB; 312 313 if (Size == 32) { 314 if (IsSALU) { 315 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; 316 MachineInstr *Add = 317 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 318 .add(I.getOperand(1)) 319 .add(I.getOperand(2)); 320 I.eraseFromParent(); 321 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 322 } 323 324 if (STI.hasAddNoCarry()) { 325 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64; 326 I.setDesc(TII.get(Opc)); 327 I.addOperand(*MF, MachineOperand::CreateImm(0)); 328 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 329 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 330 } 331 332 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64; 333 334 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass()); 335 MachineInstr *Add 336 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 337 .addDef(UnusedCarry, RegState::Dead) 338 .add(I.getOperand(1)) 339 .add(I.getOperand(2)) 340 .addImm(0); 341 I.eraseFromParent(); 342 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 343 } 344 345 assert(!Sub && "illegal sub should not reach here"); 346 347 const TargetRegisterClass &RC 348 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass; 349 const TargetRegisterClass &HalfRC 350 = IsSALU ? 
AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass; 351 352 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0)); 353 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0)); 354 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1)); 355 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1)); 356 357 Register DstLo = MRI->createVirtualRegister(&HalfRC); 358 Register DstHi = MRI->createVirtualRegister(&HalfRC); 359 360 if (IsSALU) { 361 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo) 362 .add(Lo1) 363 .add(Lo2); 364 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi) 365 .add(Hi1) 366 .add(Hi2); 367 } else { 368 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass(); 369 Register CarryReg = MRI->createVirtualRegister(CarryRC); 370 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo) 371 .addDef(CarryReg) 372 .add(Lo1) 373 .add(Lo2) 374 .addImm(0); 375 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi) 376 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead) 377 .add(Hi1) 378 .add(Hi2) 379 .addReg(CarryReg, RegState::Kill) 380 .addImm(0); 381 382 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) 383 return false; 384 } 385 386 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 387 .addReg(DstLo) 388 .addImm(AMDGPU::sub0) 389 .addReg(DstHi) 390 .addImm(AMDGPU::sub1); 391 392 393 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 394 return false; 395 396 I.eraseFromParent(); 397 return true; 398 } 399 400 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE( 401 MachineInstr &I) const { 402 MachineBasicBlock *BB = I.getParent(); 403 MachineFunction *MF = BB->getParent(); 404 const DebugLoc &DL = I.getDebugLoc(); 405 Register Dst0Reg = I.getOperand(0).getReg(); 406 Register Dst1Reg = I.getOperand(1).getReg(); 407 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO || 408 I.getOpcode() == AMDGPU::G_UADDE; 409 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE || 410 I.getOpcode() == AMDGPU::G_USUBE; 411 412 if (isVCC(Dst1Reg, *MRI)) { 413 unsigned NoCarryOpc = 414 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 415 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 416 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc)); 417 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 418 I.addOperand(*MF, MachineOperand::CreateImm(0)); 419 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 420 } 421 422 Register Src0Reg = I.getOperand(2).getReg(); 423 Register Src1Reg = I.getOperand(3).getReg(); 424 425 if (HasCarryIn) { 426 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 427 .addReg(I.getOperand(4).getReg()); 428 } 429 430 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 431 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 432 433 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? 
CarryOpc : NoCarryOpc), Dst0Reg) 434 .add(I.getOperand(2)) 435 .add(I.getOperand(3)); 436 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg) 437 .addReg(AMDGPU::SCC); 438 439 if (!MRI->getRegClassOrNull(Dst1Reg)) 440 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass); 441 442 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || 443 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || 444 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) 445 return false; 446 447 if (HasCarryIn && 448 !RBI.constrainGenericRegister(I.getOperand(4).getReg(), 449 AMDGPU::SReg_32RegClass, *MRI)) 450 return false; 451 452 I.eraseFromParent(); 453 return true; 454 } 455 456 // TODO: We should probably legalize these to only using 32-bit results. 457 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const { 458 MachineBasicBlock *BB = I.getParent(); 459 Register DstReg = I.getOperand(0).getReg(); 460 Register SrcReg = I.getOperand(1).getReg(); 461 LLT DstTy = MRI->getType(DstReg); 462 LLT SrcTy = MRI->getType(SrcReg); 463 const unsigned SrcSize = SrcTy.getSizeInBits(); 464 unsigned DstSize = DstTy.getSizeInBits(); 465 466 // TODO: Should handle any multiple of 32 offset. 467 unsigned Offset = I.getOperand(2).getImm(); 468 if (Offset % 32 != 0 || DstSize > 128) 469 return false; 470 471 // 16-bit operations really use 32-bit registers. 472 // FIXME: Probably should not allow 16-bit G_EXTRACT results. 473 if (DstSize == 16) 474 DstSize = 32; 475 476 const TargetRegisterClass *DstRC = 477 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI); 478 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 479 return false; 480 481 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 482 const TargetRegisterClass *SrcRC = 483 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); 484 if (!SrcRC) 485 return false; 486 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32, 487 DstSize / 32); 488 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg); 489 if (!SrcRC) 490 return false; 491 492 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, 493 *SrcRC, I.getOperand(1)); 494 const DebugLoc &DL = I.getDebugLoc(); 495 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg) 496 .addReg(SrcReg, 0, SubReg); 497 498 I.eraseFromParent(); 499 return true; 500 } 501 502 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const { 503 MachineBasicBlock *BB = MI.getParent(); 504 Register DstReg = MI.getOperand(0).getReg(); 505 LLT DstTy = MRI->getType(DstReg); 506 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg()); 507 508 const unsigned SrcSize = SrcTy.getSizeInBits(); 509 if (SrcSize < 32) 510 return selectImpl(MI, *CoverageInfo); 511 512 const DebugLoc &DL = MI.getDebugLoc(); 513 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 514 const unsigned DstSize = DstTy.getSizeInBits(); 515 const TargetRegisterClass *DstRC = 516 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 517 if (!DstRC) 518 return false; 519 520 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8); 521 MachineInstrBuilder MIB = 522 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg); 523 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) { 524 MachineOperand &Src = MI.getOperand(I + 1); 525 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef())); 526 MIB.addImm(SubRegs[I]); 527 528 const TargetRegisterClass *SrcRC 529 = 
TRI.getConstrainedRegClassForOperand(Src, *MRI); 530 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) 531 return false; 532 } 533 534 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 535 return false; 536 537 MI.eraseFromParent(); 538 return true; 539 } 540 541 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const { 542 MachineBasicBlock *BB = MI.getParent(); 543 const int NumDst = MI.getNumOperands() - 1; 544 545 MachineOperand &Src = MI.getOperand(NumDst); 546 547 Register SrcReg = Src.getReg(); 548 Register DstReg0 = MI.getOperand(0).getReg(); 549 LLT DstTy = MRI->getType(DstReg0); 550 LLT SrcTy = MRI->getType(SrcReg); 551 552 const unsigned DstSize = DstTy.getSizeInBits(); 553 const unsigned SrcSize = SrcTy.getSizeInBits(); 554 const DebugLoc &DL = MI.getDebugLoc(); 555 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 556 557 const TargetRegisterClass *SrcRC = 558 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); 559 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 560 return false; 561 562 // Note we could have mixed SGPR and VGPR destination banks for an SGPR 563 // source, and this relies on the fact that the same subregister indices are 564 // used for both. 565 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8); 566 for (int I = 0, E = NumDst; I != E; ++I) { 567 MachineOperand &Dst = MI.getOperand(I); 568 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg()) 569 .addReg(SrcReg, 0, SubRegs[I]); 570 571 // Make sure the subregister index is valid for the source register. 572 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]); 573 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 574 return false; 575 576 const TargetRegisterClass *DstRC = 577 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 578 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) 579 return false; 580 } 581 582 MI.eraseFromParent(); 583 return true; 584 } 585 586 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC( 587 MachineInstr &MI) const { 588 if (selectImpl(MI, *CoverageInfo)) 589 return true; 590 591 const LLT S32 = LLT::scalar(32); 592 const LLT V2S16 = LLT::vector(2, 16); 593 594 Register Dst = MI.getOperand(0).getReg(); 595 if (MRI->getType(Dst) != V2S16) 596 return false; 597 598 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); 599 if (DstBank->getID() != AMDGPU::SGPRRegBankID) 600 return false; 601 602 Register Src0 = MI.getOperand(1).getReg(); 603 Register Src1 = MI.getOperand(2).getReg(); 604 if (MRI->getType(Src0) != S32) 605 return false; 606 607 const DebugLoc &DL = MI.getDebugLoc(); 608 MachineBasicBlock *BB = MI.getParent(); 609 610 auto ConstSrc1 = 611 getConstantVRegValWithLookThrough(Src1, *MRI, true, true, true); 612 if (ConstSrc1) { 613 auto ConstSrc0 = 614 getConstantVRegValWithLookThrough(Src0, *MRI, true, true, true); 615 if (ConstSrc0) { 616 const int64_t K0 = ConstSrc0->Value.getSExtValue(); 617 const int64_t K1 = ConstSrc1->Value.getSExtValue(); 618 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; 619 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; 620 621 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst) 622 .addImm(Lo16 | (Hi16 << 16)); 623 MI.eraseFromParent(); 624 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); 625 } 626 } 627 628 // TODO: This should probably be a combine somewhere 629 // (build_vector_trunc $src0, undef -> copy $src0 630 MachineInstr *Src1Def = 
getDefIgnoringCopies(Src1, *MRI); 631 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) { 632 MI.setDesc(TII.get(AMDGPU::COPY)); 633 MI.RemoveOperand(2); 634 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) && 635 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI); 636 } 637 638 Register ShiftSrc0; 639 Register ShiftSrc1; 640 641 // With multiple uses of the shift, this will duplicate the shift and 642 // increase register pressure. 643 // 644 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16) 645 // => (S_PACK_HH_B32_B16 $src0, $src1) 646 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16)) 647 // => (S_PACK_LH_B32_B16 $src0, $src1) 648 // (build_vector_trunc $src0, $src1) 649 // => (S_PACK_LL_B32_B16 $src0, $src1) 650 651 bool Shift0 = mi_match( 652 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); 653 654 bool Shift1 = mi_match( 655 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16)))); 656 657 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16; 658 if (Shift0 && Shift1) { 659 Opc = AMDGPU::S_PACK_HH_B32_B16; 660 MI.getOperand(1).setReg(ShiftSrc0); 661 MI.getOperand(2).setReg(ShiftSrc1); 662 } else if (Shift1) { 663 Opc = AMDGPU::S_PACK_LH_B32_B16; 664 MI.getOperand(2).setReg(ShiftSrc1); 665 } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) { 666 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16 667 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst) 668 .addReg(ShiftSrc0) 669 .addImm(16); 670 671 MI.eraseFromParent(); 672 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 673 } 674 675 MI.setDesc(TII.get(Opc)); 676 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); 677 } 678 679 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const { 680 return selectG_ADD_SUB(I); 681 } 682 683 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const { 684 const MachineOperand &MO = I.getOperand(0); 685 686 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The 687 // regbank check here is to know why getConstrainedRegClassForOperand failed. 688 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI); 689 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) || 690 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { 691 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); 692 return true; 693 } 694 695 return false; 696 } 697 698 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const { 699 MachineBasicBlock *BB = I.getParent(); 700 701 Register DstReg = I.getOperand(0).getReg(); 702 Register Src0Reg = I.getOperand(1).getReg(); 703 Register Src1Reg = I.getOperand(2).getReg(); 704 LLT Src1Ty = MRI->getType(Src1Reg); 705 706 unsigned DstSize = MRI->getType(DstReg).getSizeInBits(); 707 unsigned InsSize = Src1Ty.getSizeInBits(); 708 709 int64_t Offset = I.getOperand(3).getImm(); 710 711 // FIXME: These cases should have been illegal and unnecessary to check here. 712 if (Offset % 32 != 0 || InsSize % 32 != 0) 713 return false; 714 715 // Currently not handled by getSubRegFromChannel. 
716 if (InsSize > 128) 717 return false; 718 719 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32); 720 if (SubReg == AMDGPU::NoSubRegister) 721 return false; 722 723 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 724 const TargetRegisterClass *DstRC = 725 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 726 if (!DstRC) 727 return false; 728 729 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); 730 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); 731 const TargetRegisterClass *Src0RC = 732 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI); 733 const TargetRegisterClass *Src1RC = 734 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI); 735 736 // Deal with weird cases where the class only partially supports the subreg 737 // index. 738 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg); 739 if (!Src0RC || !Src1RC) 740 return false; 741 742 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 743 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || 744 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) 745 return false; 746 747 const DebugLoc &DL = I.getDebugLoc(); 748 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg) 749 .addReg(Src0Reg) 750 .addReg(Src1Reg) 751 .addImm(SubReg); 752 753 I.eraseFromParent(); 754 return true; 755 } 756 757 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { 758 if (STI.getLDSBankCount() != 16) 759 return selectImpl(MI, *CoverageInfo); 760 761 Register Dst = MI.getOperand(0).getReg(); 762 Register Src0 = MI.getOperand(2).getReg(); 763 Register M0Val = MI.getOperand(6).getReg(); 764 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || 765 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || 766 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) 767 return false; 768 769 // This requires 2 instructions. It is possible to write a pattern to support 770 // this, but the generated isel emitter doesn't correctly deal with multiple 771 // output instructions using the same physical register input. The copy to m0 772 // is incorrectly placed before the second instruction. 773 // 774 // TODO: Match source modifiers. 775 776 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 777 const DebugLoc &DL = MI.getDebugLoc(); 778 MachineBasicBlock *MBB = MI.getParent(); 779 780 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 781 .addReg(M0Val); 782 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov) 783 .addImm(2) 784 .addImm(MI.getOperand(4).getImm()) // $attr 785 .addImm(MI.getOperand(3).getImm()); // $attrchan 786 787 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst) 788 .addImm(0) // $src0_modifiers 789 .addReg(Src0) // $src0 790 .addImm(MI.getOperand(4).getImm()) // $attr 791 .addImm(MI.getOperand(3).getImm()) // $attrchan 792 .addImm(0) // $src2_modifiers 793 .addReg(InterpMov) // $src2 - 2 f16 values selected by high 794 .addImm(MI.getOperand(5).getImm()) // $high 795 .addImm(0) // $clamp 796 .addImm(0); // $omod 797 798 MI.eraseFromParent(); 799 return true; 800 } 801 802 // Writelane is special in that it can use SGPR and M0 (which would normally 803 // count as using the constant bus twice - but in this case it is allowed since 804 // the lane selector doesn't count as a use of the constant bus). However, it is 805 // still required to abide by the 1 SGPR rule. Fix this up if we might have 806 // multiple SGPRs. 
807 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { 808 // With a constant bus limit of at least 2, there's no issue. 809 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1) 810 return selectImpl(MI, *CoverageInfo); 811 812 MachineBasicBlock *MBB = MI.getParent(); 813 const DebugLoc &DL = MI.getDebugLoc(); 814 Register VDst = MI.getOperand(0).getReg(); 815 Register Val = MI.getOperand(2).getReg(); 816 Register LaneSelect = MI.getOperand(3).getReg(); 817 Register VDstIn = MI.getOperand(4).getReg(); 818 819 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); 820 821 Optional<ValueAndVReg> ConstSelect = 822 getConstantVRegValWithLookThrough(LaneSelect, *MRI, true, true); 823 if (ConstSelect) { 824 // The selector has to be an inline immediate, so we can use whatever for 825 // the other operands. 826 MIB.addReg(Val); 827 MIB.addImm(ConstSelect->Value.getSExtValue() & 828 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); 829 } else { 830 Optional<ValueAndVReg> ConstVal = 831 getConstantVRegValWithLookThrough(Val, *MRI, true, true); 832 833 // If the value written is an inline immediate, we can get away without a 834 // copy to m0. 835 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(), 836 STI.hasInv2PiInlineImm())) { 837 MIB.addImm(ConstVal->Value.getSExtValue()); 838 MIB.addReg(LaneSelect); 839 } else { 840 MIB.addReg(Val); 841 842 // If the lane selector was originally in a VGPR and copied with 843 // readfirstlane, there's a hazard to read the same SGPR from the 844 // VALU. Constrain to a different SGPR to help avoid needing a nop later. 845 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); 846 847 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 848 .addReg(LaneSelect); 849 MIB.addReg(AMDGPU::M0); 850 } 851 } 852 853 MIB.addReg(VDstIn); 854 855 MI.eraseFromParent(); 856 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 857 } 858 859 // We need to handle this here because tablegen doesn't support matching 860 // instructions with multiple outputs. 861 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const { 862 Register Dst0 = MI.getOperand(0).getReg(); 863 Register Dst1 = MI.getOperand(1).getReg(); 864 865 LLT Ty = MRI->getType(Dst0); 866 unsigned Opc; 867 if (Ty == LLT::scalar(32)) 868 Opc = AMDGPU::V_DIV_SCALE_F32_e64; 869 else if (Ty == LLT::scalar(64)) 870 Opc = AMDGPU::V_DIV_SCALE_F64_e64; 871 else 872 return false; 873 874 // TODO: Match source modifiers. 875 876 const DebugLoc &DL = MI.getDebugLoc(); 877 MachineBasicBlock *MBB = MI.getParent(); 878 879 Register Numer = MI.getOperand(3).getReg(); 880 Register Denom = MI.getOperand(4).getReg(); 881 unsigned ChooseDenom = MI.getOperand(5).getImm(); 882 883 Register Src0 = ChooseDenom != 0 ? 
Numer : Denom; 884 885 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0) 886 .addDef(Dst1) 887 .addImm(0) // $src0_modifiers 888 .addUse(Src0) // $src0 889 .addImm(0) // $src1_modifiers 890 .addUse(Denom) // $src1 891 .addImm(0) // $src2_modifiers 892 .addUse(Numer) // $src2 893 .addImm(0) // $clamp 894 .addImm(0); // $omod 895 896 MI.eraseFromParent(); 897 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 898 } 899 900 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { 901 unsigned IntrinsicID = I.getIntrinsicID(); 902 switch (IntrinsicID) { 903 case Intrinsic::amdgcn_if_break: { 904 MachineBasicBlock *BB = I.getParent(); 905 906 // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick 907 // SelectionDAG uses for wave32 vs wave64. 908 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK)) 909 .add(I.getOperand(0)) 910 .add(I.getOperand(2)) 911 .add(I.getOperand(3)); 912 913 Register DstReg = I.getOperand(0).getReg(); 914 Register Src0Reg = I.getOperand(2).getReg(); 915 Register Src1Reg = I.getOperand(3).getReg(); 916 917 I.eraseFromParent(); 918 919 for (Register Reg : { DstReg, Src0Reg, Src1Reg }) 920 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 921 922 return true; 923 } 924 case Intrinsic::amdgcn_interp_p1_f16: 925 return selectInterpP1F16(I); 926 case Intrinsic::amdgcn_wqm: 927 return constrainCopyLikeIntrin(I, AMDGPU::WQM); 928 case Intrinsic::amdgcn_softwqm: 929 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM); 930 case Intrinsic::amdgcn_strict_wwm: 931 case Intrinsic::amdgcn_wwm: 932 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM); 933 case Intrinsic::amdgcn_strict_wqm: 934 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM); 935 case Intrinsic::amdgcn_writelane: 936 return selectWritelane(I); 937 case Intrinsic::amdgcn_div_scale: 938 return selectDivScale(I); 939 case Intrinsic::amdgcn_icmp: 940 return selectIntrinsicIcmp(I); 941 case Intrinsic::amdgcn_ballot: 942 return selectBallot(I); 943 case Intrinsic::amdgcn_reloc_constant: 944 return selectRelocConstant(I); 945 case Intrinsic::amdgcn_groupstaticsize: 946 return selectGroupStaticSize(I); 947 case Intrinsic::returnaddress: 948 return selectReturnAddress(I); 949 default: 950 return selectImpl(I, *CoverageInfo); 951 } 952 } 953 954 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) { 955 if (Size != 32 && Size != 64) 956 return -1; 957 switch (P) { 958 default: 959 llvm_unreachable("Unknown condition code!"); 960 case CmpInst::ICMP_NE: 961 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64; 962 case CmpInst::ICMP_EQ: 963 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64; 964 case CmpInst::ICMP_SGT: 965 return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64; 966 case CmpInst::ICMP_SGE: 967 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64; 968 case CmpInst::ICMP_SLT: 969 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64; 970 case CmpInst::ICMP_SLE: 971 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64; 972 case CmpInst::ICMP_UGT: 973 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64; 974 case CmpInst::ICMP_UGE: 975 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64; 976 case CmpInst::ICMP_ULT: 977 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64; 978 case CmpInst::ICMP_ULE: 979 return Size == 32 ? 
AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64; 980 } 981 } 982 983 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P, 984 unsigned Size) const { 985 if (Size == 64) { 986 if (!STI.hasScalarCompareEq64()) 987 return -1; 988 989 switch (P) { 990 case CmpInst::ICMP_NE: 991 return AMDGPU::S_CMP_LG_U64; 992 case CmpInst::ICMP_EQ: 993 return AMDGPU::S_CMP_EQ_U64; 994 default: 995 return -1; 996 } 997 } 998 999 if (Size != 32) 1000 return -1; 1001 1002 switch (P) { 1003 case CmpInst::ICMP_NE: 1004 return AMDGPU::S_CMP_LG_U32; 1005 case CmpInst::ICMP_EQ: 1006 return AMDGPU::S_CMP_EQ_U32; 1007 case CmpInst::ICMP_SGT: 1008 return AMDGPU::S_CMP_GT_I32; 1009 case CmpInst::ICMP_SGE: 1010 return AMDGPU::S_CMP_GE_I32; 1011 case CmpInst::ICMP_SLT: 1012 return AMDGPU::S_CMP_LT_I32; 1013 case CmpInst::ICMP_SLE: 1014 return AMDGPU::S_CMP_LE_I32; 1015 case CmpInst::ICMP_UGT: 1016 return AMDGPU::S_CMP_GT_U32; 1017 case CmpInst::ICMP_UGE: 1018 return AMDGPU::S_CMP_GE_U32; 1019 case CmpInst::ICMP_ULT: 1020 return AMDGPU::S_CMP_LT_U32; 1021 case CmpInst::ICMP_ULE: 1022 return AMDGPU::S_CMP_LE_U32; 1023 default: 1024 llvm_unreachable("Unknown condition code!"); 1025 } 1026 } 1027 1028 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { 1029 MachineBasicBlock *BB = I.getParent(); 1030 const DebugLoc &DL = I.getDebugLoc(); 1031 1032 Register SrcReg = I.getOperand(2).getReg(); 1033 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1034 1035 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); 1036 1037 Register CCReg = I.getOperand(0).getReg(); 1038 if (!isVCC(CCReg, *MRI)) { 1039 int Opcode = getS_CMPOpcode(Pred, Size); 1040 if (Opcode == -1) 1041 return false; 1042 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode)) 1043 .add(I.getOperand(2)) 1044 .add(I.getOperand(3)); 1045 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg) 1046 .addReg(AMDGPU::SCC); 1047 bool Ret = 1048 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && 1049 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); 1050 I.eraseFromParent(); 1051 return Ret; 1052 } 1053 1054 int Opcode = getV_CMPOpcode(Pred, Size); 1055 if (Opcode == -1) 1056 return false; 1057 1058 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), 1059 I.getOperand(0).getReg()) 1060 .add(I.getOperand(2)) 1061 .add(I.getOperand(3)); 1062 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), 1063 *TRI.getBoolRC(), *MRI); 1064 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1065 I.eraseFromParent(); 1066 return Ret; 1067 } 1068 1069 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const { 1070 Register Dst = I.getOperand(0).getReg(); 1071 if (isVCC(Dst, *MRI)) 1072 return false; 1073 1074 if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize()) 1075 return false; 1076 1077 MachineBasicBlock *BB = I.getParent(); 1078 const DebugLoc &DL = I.getDebugLoc(); 1079 Register SrcReg = I.getOperand(2).getReg(); 1080 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1081 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm()); 1082 1083 int Opcode = getV_CMPOpcode(Pred, Size); 1084 if (Opcode == -1) 1085 return false; 1086 1087 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) 1088 .add(I.getOperand(2)) 1089 .add(I.getOperand(3)); 1090 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(), 1091 *MRI); 1092 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1093 I.eraseFromParent(); 1094 
return Ret; 1095 } 1096 1097 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { 1098 MachineBasicBlock *BB = I.getParent(); 1099 const DebugLoc &DL = I.getDebugLoc(); 1100 Register DstReg = I.getOperand(0).getReg(); 1101 const unsigned Size = MRI->getType(DstReg).getSizeInBits(); 1102 const bool Is64 = Size == 64; 1103 1104 if (Size != STI.getWavefrontSize()) 1105 return false; 1106 1107 Optional<ValueAndVReg> Arg = 1108 getConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI, true); 1109 1110 if (Arg.hasValue()) { 1111 const int64_t Value = Arg.getValue().Value.getSExtValue(); 1112 if (Value == 0) { 1113 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 1114 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); 1115 } else if (Value == -1) { // all ones 1116 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; 1117 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1118 } else 1119 return false; 1120 } else { 1121 Register SrcReg = I.getOperand(2).getReg(); 1122 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1123 } 1124 1125 I.eraseFromParent(); 1126 return true; 1127 } 1128 1129 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const { 1130 Register DstReg = I.getOperand(0).getReg(); 1131 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 1132 const TargetRegisterClass *DstRC = 1133 TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI); 1134 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 1135 return false; 1136 1137 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID; 1138 1139 Module *M = MF->getFunction().getParent(); 1140 const MDNode *Metadata = I.getOperand(2).getMetadata(); 1141 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); 1142 auto RelocSymbol = cast<GlobalVariable>( 1143 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); 1144 1145 MachineBasicBlock *BB = I.getParent(); 1146 BuildMI(*BB, &I, I.getDebugLoc(), 1147 TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg) 1148 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO); 1149 1150 I.eraseFromParent(); 1151 return true; 1152 } 1153 1154 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const { 1155 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS(); 1156 1157 Register DstReg = I.getOperand(0).getReg(); 1158 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1159 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ? 
1160 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1161 1162 MachineBasicBlock *MBB = I.getParent(); 1163 const DebugLoc &DL = I.getDebugLoc(); 1164 1165 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg); 1166 1167 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) { 1168 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1169 MIB.addImm(MFI->getLDSSize()); 1170 } else { 1171 Module *M = MF->getFunction().getParent(); 1172 const GlobalValue *GV 1173 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize); 1174 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO); 1175 } 1176 1177 I.eraseFromParent(); 1178 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1179 } 1180 1181 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const { 1182 MachineBasicBlock *MBB = I.getParent(); 1183 MachineFunction &MF = *MBB->getParent(); 1184 const DebugLoc &DL = I.getDebugLoc(); 1185 1186 MachineOperand &Dst = I.getOperand(0); 1187 Register DstReg = Dst.getReg(); 1188 unsigned Depth = I.getOperand(2).getImm(); 1189 1190 const TargetRegisterClass *RC 1191 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 1192 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) || 1193 !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 1194 return false; 1195 1196 // Check for kernel and shader functions 1197 if (Depth != 0 || 1198 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { 1199 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 1200 .addImm(0); 1201 I.eraseFromParent(); 1202 return true; 1203 } 1204 1205 MachineFrameInfo &MFI = MF.getFrameInfo(); 1206 // There is a call to @llvm.returnaddress in this function 1207 MFI.setReturnAddressIsTaken(true); 1208 1209 // Get the return address reg and mark it as an implicit live-in 1210 Register ReturnAddrReg = TRI.getReturnAddressReg(MF); 1211 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg, 1212 AMDGPU::SReg_64RegClass); 1213 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg) 1214 .addReg(LiveIn); 1215 I.eraseFromParent(); 1216 return true; 1217 } 1218 1219 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const { 1220 // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick 1221 // SelectionDAG uses for wave32 vs wave64. 
1222 MachineBasicBlock *BB = MI.getParent(); 1223 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF)) 1224 .add(MI.getOperand(1)); 1225 1226 Register Reg = MI.getOperand(1).getReg(); 1227 MI.eraseFromParent(); 1228 1229 if (!MRI->getRegClassOrNull(Reg)) 1230 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 1231 return true; 1232 } 1233 1234 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic( 1235 MachineInstr &MI, Intrinsic::ID IntrID) const { 1236 MachineBasicBlock *MBB = MI.getParent(); 1237 MachineFunction *MF = MBB->getParent(); 1238 const DebugLoc &DL = MI.getDebugLoc(); 1239 1240 unsigned IndexOperand = MI.getOperand(7).getImm(); 1241 bool WaveRelease = MI.getOperand(8).getImm() != 0; 1242 bool WaveDone = MI.getOperand(9).getImm() != 0; 1243 1244 if (WaveDone && !WaveRelease) 1245 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 1246 1247 unsigned OrderedCountIndex = IndexOperand & 0x3f; 1248 IndexOperand &= ~0x3f; 1249 unsigned CountDw = 0; 1250 1251 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) { 1252 CountDw = (IndexOperand >> 24) & 0xf; 1253 IndexOperand &= ~(0xf << 24); 1254 1255 if (CountDw < 1 || CountDw > 4) { 1256 report_fatal_error( 1257 "ds_ordered_count: dword count must be between 1 and 4"); 1258 } 1259 } 1260 1261 if (IndexOperand) 1262 report_fatal_error("ds_ordered_count: bad index operand"); 1263 1264 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; 1265 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF); 1266 1267 unsigned Offset0 = OrderedCountIndex << 2; 1268 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | 1269 (Instruction << 4); 1270 1271 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) 1272 Offset1 |= (CountDw - 1) << 6; 1273 1274 unsigned Offset = Offset0 | (Offset1 << 8); 1275 1276 Register M0Val = MI.getOperand(2).getReg(); 1277 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1278 .addReg(M0Val); 1279 1280 Register DstReg = MI.getOperand(0).getReg(); 1281 Register ValReg = MI.getOperand(3).getReg(); 1282 MachineInstrBuilder DS = 1283 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg) 1284 .addReg(ValReg) 1285 .addImm(Offset) 1286 .cloneMemRefs(MI); 1287 1288 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) 1289 return false; 1290 1291 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); 1292 MI.eraseFromParent(); 1293 return Ret; 1294 } 1295 1296 static unsigned gwsIntrinToOpcode(unsigned IntrID) { 1297 switch (IntrID) { 1298 case Intrinsic::amdgcn_ds_gws_init: 1299 return AMDGPU::DS_GWS_INIT; 1300 case Intrinsic::amdgcn_ds_gws_barrier: 1301 return AMDGPU::DS_GWS_BARRIER; 1302 case Intrinsic::amdgcn_ds_gws_sema_v: 1303 return AMDGPU::DS_GWS_SEMA_V; 1304 case Intrinsic::amdgcn_ds_gws_sema_br: 1305 return AMDGPU::DS_GWS_SEMA_BR; 1306 case Intrinsic::amdgcn_ds_gws_sema_p: 1307 return AMDGPU::DS_GWS_SEMA_P; 1308 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1309 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; 1310 default: 1311 llvm_unreachable("not a gws intrinsic"); 1312 } 1313 } 1314 1315 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI, 1316 Intrinsic::ID IID) const { 1317 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all && 1318 !STI.hasGWSSemaReleaseAll()) 1319 return false; 1320 1321 // intrinsic ID, vsrc, offset 1322 const bool HasVSrc = MI.getNumOperands() == 3; 1323 assert(HasVSrc || MI.getNumOperands() == 2); 1324 1325 Register BaseOffset = MI.getOperand(HasVSrc ? 
2 : 1).getReg(); 1326 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); 1327 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID) 1328 return false; 1329 1330 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1331 assert(OffsetDef); 1332 1333 unsigned ImmOffset; 1334 1335 MachineBasicBlock *MBB = MI.getParent(); 1336 const DebugLoc &DL = MI.getDebugLoc(); 1337 1338 MachineInstr *Readfirstlane = nullptr; 1339 1340 // If we legalized the VGPR input, strip out the readfirstlane to analyze the 1341 // incoming offset, in case there's an add of a constant. We'll have to put it 1342 // back later. 1343 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) { 1344 Readfirstlane = OffsetDef; 1345 BaseOffset = OffsetDef->getOperand(1).getReg(); 1346 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1347 } 1348 1349 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) { 1350 // If we have a constant offset, try to use the 0 in m0 as the base. 1351 // TODO: Look into changing the default m0 initialization value. If the 1352 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to 1353 // the immediate offset. 1354 1355 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue(); 1356 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1357 .addImm(0); 1358 } else { 1359 std::tie(BaseOffset, ImmOffset) = 1360 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); 1361 1362 if (Readfirstlane) { 1363 // We have the constant offset now, so put the readfirstlane back on the 1364 // variable component. 1365 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) 1366 return false; 1367 1368 Readfirstlane->getOperand(1).setReg(BaseOffset); 1369 BaseOffset = Readfirstlane->getOperand(0).getReg(); 1370 } else { 1371 if (!RBI.constrainGenericRegister(BaseOffset, 1372 AMDGPU::SReg_32RegClass, *MRI)) 1373 return false; 1374 } 1375 1376 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1377 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base) 1378 .addReg(BaseOffset) 1379 .addImm(16); 1380 1381 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1382 .addReg(M0Base); 1383 } 1384 1385 // The resource id offset is computed as (<isa opaque base> + M0[21:16] + 1386 // offset field) % 64. Some versions of the programming guide omit the m0 1387 // part, or claim it's from offset 0. 1388 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID))); 1389 1390 if (HasVSrc) { 1391 Register VSrc = MI.getOperand(1).getReg(); 1392 MIB.addReg(VSrc); 1393 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) 1394 return false; 1395 } 1396 1397 MIB.addImm(ImmOffset) 1398 .cloneMemRefs(MI); 1399 1400 MI.eraseFromParent(); 1401 return true; 1402 } 1403 1404 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI, 1405 bool IsAppend) const { 1406 Register PtrBase = MI.getOperand(2).getReg(); 1407 LLT PtrTy = MRI->getType(PtrBase); 1408 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS; 1409 1410 unsigned Offset; 1411 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2)); 1412 1413 // TODO: Should this try to look through readfirstlane like GWS? 1414 if (!isDSOffsetLegal(PtrBase, Offset)) { 1415 PtrBase = MI.getOperand(2).getReg(); 1416 Offset = 0; 1417 } 1418 1419 MachineBasicBlock *MBB = MI.getParent(); 1420 const DebugLoc &DL = MI.getDebugLoc(); 1421 const unsigned Opc = IsAppend ? 
AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; 1422 1423 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1424 .addReg(PtrBase); 1425 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) 1426 return false; 1427 1428 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg()) 1429 .addImm(Offset) 1430 .addImm(IsGDS ? -1 : 0) 1431 .cloneMemRefs(MI); 1432 MI.eraseFromParent(); 1433 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1434 } 1435 1436 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const { 1437 if (TM.getOptLevel() > CodeGenOpt::None) { 1438 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second; 1439 if (WGSize <= STI.getWavefrontSize()) { 1440 MachineBasicBlock *MBB = MI.getParent(); 1441 const DebugLoc &DL = MI.getDebugLoc(); 1442 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER)); 1443 MI.eraseFromParent(); 1444 return true; 1445 } 1446 } 1447 return selectImpl(MI, *CoverageInfo); 1448 } 1449 1450 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE, 1451 bool &IsTexFail) { 1452 if (TexFailCtrl) 1453 IsTexFail = true; 1454 1455 TFE = (TexFailCtrl & 0x1) ? 1 : 0; 1456 TexFailCtrl &= ~(uint64_t)0x1; 1457 LWE = (TexFailCtrl & 0x2) ? 1 : 0; 1458 TexFailCtrl &= ~(uint64_t)0x2; 1459 1460 return TexFailCtrl == 0; 1461 } 1462 1463 bool AMDGPUInstructionSelector::selectImageIntrinsic( 1464 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { 1465 MachineBasicBlock *MBB = MI.getParent(); 1466 const DebugLoc &DL = MI.getDebugLoc(); 1467 1468 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 1469 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 1470 1471 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 1472 const AMDGPU::MIMGLZMappingInfo *LZMappingInfo = 1473 AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode); 1474 const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo = 1475 AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode); 1476 unsigned IntrOpcode = Intr->BaseOpcode; 1477 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); 1478 1479 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; 1480 1481 Register VDataIn, VDataOut; 1482 LLT VDataTy; 1483 int NumVDataDwords = -1; 1484 bool IsD16 = false; 1485 1486 bool Unorm; 1487 if (!BaseOpcode->Sampler) 1488 Unorm = true; 1489 else 1490 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0; 1491 1492 bool TFE; 1493 bool LWE; 1494 bool IsTexFail = false; 1495 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(), 1496 TFE, LWE, IsTexFail)) 1497 return false; 1498 1499 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm(); 1500 const bool IsA16 = (Flags & 1) != 0; 1501 const bool IsG16 = (Flags & 2) != 0; 1502 1503 // A16 implies 16 bit gradients 1504 if (IsA16 && !IsG16) 1505 return false; 1506 1507 unsigned DMask = 0; 1508 unsigned DMaskLanes = 0; 1509 1510 if (BaseOpcode->Atomic) { 1511 VDataOut = MI.getOperand(0).getReg(); 1512 VDataIn = MI.getOperand(2).getReg(); 1513 LLT Ty = MRI->getType(VDataIn); 1514 1515 // Be careful to allow atomic swap on 16-bit element vectors. 1516 const bool Is64Bit = BaseOpcode->AtomicX2 ? 1517 Ty.getSizeInBits() == 128 : 1518 Ty.getSizeInBits() == 64; 1519 1520 if (BaseOpcode->AtomicX2) { 1521 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister); 1522 1523 DMask = Is64Bit ? 0xf : 0x3; 1524 NumVDataDwords = Is64Bit ? 4 : 2; 1525 } else { 1526 DMask = Is64Bit ? 0x3 : 0x1; 1527 NumVDataDwords = Is64Bit ? 
2 : 1; 1528 } 1529 } else { 1530 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm(); 1531 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 1532 1533 // One memoperand is mandatory, except for getresinfo. 1534 // FIXME: Check this in verifier. 1535 if (!MI.memoperands_empty()) { 1536 const MachineMemOperand *MMO = *MI.memoperands_begin(); 1537 1538 // Infer d16 from the memory size, as the register type will be mangled by 1539 // unpacked subtargets, or by TFE. 1540 IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32; 1541 } 1542 1543 if (BaseOpcode->Store) { 1544 VDataIn = MI.getOperand(1).getReg(); 1545 VDataTy = MRI->getType(VDataIn); 1546 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32; 1547 } else { 1548 VDataOut = MI.getOperand(0).getReg(); 1549 VDataTy = MRI->getType(VDataOut); 1550 NumVDataDwords = DMaskLanes; 1551 1552 if (IsD16 && !STI.hasUnpackedD16VMem()) 1553 NumVDataDwords = (DMaskLanes + 1) / 2; 1554 } 1555 } 1556 1557 // Optimize _L to _LZ when _L is zero 1558 if (LZMappingInfo) { 1559 // The legalizer replaced the register with an immediate 0 if we need to 1560 // change the opcode. 1561 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex); 1562 if (Lod.isImm()) { 1563 assert(Lod.getImm() == 0); 1564 IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l 1565 } 1566 } 1567 1568 // Optimize _mip away, when 'lod' is zero 1569 if (MIPMappingInfo) { 1570 const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex); 1571 if (Lod.isImm()) { 1572 assert(Lod.getImm() == 0); 1573 IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip 1574 } 1575 } 1576 1577 // Set G16 opcode 1578 if (IsG16 && !IsA16) { 1579 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = 1580 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); 1581 assert(G16MappingInfo); 1582 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16 1583 } 1584 1585 // TODO: Check this in verifier. 1586 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"); 1587 1588 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); 1589 if (BaseOpcode->Atomic) 1590 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization 1591 if (CPol & ~AMDGPU::CPol::ALL) 1592 return false; 1593 1594 int NumVAddrRegs = 0; 1595 int NumVAddrDwords = 0; 1596 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) { 1597 // Skip the $noregs and 0s inserted during legalization. 1598 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I); 1599 if (!AddrOp.isReg()) 1600 continue; // XXX - Break? 1601 1602 Register Addr = AddrOp.getReg(); 1603 if (!Addr) 1604 break; 1605 1606 ++NumVAddrRegs; 1607 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32; 1608 } 1609 1610 // The legalizer preprocessed the intrinsic arguments. If we aren't using 1611 // NSA, these should have beeen packed into a single value in the first 1612 // address register 1613 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs; 1614 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) { 1615 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n"); 1616 return false; 1617 } 1618 1619 if (IsTexFail) 1620 ++NumVDataDwords; 1621 1622 int Opcode = -1; 1623 if (IsGFX10Plus) { 1624 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 1625 UseNSA ? 
AMDGPU::MIMGEncGfx10NSA 1626 : AMDGPU::MIMGEncGfx10Default, 1627 NumVDataDwords, NumVAddrDwords); 1628 } else { 1629 if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 1630 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 1631 NumVDataDwords, NumVAddrDwords); 1632 if (Opcode == -1) 1633 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 1634 NumVDataDwords, NumVAddrDwords); 1635 } 1636 assert(Opcode != -1); 1637 1638 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode)) 1639 .cloneMemRefs(MI); 1640 1641 if (VDataOut) { 1642 if (BaseOpcode->AtomicX2) { 1643 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64; 1644 1645 Register TmpReg = MRI->createVirtualRegister( 1646 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); 1647 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; 1648 1649 MIB.addDef(TmpReg); 1650 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) 1651 .addReg(TmpReg, RegState::Kill, SubReg); 1652 1653 } else { 1654 MIB.addDef(VDataOut); // vdata output 1655 } 1656 } 1657 1658 if (VDataIn) 1659 MIB.addReg(VDataIn); // vdata input 1660 1661 for (int I = 0; I != NumVAddrRegs; ++I) { 1662 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); 1663 if (SrcOp.isReg()) { 1664 assert(SrcOp.getReg() != 0); 1665 MIB.addReg(SrcOp.getReg()); 1666 } 1667 } 1668 1669 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); 1670 if (BaseOpcode->Sampler) 1671 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); 1672 1673 MIB.addImm(DMask); // dmask 1674 1675 if (IsGFX10Plus) 1676 MIB.addImm(DimInfo->Encoding); 1677 MIB.addImm(Unorm); 1678 1679 MIB.addImm(CPol); 1680 MIB.addImm(IsA16 && // a16 or r128 1681 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); 1682 if (IsGFX10Plus) 1683 MIB.addImm(IsA16 ? -1 : 0); 1684 1685 MIB.addImm(TFE); // tfe 1686 MIB.addImm(LWE); // lwe 1687 if (!IsGFX10Plus) 1688 MIB.addImm(DimInfo->DA ? -1 : 0); 1689 if (BaseOpcode->HasD16) 1690 MIB.addImm(IsD16 ? 
-1 : 0);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_end_cf:
    return selectEndCfIntrinsic(I);
  case Intrinsic::amdgcn_ds_ordered_add:
  case Intrinsic::amdgcn_ds_ordered_swap:
    return selectDSOrderedIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_gws_init:
  case Intrinsic::amdgcn_ds_gws_barrier:
  case Intrinsic::amdgcn_ds_gws_sema_v:
  case Intrinsic::amdgcn_ds_gws_sema_br:
  case Intrinsic::amdgcn_ds_gws_sema_p:
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return selectDSGWSIntrinsic(I, IntrinsicID);
  case Intrinsic::amdgcn_ds_append:
    return selectDSAppendConsume(I, true);
  case Intrinsic::amdgcn_ds_consume:
    return selectDSAppendConsume(I, false);
  case Intrinsic::amdgcn_s_barrier:
    return selectSBarrier(I);
  case Intrinsic::amdgcn_global_atomic_fadd:
    return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
  default: {
    return selectImpl(I, *CoverageInfo);
  }
  }
}

bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
  assert(Size <= 32 || Size == 64);
  const MachineOperand &CCOp = I.getOperand(1);
  Register CCReg = CCOp.getReg();
  if (!isVCC(CCReg, *MRI)) {
    unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
                                         AMDGPU::S_CSELECT_B32;
    MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
            .addReg(CCReg);

    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it. So we need to manually set the register class here.
    if (!MRI->getRegClassOrNull(CCReg))
      MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
    MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
            .add(I.getOperand(2))
            .add(I.getOperand(3));

    bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
               constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
    I.eraseFromParent();
    return Ret;
  }

  // Wide VGPR select should have been split in RegBankSelect.
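  // A rough sketch of the VCC path below (illustrative assembly, not emitted
  // verbatim): the select becomes a single per-lane conditional move,
  //   v_cndmask_b32_e64 vDst, vFalse, vTrue, sMask
  // where each lane takes src1 (the true value, operand 2 of the G_SELECT)
  // when its bit of the condition mask is set, and src0 (operand 3) otherwise.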
1760 if (Size > 32) 1761 return false; 1762 1763 MachineInstr *Select = 1764 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1765 .addImm(0) 1766 .add(I.getOperand(3)) 1767 .addImm(0) 1768 .add(I.getOperand(2)) 1769 .add(I.getOperand(1)); 1770 1771 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1772 I.eraseFromParent(); 1773 return Ret; 1774 } 1775 1776 static int sizeToSubRegIndex(unsigned Size) { 1777 switch (Size) { 1778 case 32: 1779 return AMDGPU::sub0; 1780 case 64: 1781 return AMDGPU::sub0_sub1; 1782 case 96: 1783 return AMDGPU::sub0_sub1_sub2; 1784 case 128: 1785 return AMDGPU::sub0_sub1_sub2_sub3; 1786 case 256: 1787 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; 1788 default: 1789 if (Size < 32) 1790 return AMDGPU::sub0; 1791 if (Size > 256) 1792 return -1; 1793 return sizeToSubRegIndex(PowerOf2Ceil(Size)); 1794 } 1795 } 1796 1797 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { 1798 Register DstReg = I.getOperand(0).getReg(); 1799 Register SrcReg = I.getOperand(1).getReg(); 1800 const LLT DstTy = MRI->getType(DstReg); 1801 const LLT SrcTy = MRI->getType(SrcReg); 1802 const LLT S1 = LLT::scalar(1); 1803 1804 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 1805 const RegisterBank *DstRB; 1806 if (DstTy == S1) { 1807 // This is a special case. We don't treat s1 for legalization artifacts as 1808 // vcc booleans. 1809 DstRB = SrcRB; 1810 } else { 1811 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1812 if (SrcRB != DstRB) 1813 return false; 1814 } 1815 1816 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 1817 1818 unsigned DstSize = DstTy.getSizeInBits(); 1819 unsigned SrcSize = SrcTy.getSizeInBits(); 1820 1821 const TargetRegisterClass *SrcRC 1822 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI); 1823 const TargetRegisterClass *DstRC 1824 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI); 1825 if (!SrcRC || !DstRC) 1826 return false; 1827 1828 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 1829 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { 1830 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); 1831 return false; 1832 } 1833 1834 if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) { 1835 MachineBasicBlock *MBB = I.getParent(); 1836 const DebugLoc &DL = I.getDebugLoc(); 1837 1838 Register LoReg = MRI->createVirtualRegister(DstRC); 1839 Register HiReg = MRI->createVirtualRegister(DstRC); 1840 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) 1841 .addReg(SrcReg, 0, AMDGPU::sub0); 1842 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) 1843 .addReg(SrcReg, 0, AMDGPU::sub1); 1844 1845 if (IsVALU && STI.hasSDWA()) { 1846 // Write the low 16-bits of the high element into the high 16-bits of the 1847 // low element. 
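      // Roughly, the SDWA move below computes
      //   DstReg = (lo16(HiReg) << 16) | lo16(LoReg)
      // in one instruction: src0_sel:WORD_0 reads the low half of HiReg,
      // dst_sel:WORD_1 writes it into the high half of DstReg, and
      // dst_unused:UNUSED_PRESERVE keeps the low half from the tied-in LoReg.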
1848 MachineInstr *MovSDWA = 1849 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 1850 .addImm(0) // $src0_modifiers 1851 .addReg(HiReg) // $src0 1852 .addImm(0) // $clamp 1853 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 1854 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 1855 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 1856 .addReg(LoReg, RegState::Implicit); 1857 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 1858 } else { 1859 Register TmpReg0 = MRI->createVirtualRegister(DstRC); 1860 Register TmpReg1 = MRI->createVirtualRegister(DstRC); 1861 Register ImmReg = MRI->createVirtualRegister(DstRC); 1862 if (IsVALU) { 1863 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) 1864 .addImm(16) 1865 .addReg(HiReg); 1866 } else { 1867 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) 1868 .addReg(HiReg) 1869 .addImm(16); 1870 } 1871 1872 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1873 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 1874 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; 1875 1876 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) 1877 .addImm(0xffff); 1878 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) 1879 .addReg(LoReg) 1880 .addReg(ImmReg); 1881 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) 1882 .addReg(TmpReg0) 1883 .addReg(TmpReg1); 1884 } 1885 1886 I.eraseFromParent(); 1887 return true; 1888 } 1889 1890 if (!DstTy.isScalar()) 1891 return false; 1892 1893 if (SrcSize > 32) { 1894 int SubRegIdx = sizeToSubRegIndex(DstSize); 1895 if (SubRegIdx == -1) 1896 return false; 1897 1898 // Deal with weird cases where the class only partially supports the subreg 1899 // index. 1900 const TargetRegisterClass *SrcWithSubRC 1901 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); 1902 if (!SrcWithSubRC) 1903 return false; 1904 1905 if (SrcWithSubRC != SrcRC) { 1906 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) 1907 return false; 1908 } 1909 1910 I.getOperand(1).setSubReg(SubRegIdx); 1911 } 1912 1913 I.setDesc(TII.get(TargetOpcode::COPY)); 1914 return true; 1915 } 1916 1917 /// \returns true if a bitmask for \p Size bits will be an inline immediate. 1918 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { 1919 Mask = maskTrailingOnes<unsigned>(Size); 1920 int SignedMask = static_cast<int>(Mask); 1921 return SignedMask >= -16 && SignedMask <= 64; 1922 } 1923 1924 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. 1925 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( 1926 Register Reg, const MachineRegisterInfo &MRI, 1927 const TargetRegisterInfo &TRI) const { 1928 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 1929 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) 1930 return RB; 1931 1932 // Ignore the type, since we don't use vcc in artifacts. 
1933 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) 1934 return &RBI.getRegBankFromRegClass(*RC, LLT()); 1935 return nullptr; 1936 } 1937 1938 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { 1939 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; 1940 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; 1941 const DebugLoc &DL = I.getDebugLoc(); 1942 MachineBasicBlock &MBB = *I.getParent(); 1943 const Register DstReg = I.getOperand(0).getReg(); 1944 const Register SrcReg = I.getOperand(1).getReg(); 1945 1946 const LLT DstTy = MRI->getType(DstReg); 1947 const LLT SrcTy = MRI->getType(SrcReg); 1948 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? 1949 I.getOperand(2).getImm() : SrcTy.getSizeInBits(); 1950 const unsigned DstSize = DstTy.getSizeInBits(); 1951 if (!DstTy.isScalar()) 1952 return false; 1953 1954 // Artifact casts should never use vcc. 1955 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); 1956 1957 // FIXME: This should probably be illegal and split earlier. 1958 if (I.getOpcode() == AMDGPU::G_ANYEXT) { 1959 if (DstSize <= 32) 1960 return selectCOPY(I); 1961 1962 const TargetRegisterClass *SrcRC = 1963 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI); 1964 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 1965 const TargetRegisterClass *DstRC = 1966 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 1967 1968 Register UndefReg = MRI->createVirtualRegister(SrcRC); 1969 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 1970 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 1971 .addReg(SrcReg) 1972 .addImm(AMDGPU::sub0) 1973 .addReg(UndefReg) 1974 .addImm(AMDGPU::sub1); 1975 I.eraseFromParent(); 1976 1977 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && 1978 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); 1979 } 1980 1981 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { 1982 // 64-bit should have been split up in RegBankSelect 1983 1984 // Try to use an and with a mask if it will save code size. 1985 unsigned Mask; 1986 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 1987 MachineInstr *ExtI = 1988 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) 1989 .addImm(Mask) 1990 .addReg(SrcReg); 1991 I.eraseFromParent(); 1992 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 1993 } 1994 1995 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 1996 MachineInstr *ExtI = 1997 BuildMI(MBB, I, DL, TII.get(BFE), DstReg) 1998 .addReg(SrcReg) 1999 .addImm(0) // Offset 2000 .addImm(SrcSize); // Width 2001 I.eraseFromParent(); 2002 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2003 } 2004 2005 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { 2006 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? 2007 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; 2008 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) 2009 return false; 2010 2011 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { 2012 const unsigned SextOpc = SrcSize == 8 ? 2013 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; 2014 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) 2015 .addReg(SrcReg); 2016 I.eraseFromParent(); 2017 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2018 } 2019 2020 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; 2021 const unsigned BFE32 = Signed ? 
AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 2022 2023 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. 2024 if (DstSize > 32 && (SrcSize <= 32 || InReg)) { 2025 // We need a 64-bit register source, but the high bits don't matter. 2026 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); 2027 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2028 unsigned SubReg = InReg ? AMDGPU::sub0 : 0; 2029 2030 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2031 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) 2032 .addReg(SrcReg, 0, SubReg) 2033 .addImm(AMDGPU::sub0) 2034 .addReg(UndefReg) 2035 .addImm(AMDGPU::sub1); 2036 2037 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) 2038 .addReg(ExtReg) 2039 .addImm(SrcSize << 16); 2040 2041 I.eraseFromParent(); 2042 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); 2043 } 2044 2045 unsigned Mask; 2046 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2047 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) 2048 .addReg(SrcReg) 2049 .addImm(Mask); 2050 } else { 2051 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) 2052 .addReg(SrcReg) 2053 .addImm(SrcSize << 16); 2054 } 2055 2056 I.eraseFromParent(); 2057 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2058 } 2059 2060 return false; 2061 } 2062 2063 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { 2064 MachineBasicBlock *BB = I.getParent(); 2065 MachineOperand &ImmOp = I.getOperand(1); 2066 Register DstReg = I.getOperand(0).getReg(); 2067 unsigned Size = MRI->getType(DstReg).getSizeInBits(); 2068 2069 // The AMDGPU backend only supports Imm operands and not CImm or FPImm. 2070 if (ImmOp.isFPImm()) { 2071 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); 2072 ImmOp.ChangeToImmediate(Imm.getZExtValue()); 2073 } else if (ImmOp.isCImm()) { 2074 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); 2075 } else { 2076 llvm_unreachable("Not supported by g_constants"); 2077 } 2078 2079 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2080 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; 2081 2082 unsigned Opcode; 2083 if (DstRB->getID() == AMDGPU::VCCRegBankID) { 2084 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2085 } else { 2086 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 2087 2088 // We should never produce s1 values on banks other than VCC. If the user of 2089 // this already constrained the register, we may incorrectly think it's VCC 2090 // if it wasn't originally. 2091 if (Size == 1) 2092 return false; 2093 } 2094 2095 if (Size != 64) { 2096 I.setDesc(TII.get(Opcode)); 2097 I.addImplicitDefUseOperands(*MF); 2098 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 2099 } 2100 2101 const DebugLoc &DL = I.getDebugLoc(); 2102 2103 APInt Imm(Size, I.getOperand(1).getImm()); 2104 2105 MachineInstr *ResInst; 2106 if (IsSgpr && TII.isInlineConstant(Imm)) { 2107 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 2108 .addImm(I.getOperand(1).getImm()); 2109 } else { 2110 const TargetRegisterClass *RC = IsSgpr ? 
2111 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; 2112 Register LoReg = MRI->createVirtualRegister(RC); 2113 Register HiReg = MRI->createVirtualRegister(RC); 2114 2115 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) 2116 .addImm(Imm.trunc(32).getZExtValue()); 2117 2118 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) 2119 .addImm(Imm.ashr(32).getZExtValue()); 2120 2121 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2122 .addReg(LoReg) 2123 .addImm(AMDGPU::sub0) 2124 .addReg(HiReg) 2125 .addImm(AMDGPU::sub1); 2126 } 2127 2128 // We can't call constrainSelectedInstRegOperands here, because it doesn't 2129 // work for target independent opcodes 2130 I.eraseFromParent(); 2131 const TargetRegisterClass *DstRC = 2132 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); 2133 if (!DstRC) 2134 return true; 2135 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); 2136 } 2137 2138 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { 2139 // Only manually handle the f64 SGPR case. 2140 // 2141 // FIXME: This is a workaround for 2.5 different tablegen problems. Because 2142 // the bit ops theoretically have a second result due to the implicit def of 2143 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing 2144 // that is easy by disabling the check. The result works, but uses a 2145 // nonsensical sreg32orlds_and_sreg_1 regclass. 2146 // 2147 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to 2148 // the variadic REG_SEQUENCE operands. 2149 2150 Register Dst = MI.getOperand(0).getReg(); 2151 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2152 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2153 MRI->getType(Dst) != LLT::scalar(64)) 2154 return false; 2155 2156 Register Src = MI.getOperand(1).getReg(); 2157 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); 2158 if (Fabs) 2159 Src = Fabs->getOperand(1).getReg(); 2160 2161 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2162 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2163 return false; 2164 2165 MachineBasicBlock *BB = MI.getParent(); 2166 const DebugLoc &DL = MI.getDebugLoc(); 2167 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2168 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2169 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2170 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2171 2172 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2173 .addReg(Src, 0, AMDGPU::sub0); 2174 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2175 .addReg(Src, 0, AMDGPU::sub1); 2176 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2177 .addImm(0x80000000); 2178 2179 // Set or toggle sign bit. 2180 unsigned Opc = Fabs ? 
AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
  BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);
  MI.eraseFromParent();
  return true;
}

// FIXME: This is a workaround for the same tablegen problems as G_FNEG
bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
  Register Dst = MI.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
      MRI->getType(Dst) != LLT::scalar(64))
    return false;

  Register Src = MI.getOperand(1).getReg();
  MachineBasicBlock *BB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
    return false;

  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
    .addReg(Src, 0, AMDGPU::sub0);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
    .addReg(Src, 0, AMDGPU::sub1);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
    .addImm(0x7fffffff);

  // Clear sign bit.
  // TODO: Should this use S_BITSET0_*?
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
    .addReg(HiReg)
    .addReg(ConstReg);
  BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
    .addReg(LoReg)
    .addImm(AMDGPU::sub0)
    .addReg(OpReg)
    .addImm(AMDGPU::sub1);

  MI.eraseFromParent();
  return true;
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1; i != 3; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (i == 2 && isConstant(*OpDef)) {
      // TODO: Could handle constant base + variable offset, but a combine
      // probably should have commuted it.
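      // (e.g. a (G_PTR_ADD (G_CONSTANT), %reg) would put the constant in
      // operand 1, where it is not folded into GEPInfo.Imm below; a combine
      // is expected to have rewritten it as (G_PTR_ADD %reg, constant).)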
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
  const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
  unsigned AS = PtrTy.getAddressSpace();
  if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
      STI.ldsRequiresM0Init()) {
    MachineBasicBlock *BB = I.getParent();

    // If DS instructions require M0 initialization, insert it before selecting.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(-1);
  }
}

bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
  MachineInstr &I) const {
  if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
    const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
    unsigned AS = PtrTy.getAddressSpace();
    if (AS == AMDGPUAS::GLOBAL_ADDRESS)
      return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
  }

  initM0(I);
  return selectImpl(I, *CoverageInfo);
}

// TODO: No rtn optimization.
bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
  MachineInstr &MI) const {
  Register PtrReg = MI.getOperand(1).getReg();
  const LLT PtrTy = MRI->getType(PtrReg);
  if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
      STI.useFlatForGlobal())
    return selectImpl(MI, *CoverageInfo);

  Register DstReg = MI.getOperand(0).getReg();
  const LLT Ty = MRI->getType(DstReg);
  const bool Is64 = Ty.getSizeInBits() == 64;
  const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
  Register TmpReg = MRI->createVirtualRegister(
    Is64 ?
&AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); 2348 2349 const DebugLoc &DL = MI.getDebugLoc(); 2350 MachineBasicBlock *BB = MI.getParent(); 2351 2352 Register VAddr, RSrcReg, SOffset; 2353 int64_t Offset = 0; 2354 2355 unsigned Opcode; 2356 if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) { 2357 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN : 2358 AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN; 2359 } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr, 2360 RSrcReg, SOffset, Offset)) { 2361 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN : 2362 AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN; 2363 } else 2364 return selectImpl(MI, *CoverageInfo); 2365 2366 auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg) 2367 .addReg(MI.getOperand(2).getReg()); 2368 2369 if (VAddr) 2370 MIB.addReg(VAddr); 2371 2372 MIB.addReg(RSrcReg); 2373 if (SOffset) 2374 MIB.addReg(SOffset); 2375 else 2376 MIB.addImm(0); 2377 2378 MIB.addImm(Offset); 2379 MIB.addImm(AMDGPU::CPol::GLC); 2380 MIB.cloneMemRefs(MI); 2381 2382 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg) 2383 .addReg(TmpReg, RegState::Kill, SubReg); 2384 2385 MI.eraseFromParent(); 2386 2387 MRI->setRegClass( 2388 DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass); 2389 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 2390 } 2391 2392 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { 2393 MachineBasicBlock *BB = I.getParent(); 2394 MachineOperand &CondOp = I.getOperand(0); 2395 Register CondReg = CondOp.getReg(); 2396 const DebugLoc &DL = I.getDebugLoc(); 2397 2398 unsigned BrOpcode; 2399 Register CondPhysReg; 2400 const TargetRegisterClass *ConstrainRC; 2401 2402 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide 2403 // whether the branch is uniform when selecting the instruction. In 2404 // GlobalISel, we should push that decision into RegBankSelect. Assume for now 2405 // RegBankSelect knows what it's doing if the branch condition is scc, even 2406 // though it currently does not. 2407 if (!isVCC(CondReg, *MRI)) { 2408 if (MRI->getType(CondReg) != LLT::scalar(32)) 2409 return false; 2410 2411 CondPhysReg = AMDGPU::SCC; 2412 BrOpcode = AMDGPU::S_CBRANCH_SCC1; 2413 ConstrainRC = &AMDGPU::SReg_32RegClass; 2414 } else { 2415 // FIXME: Do we have to insert an and with exec here, like in SelectionDAG? 2416 // We sort of know that a VCC producer based on the register bank, that ands 2417 // inactive lanes with 0. What if there was a logical operation with vcc 2418 // producers in different blocks/with different exec masks? 2419 // FIXME: Should scc->vcc copies and with exec? 2420 CondPhysReg = TRI.getVCC(); 2421 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ; 2422 ConstrainRC = TRI.getBoolRC(); 2423 } 2424 2425 if (!MRI->getRegClassOrNull(CondReg)) 2426 MRI->setRegClass(CondReg, ConstrainRC); 2427 2428 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg) 2429 .addReg(CondReg); 2430 BuildMI(*BB, &I, DL, TII.get(BrOpcode)) 2431 .addMBB(I.getOperand(1).getMBB()); 2432 2433 I.eraseFromParent(); 2434 return true; 2435 } 2436 2437 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE( 2438 MachineInstr &I) const { 2439 Register DstReg = I.getOperand(0).getReg(); 2440 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2441 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2442 I.setDesc(TII.get(IsVGPR ? 
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32)); 2443 if (IsVGPR) 2444 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 2445 2446 return RBI.constrainGenericRegister( 2447 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); 2448 } 2449 2450 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { 2451 Register DstReg = I.getOperand(0).getReg(); 2452 Register SrcReg = I.getOperand(1).getReg(); 2453 Register MaskReg = I.getOperand(2).getReg(); 2454 LLT Ty = MRI->getType(DstReg); 2455 LLT MaskTy = MRI->getType(MaskReg); 2456 2457 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2458 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2459 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); 2460 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2461 if (DstRB != SrcRB) // Should only happen for hand written MIR. 2462 return false; 2463 2464 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 2465 const TargetRegisterClass &RegRC 2466 = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2467 2468 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB, 2469 *MRI); 2470 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB, 2471 *MRI); 2472 const TargetRegisterClass *MaskRC = 2473 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI); 2474 2475 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2476 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2477 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) 2478 return false; 2479 2480 MachineBasicBlock *BB = I.getParent(); 2481 const DebugLoc &DL = I.getDebugLoc(); 2482 if (Ty.getSizeInBits() == 32) { 2483 assert(MaskTy.getSizeInBits() == 32 && 2484 "ptrmask should have been narrowed during legalize"); 2485 2486 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) 2487 .addReg(SrcReg) 2488 .addReg(MaskReg); 2489 I.eraseFromParent(); 2490 return true; 2491 } 2492 2493 Register HiReg = MRI->createVirtualRegister(&RegRC); 2494 Register LoReg = MRI->createVirtualRegister(&RegRC); 2495 2496 // Extract the subregisters from the source pointer. 2497 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) 2498 .addReg(SrcReg, 0, AMDGPU::sub0); 2499 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) 2500 .addReg(SrcReg, 0, AMDGPU::sub1); 2501 2502 Register MaskedLo, MaskedHi; 2503 2504 // Try to avoid emitting a bit operation when we only need to touch half of 2505 // the 64-bit pointer. 2506 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64); 2507 2508 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); 2509 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); 2510 if ((MaskOnes & MaskLo32) == MaskLo32) { 2511 // If all the bits in the low half are 1, we only need a copy for it. 2512 MaskedLo = LoReg; 2513 } else { 2514 // Extract the mask subregister and apply the and. 2515 Register MaskLo = MRI->createVirtualRegister(&RegRC); 2516 MaskedLo = MRI->createVirtualRegister(&RegRC); 2517 2518 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) 2519 .addReg(MaskReg, 0, AMDGPU::sub0); 2520 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) 2521 .addReg(LoReg) 2522 .addReg(MaskLo); 2523 } 2524 2525 if ((MaskOnes & MaskHi32) == MaskHi32) { 2526 // If all the bits in the high half are 1, we only need a copy for it. 
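    // For example, aligning a pointer down to a 4 KiB boundary uses the mask
    // 0xFFFFFFFFFFFFF000: its high 32 bits are all ones, so the high dword is
    // copied through and only the low dword needs the AND.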
2527 MaskedHi = HiReg; 2528 } else { 2529 Register MaskHi = MRI->createVirtualRegister(&RegRC); 2530 MaskedHi = MRI->createVirtualRegister(&RegRC); 2531 2532 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) 2533 .addReg(MaskReg, 0, AMDGPU::sub1); 2534 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) 2535 .addReg(HiReg) 2536 .addReg(MaskHi); 2537 } 2538 2539 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2540 .addReg(MaskedLo) 2541 .addImm(AMDGPU::sub0) 2542 .addReg(MaskedHi) 2543 .addImm(AMDGPU::sub1); 2544 I.eraseFromParent(); 2545 return true; 2546 } 2547 2548 /// Return the register to use for the index value, and the subregister to use 2549 /// for the indirectly accessed register. 2550 static std::pair<Register, unsigned> 2551 computeIndirectRegIndex(MachineRegisterInfo &MRI, 2552 const SIRegisterInfo &TRI, 2553 const TargetRegisterClass *SuperRC, 2554 Register IdxReg, 2555 unsigned EltSize) { 2556 Register IdxBaseReg; 2557 int Offset; 2558 2559 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); 2560 if (IdxBaseReg == AMDGPU::NoRegister) { 2561 // This will happen if the index is a known constant. This should ordinarily 2562 // be legalized out, but handle it as a register just in case. 2563 assert(Offset == 0); 2564 IdxBaseReg = IdxReg; 2565 } 2566 2567 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); 2568 2569 // Skip out of bounds offsets, or else we would end up using an undefined 2570 // register. 2571 if (static_cast<unsigned>(Offset) >= SubRegs.size()) 2572 return std::make_pair(IdxReg, SubRegs[0]); 2573 return std::make_pair(IdxBaseReg, SubRegs[Offset]); 2574 } 2575 2576 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( 2577 MachineInstr &MI) const { 2578 Register DstReg = MI.getOperand(0).getReg(); 2579 Register SrcReg = MI.getOperand(1).getReg(); 2580 Register IdxReg = MI.getOperand(2).getReg(); 2581 2582 LLT DstTy = MRI->getType(DstReg); 2583 LLT SrcTy = MRI->getType(SrcReg); 2584 2585 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2586 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2587 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2588 2589 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2590 // into a waterfall loop. 2591 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2592 return false; 2593 2594 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB, 2595 *MRI); 2596 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB, 2597 *MRI); 2598 if (!SrcRC || !DstRC) 2599 return false; 2600 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2601 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2602 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2603 return false; 2604 2605 MachineBasicBlock *BB = MI.getParent(); 2606 const DebugLoc &DL = MI.getDebugLoc(); 2607 const bool Is64 = DstTy.getSizeInBits() == 64; 2608 2609 unsigned SubReg; 2610 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg, 2611 DstTy.getSizeInBits() / 8); 2612 2613 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { 2614 if (DstTy.getSizeInBits() != 32 && !Is64) 2615 return false; 2616 2617 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2618 .addReg(IdxReg); 2619 2620 unsigned Opc = Is64 ? 
AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32; 2621 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg) 2622 .addReg(SrcReg, 0, SubReg) 2623 .addReg(SrcReg, RegState::Implicit); 2624 MI.eraseFromParent(); 2625 return true; 2626 } 2627 2628 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32) 2629 return false; 2630 2631 if (!STI.useVGPRIndexMode()) { 2632 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2633 .addReg(IdxReg); 2634 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg) 2635 .addReg(SrcReg, 0, SubReg) 2636 .addReg(SrcReg, RegState::Implicit); 2637 MI.eraseFromParent(); 2638 return true; 2639 } 2640 2641 const MCInstrDesc &GPRIDXDesc = 2642 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true); 2643 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2644 .addReg(SrcReg) 2645 .addReg(IdxReg) 2646 .addImm(SubReg); 2647 2648 MI.eraseFromParent(); 2649 return true; 2650 } 2651 2652 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd 2653 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT( 2654 MachineInstr &MI) const { 2655 Register DstReg = MI.getOperand(0).getReg(); 2656 Register VecReg = MI.getOperand(1).getReg(); 2657 Register ValReg = MI.getOperand(2).getReg(); 2658 Register IdxReg = MI.getOperand(3).getReg(); 2659 2660 LLT VecTy = MRI->getType(DstReg); 2661 LLT ValTy = MRI->getType(ValReg); 2662 unsigned VecSize = VecTy.getSizeInBits(); 2663 unsigned ValSize = ValTy.getSizeInBits(); 2664 2665 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); 2666 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); 2667 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2668 2669 assert(VecTy.getElementType() == ValTy); 2670 2671 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2672 // into a waterfall loop. 
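  // (Indirect register addressing goes through M0 or the GPR-index mode,
  // which is a single scalar value shared by the whole wave, so a divergent
  // index cannot be used here directly; the waterfall loop iterates over the
  // distinct index values instead.)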
2673 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2674 return false; 2675 2676 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB, 2677 *MRI); 2678 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB, 2679 *MRI); 2680 2681 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || 2682 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || 2683 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || 2684 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2685 return false; 2686 2687 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) 2688 return false; 2689 2690 unsigned SubReg; 2691 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, 2692 ValSize / 8); 2693 2694 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && 2695 STI.useVGPRIndexMode(); 2696 2697 MachineBasicBlock *BB = MI.getParent(); 2698 const DebugLoc &DL = MI.getDebugLoc(); 2699 2700 if (!IndexMode) { 2701 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2702 .addReg(IdxReg); 2703 2704 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo( 2705 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID); 2706 BuildMI(*BB, MI, DL, RegWriteOp, DstReg) 2707 .addReg(VecReg) 2708 .addReg(ValReg) 2709 .addImm(SubReg); 2710 MI.eraseFromParent(); 2711 return true; 2712 } 2713 2714 const MCInstrDesc &GPRIDXDesc = 2715 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 2716 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2717 .addReg(VecReg) 2718 .addReg(ValReg) 2719 .addReg(IdxReg) 2720 .addImm(SubReg); 2721 2722 MI.eraseFromParent(); 2723 return true; 2724 } 2725 2726 static bool isZeroOrUndef(int X) { 2727 return X == 0 || X == -1; 2728 } 2729 2730 static bool isOneOrUndef(int X) { 2731 return X == 1 || X == -1; 2732 } 2733 2734 static bool isZeroOrOneOrUndef(int X) { 2735 return X == 0 || X == 1 || X == -1; 2736 } 2737 2738 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single 2739 // 32-bit register. 2740 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1, 2741 ArrayRef<int> Mask) { 2742 NewMask[0] = Mask[0]; 2743 NewMask[1] = Mask[1]; 2744 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1])) 2745 return Src0; 2746 2747 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1); 2748 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1); 2749 2750 // Shift the mask inputs to be 0/1; 2751 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2; 2752 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2; 2753 return Src1; 2754 } 2755 2756 // This is only legal with VOP3P instructions as an aid to op_sel matching. 
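// For the <2 x s16> case handled here the mask elements select 16-bit halves,
// so every legal mask lowers to a half-word move: a 16-bit shift, an SDWA move
// that overwrites one half, an s_pack, or v_alignbit for the swap, as below.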
2757 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR( 2758 MachineInstr &MI) const { 2759 Register DstReg = MI.getOperand(0).getReg(); 2760 Register Src0Reg = MI.getOperand(1).getReg(); 2761 Register Src1Reg = MI.getOperand(2).getReg(); 2762 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask(); 2763 2764 const LLT V2S16 = LLT::vector(2, 16); 2765 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16) 2766 return false; 2767 2768 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask)) 2769 return false; 2770 2771 assert(ShufMask.size() == 2); 2772 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA"); 2773 2774 MachineBasicBlock *MBB = MI.getParent(); 2775 const DebugLoc &DL = MI.getDebugLoc(); 2776 2777 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2778 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 2779 const TargetRegisterClass &RC = IsVALU ? 2780 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2781 2782 // Handle the degenerate case which should have folded out. 2783 if (ShufMask[0] == -1 && ShufMask[1] == -1) { 2784 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg); 2785 2786 MI.eraseFromParent(); 2787 return RBI.constrainGenericRegister(DstReg, RC, *MRI); 2788 } 2789 2790 // A legal VOP3P mask only reads one of the sources. 2791 int Mask[2]; 2792 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask); 2793 2794 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) || 2795 !RBI.constrainGenericRegister(SrcVec, RC, *MRI)) 2796 return false; 2797 2798 // TODO: This also should have been folded out 2799 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) { 2800 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg) 2801 .addReg(SrcVec); 2802 2803 MI.eraseFromParent(); 2804 return true; 2805 } 2806 2807 if (Mask[0] == 1 && Mask[1] == -1) { 2808 if (IsVALU) { 2809 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 2810 .addImm(16) 2811 .addReg(SrcVec); 2812 } else { 2813 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 2814 .addReg(SrcVec) 2815 .addImm(16); 2816 } 2817 } else if (Mask[0] == -1 && Mask[1] == 0) { 2818 if (IsVALU) { 2819 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg) 2820 .addImm(16) 2821 .addReg(SrcVec); 2822 } else { 2823 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg) 2824 .addReg(SrcVec) 2825 .addImm(16); 2826 } 2827 } else if (Mask[0] == 0 && Mask[1] == 0) { 2828 if (IsVALU) { 2829 // Write low half of the register into the high half. 2830 MachineInstr *MovSDWA = 2831 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2832 .addImm(0) // $src0_modifiers 2833 .addReg(SrcVec) // $src0 2834 .addImm(0) // $clamp 2835 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 2836 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2837 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 2838 .addReg(SrcVec, RegState::Implicit); 2839 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2840 } else { 2841 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2842 .addReg(SrcVec) 2843 .addReg(SrcVec); 2844 } 2845 } else if (Mask[0] == 1 && Mask[1] == 1) { 2846 if (IsVALU) { 2847 // Write high half of the register into the low half. 
2848 MachineInstr *MovSDWA = 2849 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2850 .addImm(0) // $src0_modifiers 2851 .addReg(SrcVec) // $src0 2852 .addImm(0) // $clamp 2853 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel 2854 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2855 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel 2856 .addReg(SrcVec, RegState::Implicit); 2857 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2858 } else { 2859 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg) 2860 .addReg(SrcVec) 2861 .addReg(SrcVec); 2862 } 2863 } else if (Mask[0] == 1 && Mask[1] == 0) { 2864 if (IsVALU) { 2865 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg) 2866 .addReg(SrcVec) 2867 .addReg(SrcVec) 2868 .addImm(16); 2869 } else { 2870 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2871 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg) 2872 .addReg(SrcVec) 2873 .addImm(16); 2874 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2875 .addReg(TmpReg) 2876 .addReg(SrcVec); 2877 } 2878 } else 2879 llvm_unreachable("all shuffle masks should be handled"); 2880 2881 MI.eraseFromParent(); 2882 return true; 2883 } 2884 2885 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD( 2886 MachineInstr &MI) const { 2887 if (STI.hasGFX90AInsts()) 2888 return selectImpl(MI, *CoverageInfo); 2889 2890 MachineBasicBlock *MBB = MI.getParent(); 2891 const DebugLoc &DL = MI.getDebugLoc(); 2892 2893 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2894 Function &F = MBB->getParent()->getFunction(); 2895 DiagnosticInfoUnsupported 2896 NoFpRet(F, "return versions of fp atomics not supported", 2897 MI.getDebugLoc(), DS_Error); 2898 F.getContext().diagnose(NoFpRet); 2899 return false; 2900 } 2901 2902 // FIXME: This is only needed because tablegen requires number of dst operands 2903 // in match and replace pattern to be the same. Otherwise patterns can be 2904 // exported from SDag path. 2905 MachineOperand &VDataIn = MI.getOperand(1); 2906 MachineOperand &VIndex = MI.getOperand(3); 2907 MachineOperand &VOffset = MI.getOperand(4); 2908 MachineOperand &SOffset = MI.getOperand(5); 2909 int16_t Offset = MI.getOperand(6).getImm(); 2910 2911 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI); 2912 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI); 2913 2914 unsigned Opcode; 2915 if (HasVOffset) { 2916 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN 2917 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN; 2918 } else { 2919 Opcode = HasVIndex ? 
AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN 2920 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET; 2921 } 2922 2923 if (MRI->getType(VDataIn.getReg()).isVector()) { 2924 switch (Opcode) { 2925 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN: 2926 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN; 2927 break; 2928 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN: 2929 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN; 2930 break; 2931 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN: 2932 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN; 2933 break; 2934 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET: 2935 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET; 2936 break; 2937 } 2938 } 2939 2940 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode)); 2941 I.add(VDataIn); 2942 2943 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN || 2944 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) { 2945 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); 2946 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 2947 .addReg(VIndex.getReg()) 2948 .addImm(AMDGPU::sub0) 2949 .addReg(VOffset.getReg()) 2950 .addImm(AMDGPU::sub1); 2951 2952 I.addReg(IdxReg); 2953 } else if (HasVIndex) { 2954 I.add(VIndex); 2955 } else if (HasVOffset) { 2956 I.add(VOffset); 2957 } 2958 2959 I.add(MI.getOperand(2)); // rsrc 2960 I.add(SOffset); 2961 I.addImm(Offset); 2962 I.addImm(MI.getOperand(7).getImm()); // cpol 2963 I.cloneMemRefs(MI); 2964 2965 MI.eraseFromParent(); 2966 2967 return true; 2968 } 2969 2970 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd( 2971 MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const { 2972 2973 if (STI.hasGFX90AInsts()) { 2974 // gfx90a adds return versions of the global atomic fadd instructions so no 2975 // special handling is required. 2976 return selectImpl(MI, *CoverageInfo); 2977 } 2978 2979 MachineBasicBlock *MBB = MI.getParent(); 2980 const DebugLoc &DL = MI.getDebugLoc(); 2981 2982 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2983 Function &F = MBB->getParent()->getFunction(); 2984 DiagnosticInfoUnsupported 2985 NoFpRet(F, "return versions of fp atomics not supported", 2986 MI.getDebugLoc(), DS_Error); 2987 F.getContext().diagnose(NoFpRet); 2988 return false; 2989 } 2990 2991 // FIXME: This is only needed because tablegen requires number of dst operands 2992 // in match and replace pattern to be the same. Otherwise patterns can be 2993 // exported from SDag path. 2994 auto Addr = selectFlatOffsetImpl<true>(AddrOp); 2995 2996 Register Data = DataOp.getReg(); 2997 const unsigned Opc = MRI->getType(Data).isVector() ? 
2998 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32; 2999 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3000 .addReg(Addr.first) 3001 .addReg(Data) 3002 .addImm(Addr.second) 3003 .addImm(0) // cpol 3004 .cloneMemRefs(MI); 3005 3006 MI.eraseFromParent(); 3007 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3008 } 3009 3010 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{ 3011 MI.setDesc(TII.get(MI.getOperand(1).getImm())); 3012 MI.RemoveOperand(1); 3013 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3014 return true; 3015 } 3016 3017 bool AMDGPUInstructionSelector::select(MachineInstr &I) { 3018 if (I.isPHI()) 3019 return selectPHI(I); 3020 3021 if (!I.isPreISelOpcode()) { 3022 if (I.isCopy()) 3023 return selectCOPY(I); 3024 return true; 3025 } 3026 3027 switch (I.getOpcode()) { 3028 case TargetOpcode::G_AND: 3029 case TargetOpcode::G_OR: 3030 case TargetOpcode::G_XOR: 3031 if (selectImpl(I, *CoverageInfo)) 3032 return true; 3033 return selectG_AND_OR_XOR(I); 3034 case TargetOpcode::G_ADD: 3035 case TargetOpcode::G_SUB: 3036 if (selectImpl(I, *CoverageInfo)) 3037 return true; 3038 return selectG_ADD_SUB(I); 3039 case TargetOpcode::G_UADDO: 3040 case TargetOpcode::G_USUBO: 3041 case TargetOpcode::G_UADDE: 3042 case TargetOpcode::G_USUBE: 3043 return selectG_UADDO_USUBO_UADDE_USUBE(I); 3044 case TargetOpcode::G_INTTOPTR: 3045 case TargetOpcode::G_BITCAST: 3046 case TargetOpcode::G_PTRTOINT: 3047 return selectCOPY(I); 3048 case TargetOpcode::G_CONSTANT: 3049 case TargetOpcode::G_FCONSTANT: 3050 return selectG_CONSTANT(I); 3051 case TargetOpcode::G_FNEG: 3052 if (selectImpl(I, *CoverageInfo)) 3053 return true; 3054 return selectG_FNEG(I); 3055 case TargetOpcode::G_FABS: 3056 if (selectImpl(I, *CoverageInfo)) 3057 return true; 3058 return selectG_FABS(I); 3059 case TargetOpcode::G_EXTRACT: 3060 return selectG_EXTRACT(I); 3061 case TargetOpcode::G_MERGE_VALUES: 3062 case TargetOpcode::G_BUILD_VECTOR: 3063 case TargetOpcode::G_CONCAT_VECTORS: 3064 return selectG_MERGE_VALUES(I); 3065 case TargetOpcode::G_UNMERGE_VALUES: 3066 return selectG_UNMERGE_VALUES(I); 3067 case TargetOpcode::G_BUILD_VECTOR_TRUNC: 3068 return selectG_BUILD_VECTOR_TRUNC(I); 3069 case TargetOpcode::G_PTR_ADD: 3070 return selectG_PTR_ADD(I); 3071 case TargetOpcode::G_IMPLICIT_DEF: 3072 return selectG_IMPLICIT_DEF(I); 3073 case TargetOpcode::G_FREEZE: 3074 return selectCOPY(I); 3075 case TargetOpcode::G_INSERT: 3076 return selectG_INSERT(I); 3077 case TargetOpcode::G_INTRINSIC: 3078 return selectG_INTRINSIC(I); 3079 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 3080 return selectG_INTRINSIC_W_SIDE_EFFECTS(I); 3081 case TargetOpcode::G_ICMP: 3082 if (selectG_ICMP(I)) 3083 return true; 3084 return selectImpl(I, *CoverageInfo); 3085 case TargetOpcode::G_LOAD: 3086 case TargetOpcode::G_STORE: 3087 case TargetOpcode::G_ATOMIC_CMPXCHG: 3088 case TargetOpcode::G_ATOMICRMW_XCHG: 3089 case TargetOpcode::G_ATOMICRMW_ADD: 3090 case TargetOpcode::G_ATOMICRMW_SUB: 3091 case TargetOpcode::G_ATOMICRMW_AND: 3092 case TargetOpcode::G_ATOMICRMW_OR: 3093 case TargetOpcode::G_ATOMICRMW_XOR: 3094 case TargetOpcode::G_ATOMICRMW_MIN: 3095 case TargetOpcode::G_ATOMICRMW_MAX: 3096 case TargetOpcode::G_ATOMICRMW_UMIN: 3097 case TargetOpcode::G_ATOMICRMW_UMAX: 3098 case TargetOpcode::G_ATOMICRMW_FADD: 3099 case AMDGPU::G_AMDGPU_ATOMIC_INC: 3100 case AMDGPU::G_AMDGPU_ATOMIC_DEC: 3101 case AMDGPU::G_AMDGPU_ATOMIC_FMIN: 3102 case AMDGPU::G_AMDGPU_ATOMIC_FMAX: 3103 return 
selectG_LOAD_STORE_ATOMICRMW(I); 3104 case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: 3105 return selectG_AMDGPU_ATOMIC_CMPXCHG(I); 3106 case TargetOpcode::G_SELECT: 3107 return selectG_SELECT(I); 3108 case TargetOpcode::G_TRUNC: 3109 return selectG_TRUNC(I); 3110 case TargetOpcode::G_SEXT: 3111 case TargetOpcode::G_ZEXT: 3112 case TargetOpcode::G_ANYEXT: 3113 case TargetOpcode::G_SEXT_INREG: 3114 if (selectImpl(I, *CoverageInfo)) 3115 return true; 3116 return selectG_SZA_EXT(I); 3117 case TargetOpcode::G_BRCOND: 3118 return selectG_BRCOND(I); 3119 case TargetOpcode::G_GLOBAL_VALUE: 3120 return selectG_GLOBAL_VALUE(I); 3121 case TargetOpcode::G_PTRMASK: 3122 return selectG_PTRMASK(I); 3123 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 3124 return selectG_EXTRACT_VECTOR_ELT(I); 3125 case TargetOpcode::G_INSERT_VECTOR_ELT: 3126 return selectG_INSERT_VECTOR_ELT(I); 3127 case TargetOpcode::G_SHUFFLE_VECTOR: 3128 return selectG_SHUFFLE_VECTOR(I); 3129 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: 3130 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: { 3131 const AMDGPU::ImageDimIntrinsicInfo *Intr 3132 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); 3133 assert(Intr && "not an image intrinsic with image pseudo"); 3134 return selectImageIntrinsic(I, Intr); 3135 } 3136 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: 3137 return selectBVHIntrinsic(I); 3138 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: 3139 return selectAMDGPU_BUFFER_ATOMIC_FADD(I); 3140 default: 3141 return selectImpl(I, *CoverageInfo); 3142 } 3143 return false; 3144 } 3145 3146 InstructionSelector::ComplexRendererFns 3147 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { 3148 return {{ 3149 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3150 }}; 3151 3152 } 3153 3154 std::pair<Register, unsigned> 3155 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root, 3156 bool AllowAbs) const { 3157 Register Src = Root.getReg(); 3158 Register OrigSrc = Src; 3159 unsigned Mods = 0; 3160 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); 3161 3162 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) { 3163 Src = MI->getOperand(1).getReg(); 3164 Mods |= SISrcMods::NEG; 3165 MI = getDefIgnoringCopies(Src, *MRI); 3166 } 3167 3168 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) { 3169 Src = MI->getOperand(1).getReg(); 3170 Mods |= SISrcMods::ABS; 3171 } 3172 3173 if (Mods != 0 && 3174 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { 3175 MachineInstr *UseMI = Root.getParent(); 3176 3177 // If we looked through copies to find source modifiers on an SGPR operand, 3178 // we now have an SGPR register source. To avoid potentially violating the 3179 // constant bus restriction, we need to insert a copy to a VGPR. 3180 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc); 3181 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(), 3182 TII.get(AMDGPU::COPY), VGPRSrc) 3183 .addReg(Src); 3184 Src = VGPRSrc; 3185 } 3186 3187 return std::make_pair(Src, Mods); 3188 } 3189 3190 /// 3191 /// This will select either an SGPR or VGPR operand and will save us from 3192 /// having to write an extra tablegen pattern. 
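/// (The renderer itself is a pass-through; like selectVCSRC above it simply
/// re-adds the root operand and relies on later operand constraining.)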
3193 InstructionSelector::ComplexRendererFns 3194 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { 3195 return {{ 3196 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3197 }}; 3198 } 3199 3200 InstructionSelector::ComplexRendererFns 3201 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { 3202 Register Src; 3203 unsigned Mods; 3204 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3205 3206 return {{ 3207 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3208 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3209 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3210 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3211 }}; 3212 } 3213 3214 InstructionSelector::ComplexRendererFns 3215 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { 3216 Register Src; 3217 unsigned Mods; 3218 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3219 3220 return {{ 3221 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3222 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3223 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3224 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3225 }}; 3226 } 3227 3228 InstructionSelector::ComplexRendererFns 3229 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { 3230 return {{ 3231 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, 3232 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3233 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3234 }}; 3235 } 3236 3237 InstructionSelector::ComplexRendererFns 3238 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { 3239 Register Src; 3240 unsigned Mods; 3241 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3242 3243 return {{ 3244 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3245 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3246 }}; 3247 } 3248 3249 InstructionSelector::ComplexRendererFns 3250 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { 3251 Register Src; 3252 unsigned Mods; 3253 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3254 3255 return {{ 3256 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3257 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3258 }}; 3259 } 3260 3261 InstructionSelector::ComplexRendererFns 3262 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { 3263 Register Reg = Root.getReg(); 3264 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); 3265 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG || 3266 Def->getOpcode() == AMDGPU::G_FABS)) 3267 return {}; 3268 return {{ 3269 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3270 }}; 3271 } 3272 3273 std::pair<Register, unsigned> 3274 AMDGPUInstructionSelector::selectVOP3PModsImpl( 3275 Register Src, const MachineRegisterInfo &MRI) const { 3276 unsigned Mods = 0; 3277 MachineInstr *MI = MRI.getVRegDef(Src); 3278 3279 if (MI && MI->getOpcode() == AMDGPU::G_FNEG && 3280 // It's possible to see an f32 fneg here, but unlikely. 3281 // TODO: Treat f32 fneg as only high bit. 3282 MRI.getType(Src) == LLT::vector(2, 16)) { 3283 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 3284 Src = MI->getOperand(1).getReg(); 3285 MI = MRI.getVRegDef(Src); 3286 } 3287 3288 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. 3289 3290 // Packed instructions do not have abs modifiers. 
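  // Note: OP_SEL_1 is the op_sel_hi bit; assuming the usual VOP3P convention,
  // op_sel_hi defaults to 1 so that the high half of each source feeds the
  // high half of the result.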
  Mods |= SISrcMods::OP_SEL_1;

  return std::make_pair(Src, Mods);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
  MachineRegisterInfo &MRI
    = Root.getParent()->getParent()->getParent()->getRegInfo();

  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
  Register Src;
  unsigned Mods;
  std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
  if (!isKnownNeverNaN(Src, *MRI))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
  // FIXME: Handle op_sel
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
  if (!EncodedImm)
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  Register PtrReg = GEPInfo.SgprParts[0];
  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
  if (!EncodedImm)
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, *MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  // SGPR offset is unsigned.
  if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM Patterns are considered before the _SGPR patterns.
  Register PtrReg = GEPInfo.SgprParts[0];
  Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}

template <bool Signed>
std::pair<Register, int>
AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();

  auto Default = std::make_pair(Root.getReg(), 0);

  if (!STI.hasFlatInstOffsets())
    return Default;

  Register PtrBase;
  int64_t ConstOffset;
  std::tie(PtrBase, ConstOffset) =
      getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
  if (ConstOffset == 0)
    return Default;

  unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
  if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, Signed))
    return Default;

  return std::make_pair(PtrBase, ConstOffset);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl<false>(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
  auto PtrWithOffset = selectFlatOffsetImpl<true>(Root);

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
  }};
}

/// Match a zero extend from a 32-bit value to 64 bits.
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
  Register ZExtSrc;
  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
    return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();

  // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();

  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
    return Def->getOperand(1).getReg();
  }

  return Register();
}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
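  // For example, %addr = G_PTR_ADD %base, %c with %c = G_CONSTANT 16 yields
  // PtrBase = %base and ConstOffset = 16; anything else yields (Addr, 0).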
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0) {
    if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true)) {
      Addr = PtrBase;
      ImmOffset = ConstOffset;
    } else if (ConstOffset > 0) {
      auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
      if (!PtrBaseDef)
        return None;

      if (isSGPR(PtrBaseDef->Reg)) {
        // Offset is too large.
        //
        // saddr + large_offset -> saddr + (voffset = large_offset & ~MaxOffset)
        //                         + (large_offset & MaxOffset);
        int64_t SplitImmOffset, RemainderOffset;
        std::tie(SplitImmOffset, RemainderOffset)
          = TII.splitFlatOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, true);

        if (isUInt<32>(RemainderOffset)) {
          MachineInstr *MI = Root.getParent();
          MachineBasicBlock *MBB = MI->getParent();
          Register HighBits
            = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

          BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
                  HighBits)
            .addImm(RemainderOffset);

          return {{
              [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); },  // saddr
              [=](MachineInstrBuilder &MIB) { MIB.addReg(HighBits); }, // voffset
              [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
          }};
        }
      }
    }
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (!AddrDef)
    return None;

  // Match the variable offset.
  if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) {
    // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
    // drop this.
    if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
        AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT)
      return None;

    // It's cheaper to materialize a single 32-bit zero for vaddr than the two
    // moves required to copy a 64-bit SGPR to VGPR.
    const Register SAddr = AddrDef->Reg;
    if (!isSGPR(SAddr))
      return None;

    MachineInstr *MI = Root.getParent();
    MachineBasicBlock *MBB = MI->getParent();
    Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            VOffset)
      .addImm(0);

    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },  // voffset
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
    }};
  }

  // Look through the SGPR->VGPR copy.
  Register SAddr =
    getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
  if (!SAddr || !isSGPR(SAddr))
    return None;

  Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();

  // It's possible voffset is an SGPR here, but the copy to VGPR will be
  // inserted later.
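  // Accept either %off64 = G_ZEXT %off32(s32) or the legalized form
  // %off64 = G_MERGE_VALUES %off32(s32), 0 (see matchZeroExtendFromS32).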
  Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset);
  if (!VOffset)
    return None;

  return {{[=](MachineInstrBuilder &MIB) { // saddr
             MIB.addReg(SAddr);
           },
           [=](MachineInstrBuilder &MIB) { // voffset
             MIB.addReg(VOffset);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(ImmOffset);
           }}};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0 &&
      TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
    Addr = PtrBase;
    ImmOffset = ConstOffset;
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (!AddrDef)
    return None;

  if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
    int FI = AddrDef->MI->getOperand(1).getIndex();
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
        [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }  // offset
    }};
  }

  Register SAddr = AddrDef->Reg;

  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
    Register LHS = AddrDef->MI->getOperand(1).getReg();
    Register RHS = AddrDef->MI->getOperand(2).getReg();
    auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
    auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);

    if (LHSDef && RHSDef &&
        LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
        isSGPR(RHSDef->Reg)) {
      int FI = LHSDef->MI->getOperand(1).getIndex();
      MachineInstr &I = *Root.getParent();
      MachineBasicBlock *BB = I.getParent();
      const DebugLoc &DL = I.getDebugLoc();
      SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);

      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), SAddr)
        .addFrameIndex(FI)
        .addReg(RHSDef->Reg);
    }
  }

  if (!isSGPR(SAddr))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); },    // saddr
      [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
  }};
}

static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
  auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
  return PSV && PSV->isStack();
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  int64_t Offset = 0;
  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
      Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
    Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

    // TODO: Should this be inside the render function? The iterator seems to
    // move.
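    // Split the constant into a 4096-byte aligned base materialized in a VGPR
    // and a 12-bit immediate, e.g. a (hypothetical) offset of 0x1234 becomes
    // vaddr = 0x1000 with offset = 0x234.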
    BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
            HighBits)
      .addImm(Offset & ~4095);

    return {{[=](MachineInstrBuilder &MIB) { // rsrc
               MIB.addReg(Info->getScratchRSrcReg());
             },
             [=](MachineInstrBuilder &MIB) { // vaddr
               MIB.addReg(HighBits);
             },
             [=](MachineInstrBuilder &MIB) { // soffset
               // Use constant zero for soffset and rely on eliminateFrameIndex
               // to choose the appropriate frame register if need be.
               MIB.addImm(0);
             },
             [=](MachineInstrBuilder &MIB) { // offset
               MIB.addImm(Offset & 4095);
             }}};
  }

  assert(Offset == 0 || Offset == -1);

  // Try to fold a frame index directly into the MUBUF vaddr field, and any
  // offsets.
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    Register PtrBase;
    int64_t ConstOffset;
    std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
    if (ConstOffset != 0) {
      if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
          (!STI.privateMemoryResourceIsRangeChecked() ||
           KnownBits->signBitIsZero(PtrBase))) {
        const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
        if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
          FI = PtrBaseDef->getOperand(1).getIndex();
        else
          VAddr = PtrBase;
        Offset = ConstOffset;
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // Use constant zero for soffset and rely on eliminateFrameIndex
             // to choose the appropriate frame register if need be.
             MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
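  // A known-zero sign bit means the base is provably non-negative, so the
  // problematic negative-base case cannot occur.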
  return KnownBits->signBitIsZero(Base);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();

  int64_t Offset = 0;
  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  const MachineFunction *MF = MBB->getParent();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
  const MachineMemOperand *MMO = *MI->memoperands_begin();
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (isStackPtrRelative(PtrInfo))
          MIB.addReg(Info->getStackPtrOffsetReg());
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 4);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 8);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
  }};
}

std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

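  // For example (hypothetical), with Size == 4 a byte offset of 8 returns
  // element offset 2 here, which the caller renders as offset0 = 2 and
  // offset1 = 3.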
  if (Offset) {
    int64_t OffsetValue0 = Offset;
    int64_t OffsetValue1 = Offset + Size;
    if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, OffsetValue0 / Size);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this does
/// not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
    Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  Optional<ValueAndVReg> MaybeOffset
    = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
}

static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
/// BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc2)
    .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc3)
    .addImm(FormatHi);

  // Build the half of the subregister with the constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrcHi)
    .addReg(RSrc2)
    .addImm(AMDGPU::sub0)
    .addReg(RSrc3)
    .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
      .addDef(RSrcLo)
      .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrc)
    .addReg(RSrcLo)
    .addImm(AMDGPU::sub0_sub1)
    .addReg(RSrcHi)
    .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
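  // Word 2 of the descriptor (presumably num_records) is left as 0 for the
  // addr64 form; only the high half of the default format goes into word 3.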
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: Don't know this was defined by operand 0
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}

/// Return whether the addr64 MUBUF mode should be used for the given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
    MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // Illegal offset, store it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
    MachineOperand &Root, Register &VAddr, Register &RSrcReg,
    Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
    MachineOperand &Root, Register &RSrcReg, Register &SOffset,
    int64_t &Offset) const {

  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, // cpol
      addZeroImm, // tfe
      addZeroImm  // swz
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, // cpol
      addZeroImm, // tfe
      addZeroImm, // swz
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
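  // Atomics set GLC in the cache policy so the pre-op value is returned to the
  // destination register.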
  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) {
        MIB.addImm(AMDGPU::CPol::GLC); // cpol
      }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
  }};
}

/// Get an immediate that must be 32-bits, and treated as zero extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getConstantVRegVal sexts any values, so see if that matters.
  Optional<int64_t> OffsetVal = getConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm
    = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }};
}

void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy DAG type checking machinery, so is a
/// no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex((MI.getOperand(1).getIndex()));
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}