//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPU.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;
using namespace MIPatternMatch;

static cl::opt<bool> AllowRiskySelect(
  "amdgpu-global-isel-risky-select",
  cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
  cl::init(false),
  cl::ReallyHidden);

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                        CodeGenCoverage &CoverageInfo,
                                        ProfileSummaryInfo *PSI,
                                        BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  Subtarget = &MF.getSubtarget<GCNSubtarget>();
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}

bool AMDGPUInstructionSelector::isVCC(Register Reg,
                                      const MachineRegisterInfo &MRI) const {
  // The verifier is oblivious to s1 being a valid value for wavesize registers.
  if (Reg.isPhysical())
    return false;

  auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
  const TargetRegisterClass *RC =
      RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
  if (RC) {
    const LLT Ty = MRI.getType(Reg);
    return RC->hasSuperClassEq(TRI.getBoolRC()) &&
           Ty.isValid() && Ty.getSizeInBits() == 1;
  }

  const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
  return RB->getID() == AMDGPU::VCCRegBankID;
}

bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
                                                        unsigned NewOpc) const {
  MI.setDesc(TII.get(NewOpc));
  MI.RemoveOperand(1); // Remove intrinsic ID.
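  // Add an implicit use of EXEC: the copy-like WQM/WWM pseudos selected here
  // still depend on the exec mask even though they otherwise behave as copies.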
  MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));

  MachineOperand &Dst = MI.getOperand(0);
  MachineOperand &Src = MI.getOperand(1);

  // TODO: This should be legalized to s32 if needed
  if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
    return false;

  const TargetRegisterClass *DstRC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  const TargetRegisterClass *SrcRC
    = TRI.getConstrainedRegClassForOperand(Src, *MRI);
  if (!DstRC || DstRC != SrcRC)
    return false;

  return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
         RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
}

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  const DebugLoc &DL = I.getDebugLoc();
  MachineBasicBlock *BB = I.getParent();
  I.setDesc(TII.get(TargetOpcode::COPY));

  const MachineOperand &Src = I.getOperand(1);
  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  Register SrcReg = Src.getReg();

  if (isVCC(DstReg, *MRI)) {
    if (SrcReg == AMDGPU::SCC) {
      const TargetRegisterClass *RC
        = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
      if (!RC)
        return true;
      return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
    }

    if (!isVCC(SrcReg, *MRI)) {
      // TODO: Should probably leave the copy and let copyPhysReg expand it.
      if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
        return false;

      const TargetRegisterClass *SrcRC
        = TRI.getConstrainedRegClassForOperand(Src, *MRI);

      Optional<ValueAndVReg> ConstVal =
          getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
      if (ConstVal) {
        unsigned MovOpc =
            STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
        BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
            .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
      } else {
        Register MaskedReg = MRI->createVirtualRegister(SrcRC);

        // We can't trust the high bits at this point, so clear them.

        // TODO: Skip masking high bits if def is known boolean.

        unsigned AndOpc =
            TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
        BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
            .addImm(1)
            .addReg(SrcReg);
        BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
            .addImm(0)
            .addReg(MaskedReg);
      }

      if (!MRI->getRegClassOrNull(SrcReg))
        MRI->setRegClass(SrcReg, SrcRC);
      I.eraseFromParent();
      return true;
    }

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
      return false;

    return true;
  }

  for (const MachineOperand &MO : I.operands()) {
    if (MO.getReg().isPhysical())
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, *MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
  }
  return true;
}

bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
  const Register DefReg = I.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);
  if (DefTy == LLT::scalar(1)) {
    if (!AllowRiskySelect) {
      LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
      return false;
    }

    LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
  }

  // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
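  // Use the register class already assigned to the PHI result if there is
  // one; otherwise derive it from the result's register bank and type.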

  const RegClassOrRegBank &RegClassOrBank =
    MRI->getRegClassOrRegBank(DefReg);

  const TargetRegisterClass *DefRC
    = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
  if (!DefRC) {
    if (!DefTy.isValid()) {
      LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
      return false;
    }

    const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
    DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
    if (!DefRC) {
      LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
      return false;
    }
  }

  // TODO: Verify that all registers have the same bank
  I.setDesc(TII.get(TargetOpcode::PHI));
  return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
}

MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           const TargetRegisterClass &SubRC,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  Register DstReg = MRI->createVirtualRegister(&SubRC);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    Register Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
      .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("don't know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
  switch (Opc) {
  case AMDGPU::G_AND:
    return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
  case AMDGPU::G_OR:
    return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
  case AMDGPU::G_XOR:
    return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
  default:
    llvm_unreachable("not a bit op");
  }
}

bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);

  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
      DstRB->getID() != AMDGPU::VCCRegBankID)
    return false;

  bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
                            STI.isWave64());
  I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));

  // Dead implicit-def of scc
  I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
                                         true, // isImp
                                         false, // isKill
                                         true)); // isDead
  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  Register DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  LLT Ty = MRI->getType(DstReg);
  if (Ty.isVector())
    return false;

  unsigned Size = Ty.getSizeInBits();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
  const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;

  if (Size == 32) {
    if (IsSALU) {
      const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
      MachineInstr *Add =
        BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
          .add(I.getOperand(1))
          .add(I.getOperand(2));
      I.eraseFromParent();
      return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
    }

    if (STI.hasAddNoCarry()) {
      const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
      I.setDesc(TII.get(Opc));
      I.addOperand(*MF, MachineOperand::CreateImm(0));
      I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
      return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
    }

    const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;

    Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
    MachineInstr *Add
      = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
          .addDef(UnusedCarry, RegState::Dead)
          .add(I.getOperand(1))
          .add(I.getOperand(2))
          .addImm(0);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
  }

  assert(!Sub && "illegal sub should not reach here");

  const TargetRegisterClass &RC
    = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
  const TargetRegisterClass &HalfRC
    = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
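  // 64-bit add: split each operand into 32-bit halves, add the low halves,
  // and propagate the carry into the add of the high halves.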
  MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
  MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));

  Register DstLo = MRI->createVirtualRegister(&HalfRC);
  Register DstHi = MRI->createVirtualRegister(&HalfRC);

  if (IsSALU) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);
  } else {
    const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
    Register CarryReg = MRI->createVirtualRegister(CarryRC);
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
      .addDef(CarryReg)
      .add(Lo1)
      .add(Lo2)
      .addImm(0);
    MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
      .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
      .add(Hi1)
      .add(Hi2)
      .addReg(CarryReg, RegState::Kill)
      .addImm(0);

    if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
      return false;
  }

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
    .addReg(DstLo)
    .addImm(AMDGPU::sub0)
    .addReg(DstHi)
    .addImm(AMDGPU::sub1);

  if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
  MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register Dst0Reg = I.getOperand(0).getReg();
  Register Dst1Reg = I.getOperand(1).getReg();
  const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
                     I.getOpcode() == AMDGPU::G_UADDE;
  const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
                          I.getOpcode() == AMDGPU::G_USUBE;

  if (isVCC(Dst1Reg, *MRI)) {
    unsigned NoCarryOpc =
        IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
    unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
    I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
    I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
    I.addOperand(*MF, MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  Register Src0Reg = I.getOperand(2).getReg();
  Register Src1Reg = I.getOperand(3).getReg();

  if (HasCarryIn) {
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
      .addReg(I.getOperand(4).getReg());
  }

  unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
    .add(I.getOperand(2))
    .add(I.getOperand(3));
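  // The scalar add/sub leaves its carry-out in SCC; copy it into the second
  // result register.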
  BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
    .addReg(AMDGPU::SCC);

  if (!MRI->getRegClassOrNull(Dst1Reg))
    MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);

  if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  if (HasCarryIn &&
      !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
                                    AMDGPU::SReg_32RegClass, *MRI))
    return false;

  I.eraseFromParent();
  return true;
}

// TODO: We should probably legalize these to only using 32-bit results.
bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  Register DstReg = I.getOperand(0).getReg();
  Register SrcReg = I.getOperand(1).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  const unsigned SrcSize = SrcTy.getSizeInBits();
  unsigned DstSize = DstTy.getSizeInBits();

  // TODO: Should handle any multiple of 32 offset.
  unsigned Offset = I.getOperand(2).getImm();
  if (Offset % 32 != 0 || DstSize > 128)
    return false;

  // 16-bit operations really use 32-bit registers.
  // FIXME: Probably should not allow 16-bit G_EXTRACT results.
  if (DstSize == 16)
    DstSize = 32;

  const TargetRegisterClass *DstRC =
    TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC)
    return false;
  unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
                                                         DstSize / 32);
  SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
  if (!SrcRC)
    return false;

  SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
                                    *SrcRC, I.getOperand(1));
  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
    .addReg(SrcReg, 0, SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  Register DstReg = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());

  const unsigned SrcSize = SrcTy.getSizeInBits();
  if (SrcSize < 32)
    return selectImpl(MI, *CoverageInfo);

  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const unsigned DstSize = DstTy.getSizeInBits();
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
  MachineInstrBuilder MIB =
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
  for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
    MachineOperand &Src = MI.getOperand(I + 1);
    MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
    MIB.addImm(SubRegs[I]);

    const TargetRegisterClass *SrcRC
      = TRI.getConstrainedRegClassForOperand(Src, *MRI);
    if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
      return false;
  }

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
  MachineBasicBlock *BB = MI.getParent();
  const int NumDst = MI.getNumOperands() - 1;

  MachineOperand &Src = MI.getOperand(NumDst);

  Register SrcReg = Src.getReg();
  Register DstReg0 = MI.getOperand(0).getReg();
  LLT DstTy = MRI->getType(DstReg0);
  LLT SrcTy = MRI->getType(SrcReg);

  const unsigned DstSize = DstTy.getSizeInBits();
  const unsigned SrcSize = SrcTy.getSizeInBits();
  const DebugLoc &DL = MI.getDebugLoc();
  const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);

  const TargetRegisterClass *SrcRC =
    TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
  if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
    return false;

  // Note we could have mixed SGPR and VGPR destination banks for an SGPR
  // source, and this relies on the fact that the same subregister indices are
  // used for both.
  ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
  for (int I = 0, E = NumDst; I != E; ++I) {
    MachineOperand &Dst = MI.getOperand(I);
    BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
      .addReg(SrcReg, 0, SubRegs[I]);

    // Make sure the subregister index is valid for the source register.
    SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
    if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
      return false;

    const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(Dst, *MRI);
    if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
  MachineInstr &MI) const {
  if (selectImpl(MI, *CoverageInfo))
    return true;

  const LLT S32 = LLT::scalar(32);
  const LLT V2S16 = LLT::fixed_vector(2, 16);

  Register Dst = MI.getOperand(0).getReg();
  if (MRI->getType(Dst) != V2S16)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
  if (DstBank->getID() != AMDGPU::SGPRRegBankID)
    return false;

  Register Src0 = MI.getOperand(1).getReg();
  Register Src1 = MI.getOperand(2).getReg();
  if (MRI->getType(Src0) != S32)
    return false;

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *BB = MI.getParent();

  auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
  if (ConstSrc1) {
    auto ConstSrc0 =
        getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
    if (ConstSrc0) {
      const int64_t K0 = ConstSrc0->Value.getSExtValue();
      const int64_t K1 = ConstSrc1->Value.getSExtValue();
      uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
      uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;

      BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
        .addImm(Lo16 | (Hi16 << 16));
      MI.eraseFromParent();
      return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
    }
  }

  // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> (copy $src0)
  MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
  if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
    MI.setDesc(TII.get(AMDGPU::COPY));
    MI.RemoveOperand(2);
    return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
           RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
  }

  Register ShiftSrc0;
  Register ShiftSrc1;

  // With multiple uses of the shift, this will duplicate the shift and
  // increase register pressure.
  //
  // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
  //  => (S_PACK_HH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
  //  => (S_PACK_LH_B32_B16 $src0, $src1)
  // (build_vector_trunc $src0, $src1)
  //  => (S_PACK_LL_B32_B16 $src0, $src1)

  bool Shift0 = mi_match(
      Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));

  bool Shift1 = mi_match(
      Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));

  unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
  if (Shift0 && Shift1) {
    Opc = AMDGPU::S_PACK_HH_B32_B16;
    MI.getOperand(1).setReg(ShiftSrc0);
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift1) {
    Opc = AMDGPU::S_PACK_LH_B32_B16;
    MI.getOperand(2).setReg(ShiftSrc1);
  } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
    // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
    auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
      .addReg(ShiftSrc0)
      .addImm(16);

    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  MI.setDesc(TII.get(Opc));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
  return selectG_ADD_SUB(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  const MachineOperand &MO = I.getOperand(0);

  // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
  // regbank check here is to know why getConstrainedRegClassForOperand failed.
  const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
  if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
      (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
    I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
    return true;
  }

  return false;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();

  Register DstReg = I.getOperand(0).getReg();
  Register Src0Reg = I.getOperand(1).getReg();
  Register Src1Reg = I.getOperand(2).getReg();
  LLT Src1Ty = MRI->getType(Src1Reg);

  unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
  unsigned InsSize = Src1Ty.getSizeInBits();

  int64_t Offset = I.getOperand(3).getImm();

  // FIXME: These cases should have been illegal and unnecessary to check here.
  if (Offset % 32 != 0 || InsSize % 32 != 0)
    return false;

  // Currently not handled by getSubRegFromChannel.
  if (InsSize > 128)
    return false;

  unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
  if (SubReg == AMDGPU::NoSubRegister)
    return false;

  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
  if (!DstRC)
    return false;

  const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
  const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
  const TargetRegisterClass *Src0RC =
    TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
  const TargetRegisterClass *Src1RC =
    TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);

  // Deal with weird cases where the class only partially supports the subreg
  // index.
  Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
  if (!Src0RC || !Src1RC)
    return false;

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
      !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
      !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
    return false;

  const DebugLoc &DL = I.getDebugLoc();
  BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
    .addReg(Src0Reg)
    .addReg(Src1Reg)
    .addImm(SubReg);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  Register OffsetReg = MI.getOperand(2).getReg();
  Register WidthReg = MI.getOperand(3).getReg();

  assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
         "scalar BFX instructions are expanded in regbankselect");
  assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
         "64-bit vector BFX instructions are expanded in regbankselect");

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
  unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
                 .addReg(SrcReg)
                 .addReg(OffsetReg)
                 .addReg(WidthReg);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
  if (STI.getLDSBankCount() != 16)
    return selectImpl(MI, *CoverageInfo);

  Register Dst = MI.getOperand(0).getReg();
  Register Src0 = MI.getOperand(2).getReg();
  Register M0Val = MI.getOperand(6).getReg();
  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
      !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
    return false;

  // This requires 2 instructions. It is possible to write a pattern to support
  // this, but the generated isel emitter doesn't correctly deal with multiple
  // output instructions using the same physical register input. The copy to m0
  // is incorrectly placed before the second instruction.
  //
  // TODO: Match source modifiers.
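  //
  // First read the attribute into a VGPR with V_INTERP_MOV_F32, then feed it
  // as the packed $src2 operand of V_INTERP_P1LV_F16.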

  Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
    .addImm(2)
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm()); // $attrchan

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
    .addImm(0)                          // $src0_modifiers
    .addReg(Src0)                       // $src0
    .addImm(MI.getOperand(4).getImm())  // $attr
    .addImm(MI.getOperand(3).getImm())  // $attrchan
    .addImm(0)                          // $src2_modifiers
    .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
    .addImm(MI.getOperand(5).getImm())  // $high
    .addImm(0)                          // $clamp
    .addImm(0);                         // $omod

  MI.eraseFromParent();
  return true;
}

// Writelane is special in that it can use SGPR and M0 (which would normally
// count as using the constant bus twice - but in this case it is allowed since
// the lane selector doesn't count as a use of the constant bus). However, it is
// still required to abide by the 1 SGPR rule. Fix this up if we might have
// multiple SGPRs.
bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
  // With a constant bus limit of at least 2, there's no issue.
  if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
    return selectImpl(MI, *CoverageInfo);

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  Register VDst = MI.getOperand(0).getReg();
  Register Val = MI.getOperand(2).getReg();
  Register LaneSelect = MI.getOperand(3).getReg();
  Register VDstIn = MI.getOperand(4).getReg();

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);

  Optional<ValueAndVReg> ConstSelect =
    getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
  if (ConstSelect) {
    // The selector has to be an inline immediate, so we can use whatever for
    // the other operands.
    MIB.addReg(Val);
    MIB.addImm(ConstSelect->Value.getSExtValue() &
               maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
  } else {
    Optional<ValueAndVReg> ConstVal =
      getIConstantVRegValWithLookThrough(Val, *MRI);

    // If the value written is an inline immediate, we can get away without a
    // copy to m0.
    if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
                                                 STI.hasInv2PiInlineImm())) {
      MIB.addImm(ConstVal->Value.getSExtValue());
      MIB.addReg(LaneSelect);
    } else {
      MIB.addReg(Val);

      // If the lane selector was originally in a VGPR and copied with
      // readfirstlane, there's a hazard to read the same SGPR from the
      // VALU. Constrain to a different SGPR to help avoid needing a nop later.
      RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);

      BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
        .addReg(LaneSelect);
      MIB.addReg(AMDGPU::M0);
    }
  }

  MIB.addReg(VDstIn);

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

// We need to handle this here because tablegen doesn't support matching
// instructions with multiple outputs.
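// The intrinsic produces two results: the scaled operand and a wave-sized
// condition flag, hence the explicit second def below.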
bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
  Register Dst0 = MI.getOperand(0).getReg();
  Register Dst1 = MI.getOperand(1).getReg();

  LLT Ty = MRI->getType(Dst0);
  unsigned Opc;
  if (Ty == LLT::scalar(32))
    Opc = AMDGPU::V_DIV_SCALE_F32_e64;
  else if (Ty == LLT::scalar(64))
    Opc = AMDGPU::V_DIV_SCALE_F64_e64;
  else
    return false;

  // TODO: Match source modifiers.

  const DebugLoc &DL = MI.getDebugLoc();
  MachineBasicBlock *MBB = MI.getParent();

  Register Numer = MI.getOperand(3).getReg();
  Register Denom = MI.getOperand(4).getReg();
  unsigned ChooseDenom = MI.getOperand(5).getImm();

  Register Src0 = ChooseDenom != 0 ? Numer : Denom;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
    .addDef(Dst1)
    .addImm(0)     // $src0_modifiers
    .addUse(Src0)  // $src0
    .addImm(0)     // $src1_modifiers
    .addUse(Denom) // $src1
    .addImm(0)     // $src2_modifiers
    .addUse(Numer) // $src2
    .addImm(0)     // $clamp
    .addImm(0);    // $omod

  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
  unsigned IntrinsicID = I.getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_if_break: {
    MachineBasicBlock *BB = I.getParent();

    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
    // SelectionDAG uses for wave32 vs wave64.
    BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
      .add(I.getOperand(0))
      .add(I.getOperand(2))
      .add(I.getOperand(3));

    Register DstReg = I.getOperand(0).getReg();
    Register Src0Reg = I.getOperand(2).getReg();
    Register Src1Reg = I.getOperand(3).getReg();

    I.eraseFromParent();

    for (Register Reg : { DstReg, Src0Reg, Src1Reg })
      MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());

    return true;
  }
  case Intrinsic::amdgcn_interp_p1_f16:
    return selectInterpP1F16(I);
  case Intrinsic::amdgcn_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::WQM);
  case Intrinsic::amdgcn_softwqm:
    return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
  case Intrinsic::amdgcn_strict_wwm:
  case Intrinsic::amdgcn_wwm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
  case Intrinsic::amdgcn_strict_wqm:
    return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
  case Intrinsic::amdgcn_writelane:
    return selectWritelane(I);
  case Intrinsic::amdgcn_div_scale:
    return selectDivScale(I);
  case Intrinsic::amdgcn_icmp:
    return selectIntrinsicIcmp(I);
  case Intrinsic::amdgcn_ballot:
    return selectBallot(I);
  case Intrinsic::amdgcn_reloc_constant:
    return selectRelocConstant(I);
  case Intrinsic::amdgcn_groupstaticsize:
    return selectGroupStaticSize(I);
  case Intrinsic::returnaddress:
    return selectReturnAddress(I);
  default:
    return selectImpl(I, *CoverageInfo);
  }
}

static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
  if (Size != 32 && Size != 64)
    return -1;
  switch (P) {
  default:
    llvm_unreachable("Unknown condition code!");
  case CmpInst::ICMP_NE:
    return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
  case CmpInst::ICMP_EQ:
    return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
  case CmpInst::ICMP_SGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
  case CmpInst::ICMP_SGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
  case CmpInst::ICMP_SLT:
    return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
  case CmpInst::ICMP_SLE:
    return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
  case CmpInst::ICMP_UGT:
    return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
  case CmpInst::ICMP_UGE:
    return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
  case CmpInst::ICMP_ULT:
    return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
  case CmpInst::ICMP_ULE:
    return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
  }
}

int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
                                              unsigned Size) const {
  if (Size == 64) {
    if (!STI.hasScalarCompareEq64())
      return -1;

    switch (P) {
    case CmpInst::ICMP_NE:
      return AMDGPU::S_CMP_LG_U64;
    case CmpInst::ICMP_EQ:
      return AMDGPU::S_CMP_EQ_U64;
    default:
      return -1;
    }
  }

  if (Size != 32)
    return -1;

  switch (P) {
  case CmpInst::ICMP_NE:
    return AMDGPU::S_CMP_LG_U32;
  case CmpInst::ICMP_EQ:
    return AMDGPU::S_CMP_EQ_U32;
  case CmpInst::ICMP_SGT:
    return AMDGPU::S_CMP_GT_I32;
  case CmpInst::ICMP_SGE:
    return AMDGPU::S_CMP_GE_I32;
  case CmpInst::ICMP_SLT:
    return AMDGPU::S_CMP_LT_I32;
  case CmpInst::ICMP_SLE:
    return AMDGPU::S_CMP_LE_I32;
  case CmpInst::ICMP_UGT:
    return AMDGPU::S_CMP_GT_U32;
  case CmpInst::ICMP_UGE:
    return AMDGPU::S_CMP_GE_U32;
  case CmpInst::ICMP_ULT:
    return AMDGPU::S_CMP_LT_U32;
  case CmpInst::ICMP_ULE:
    return AMDGPU::S_CMP_LE_U32;
  default:
    llvm_unreachable("Unknown condition code!");
  }
}

bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);

  auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();

  Register CCReg = I.getOperand(0).getReg();
  if (!isVCC(CCReg, *MRI)) {
    int Opcode = getS_CMPOpcode(Pred, Size);
    if (Opcode == -1)
      return false;
    MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(2))
      .add(I.getOperand(3));
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
      .addReg(AMDGPU::SCC);
    bool Ret =
        constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
        RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
    I.eraseFromParent();
    return Ret;
  }

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
                               I.getOperand(0).getReg())
    .add(I.getOperand(2))
    .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
                               *TRI.getBoolRC(), *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
  Register Dst = I.getOperand(0).getReg();
  if (isVCC(Dst, *MRI))
    return false;

  if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
    return false;

  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register SrcReg = I.getOperand(2).getReg();
  unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
  auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());

  int Opcode = getV_CMPOpcode(Pred, Size);
  if (Opcode == -1)
    return false;

  MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
                           .add(I.getOperand(2))
                           .add(I.getOperand(3));
  RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
                               *MRI);
  bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();
  Register DstReg = I.getOperand(0).getReg();
  const unsigned Size = MRI->getType(DstReg).getSizeInBits();
  const bool Is64 = Size == 64;

  if (Size != STI.getWavefrontSize())
    return false;

  Optional<ValueAndVReg> Arg =
      getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);

  if (Arg.hasValue()) {
    const int64_t Value = Arg.getValue().Value.getSExtValue();
    if (Value == 0) {
      unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
      BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
    } else if (Value == -1) { // all ones
      Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
    } else
      return false;
  } else {
    Register SrcReg = I.getOperand(2).getReg();
    BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
  const TargetRegisterClass *DstRC =
    TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI);
  if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;

  Module *M = MF->getFunction().getParent();
  const MDNode *Metadata = I.getOperand(2).getMetadata();
  auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
  auto RelocSymbol = cast<GlobalVariable>(
    M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));

  MachineBasicBlock *BB = I.getParent();
  BuildMI(*BB, &I, I.getDebugLoc(),
          TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
    .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
  Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();

  Register DstReg = I.getOperand(0).getReg();
  const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
  unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
    AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
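  // On AMDHSA and AMDPAL the LDS size is already known here, so it can be
  // emitted as an immediate; for other OSes emit an abs32 relocation against
  // the intrinsic's symbol to be resolved later.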

  MachineBasicBlock *MBB = I.getParent();
  const DebugLoc &DL = I.getDebugLoc();

  auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);

  if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    MIB.addImm(MFI->getLDSSize());
  } else {
    Module *M = MF->getFunction().getParent();
    const GlobalValue *GV
      = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
    MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
  }

  I.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
  MachineBasicBlock *MBB = I.getParent();
  MachineFunction &MF = *MBB->getParent();
  const DebugLoc &DL = I.getDebugLoc();

  MachineOperand &Dst = I.getOperand(0);
  Register DstReg = Dst.getReg();
  unsigned Depth = I.getOperand(2).getImm();

  const TargetRegisterClass *RC
    = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
  if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
      !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
    return false;

  // Check for kernel and shader functions
  if (Depth != 0 ||
      MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
    BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
      .addImm(0);
    I.eraseFromParent();
    return true;
  }

  MachineFrameInfo &MFI = MF.getFrameInfo();
  // There is a call to @llvm.returnaddress in this function
  MFI.setReturnAddressIsTaken(true);

  // Get the return address reg and mark it as an implicit live-in
  Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
  Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
                                             AMDGPU::SReg_64RegClass, DL);
  BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
    .addReg(LiveIn);
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
  // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
  // SelectionDAG uses for wave32 vs wave64.
  MachineBasicBlock *BB = MI.getParent();
  BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
      .add(MI.getOperand(1));

  Register Reg = MI.getOperand(1).getReg();
  MI.eraseFromParent();

  if (!MRI->getRegClassOrNull(Reg))
    MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
  return true;
}

bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
  MachineInstr &MI, Intrinsic::ID IntrID) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  unsigned IndexOperand = MI.getOperand(7).getImm();
  bool WaveRelease = MI.getOperand(8).getImm() != 0;
  bool WaveDone = MI.getOperand(9).getImm() != 0;

  if (WaveDone && !WaveRelease)
    report_fatal_error("ds_ordered_count: wave_done requires wave_release");

  unsigned OrderedCountIndex = IndexOperand & 0x3f;
  IndexOperand &= ~0x3f;
  unsigned CountDw = 0;

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
    CountDw = (IndexOperand >> 24) & 0xf;
    IndexOperand &= ~(0xf << 24);

    if (CountDw < 1 || CountDw > 4) {
      report_fatal_error(
        "ds_ordered_count: dword count must be between 1 and 4");
    }
  }

  if (IndexOperand)
    report_fatal_error("ds_ordered_count: bad index operand");

  unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
  unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);

  unsigned Offset0 = OrderedCountIndex << 2;
  unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
                     (Instruction << 4);

  if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
    Offset1 |= (CountDw - 1) << 6;

  unsigned Offset = Offset0 | (Offset1 << 8);

  Register M0Val = MI.getOperand(2).getReg();
  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(M0Val);

  Register DstReg = MI.getOperand(0).getReg();
  Register ValReg = MI.getOperand(3).getReg();
  MachineInstrBuilder DS =
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
      .addReg(ValReg)
      .addImm(Offset)
      .cloneMemRefs(MI);

  if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
  MI.eraseFromParent();
  return Ret;
}

static unsigned gwsIntrinToOpcode(unsigned IntrID) {
  switch (IntrID) {
  case Intrinsic::amdgcn_ds_gws_init:
    return AMDGPU::DS_GWS_INIT;
  case Intrinsic::amdgcn_ds_gws_barrier:
    return AMDGPU::DS_GWS_BARRIER;
  case Intrinsic::amdgcn_ds_gws_sema_v:
    return AMDGPU::DS_GWS_SEMA_V;
  case Intrinsic::amdgcn_ds_gws_sema_br:
    return AMDGPU::DS_GWS_SEMA_BR;
  case Intrinsic::amdgcn_ds_gws_sema_p:
    return AMDGPU::DS_GWS_SEMA_P;
  case Intrinsic::amdgcn_ds_gws_sema_release_all:
    return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
  default:
    llvm_unreachable("not a gws intrinsic");
  }
}

bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
                                                     Intrinsic::ID IID) const {
  if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
      !STI.hasGWSSemaReleaseAll())
    return false;

  // intrinsic ID, vsrc, offset
  const bool HasVSrc = MI.getNumOperands() == 3;
  assert(HasVSrc || MI.getNumOperands() == 2);

  Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
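  // GWS instructions take their base offset from M0, so the offset must be
  // uniform; give up if it was assigned to the VGPR bank.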
  const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
  if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
    return false;

  MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  assert(OffsetDef);

  unsigned ImmOffset;

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  MachineInstr *Readfirstlane = nullptr;

  // If we legalized the VGPR input, strip out the readfirstlane to analyze the
  // incoming offset, in case there's an add of a constant. We'll have to put it
  // back later.
  if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
    Readfirstlane = OffsetDef;
    BaseOffset = OffsetDef->getOperand(1).getReg();
    OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
  }

  if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
    // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
    // the immediate offset.

    ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
      .addImm(0);
  } else {
    std::tie(BaseOffset, ImmOffset) =
        AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);

    if (Readfirstlane) {
      // We have the constant offset now, so put the readfirstlane back on the
      // variable component.
      if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
        return false;

      Readfirstlane->getOperand(1).setReg(BaseOffset);
      BaseOffset = Readfirstlane->getOperand(0).getReg();
    } else {
      if (!RBI.constrainGenericRegister(BaseOffset,
                                        AMDGPU::SReg_32RegClass, *MRI))
        return false;
    }

    Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
      .addReg(BaseOffset)
      .addImm(16);

    BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
      .addReg(M0Base);
  }

  // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
  // offset field) % 64. Some versions of the programming guide omit the m0
  // part, or claim it's from offset 0.
  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));

  if (HasVSrc) {
    Register VSrc = MI.getOperand(1).getReg();

    if (STI.needsAlignedVGPRs()) {
      // Add implicit aligned super-reg to force alignment on the data operand.
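      // Build an even-aligned 64-bit REG_SEQUENCE with an undef high half,
      // pass its sub0 as the actual data operand, and keep an implicit use of
      // the full pair so the register allocator honors the alignment.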
      Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
      Register NewVR =
        MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass);
      BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR)
        .addReg(VSrc, 0, MI.getOperand(1).getSubReg())
        .addImm(AMDGPU::sub0)
        .addReg(Undef)
        .addImm(AMDGPU::sub1);
      MIB.addReg(NewVR, 0, AMDGPU::sub0);
      MIB.addReg(NewVR, RegState::Implicit);
    } else {
      MIB.addReg(VSrc);
    }

    if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
      return false;
  }

  MIB.addImm(ImmOffset)
     .cloneMemRefs(MI);

  MI.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
                                                      bool IsAppend) const {
  Register PtrBase = MI.getOperand(2).getReg();
  LLT PtrTy = MRI->getType(PtrBase);
  bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;

  unsigned Offset;
  std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));

  // TODO: Should this try to look through readfirstlane like GWS?
  if (!isDSOffsetLegal(PtrBase, Offset)) {
    PtrBase = MI.getOperand(2).getReg();
    Offset = 0;
  }

  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();
  const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;

  BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
    .addReg(PtrBase);
  if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
    return false;

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
                 .addImm(Offset)
                 .addImm(IsGDS ? -1 : 0)
                 .cloneMemRefs(MI);
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
  if (TM.getOptLevel() > CodeGenOpt::None) {
    unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
    if (WGSize <= STI.getWavefrontSize()) {
      MachineBasicBlock *MBB = MI.getParent();
      const DebugLoc &DL = MI.getDebugLoc();
      BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
      MI.eraseFromParent();
      return true;
    }
  }
  return selectImpl(MI, *CoverageInfo);
}

static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
                         bool &IsTexFail) {
  if (TexFailCtrl)
    IsTexFail = true;

  TFE = (TexFailCtrl & 0x1) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x1;
  LWE = (TexFailCtrl & 0x2) ? true : false;
  TexFailCtrl &= ~(uint64_t)0x2;

  return TexFailCtrl == 0;
}

bool AMDGPUInstructionSelector::selectImageIntrinsic(
  MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
  MachineBasicBlock *MBB = MI.getParent();
  const DebugLoc &DL = MI.getDebugLoc();

  const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
    AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);

  const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
  const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
    AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
  const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
    AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
  unsigned IntrOpcode = Intr->BaseOpcode;
  const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);

  const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;

  Register VDataIn, VDataOut;
  LLT VDataTy;
  int NumVDataDwords = -1;
  bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
               MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;

  bool Unorm;
  if (!BaseOpcode->Sampler)
    Unorm = true;
  else
    Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;

  bool TFE;
  bool LWE;
  bool IsTexFail = false;
  if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
                    TFE, LWE, IsTexFail))
    return false;

  const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
  const bool IsA16 = (Flags & 1) != 0;
  const bool IsG16 = (Flags & 2) != 0;

  // A16 implies 16 bit gradients if subtarget doesn't support G16
  if (IsA16 && !STI.hasG16() && !IsG16)
    return false;

  unsigned DMask = 0;
  unsigned DMaskLanes = 0;

  if (BaseOpcode->Atomic) {
    VDataOut = MI.getOperand(0).getReg();
    VDataIn = MI.getOperand(2).getReg();
    LLT Ty = MRI->getType(VDataIn);

    // Be careful to allow atomic swap on 16-bit element vectors.
    const bool Is64Bit = BaseOpcode->AtomicX2 ?
      Ty.getSizeInBits() == 128 :
      Ty.getSizeInBits() == 64;

    if (BaseOpcode->AtomicX2) {
      assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);

      DMask = Is64Bit ? 0xf : 0x3;
      NumVDataDwords = Is64Bit ? 4 : 2;
    } else {
      DMask = Is64Bit ? 0x3 : 0x1;
      NumVDataDwords = Is64Bit ? 2 : 1;
    }
  } else {
    DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
    DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);

    if (BaseOpcode->Store) {
      VDataIn = MI.getOperand(1).getReg();
      VDataTy = MRI->getType(VDataIn);
      NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
    } else {
      VDataOut = MI.getOperand(0).getReg();
      VDataTy = MRI->getType(VDataOut);
      NumVDataDwords = DMaskLanes;

      if (IsD16 && !STI.hasUnpackedD16VMem())
        NumVDataDwords = (DMaskLanes + 1) / 2;
    }
  }

  // Optimize _L to _LZ when _L is zero
  if (LZMappingInfo) {
    // The legalizer replaced the register with an immediate 0 if we need to
    // change the opcode.
    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->LodIndex);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = LZMappingInfo->LZ; // set new opcode to _lz variant of _l
    }
  }

  // Optimize _mip away, when 'lod' is zero
  if (MIPMappingInfo) {
    const MachineOperand &Lod = MI.getOperand(ArgOffset + Intr->MipIndex);
    if (Lod.isImm()) {
      assert(Lod.getImm() == 0);
      IntrOpcode = MIPMappingInfo->NONMIP; // set new opcode to variant without _mip
    }
  }

  // Set G16 opcode
  if (IsG16 && !IsA16) {
    const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
        AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
    assert(G16MappingInfo);
    IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
  }

  // TODO: Check this in verifier.
  assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");

  unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
  if (BaseOpcode->Atomic)
    CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
  if (CPol & ~AMDGPU::CPol::ALL)
    return false;

  int NumVAddrRegs = 0;
  int NumVAddrDwords = 0;
  for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
    // Skip the $noregs and 0s inserted during legalization.
    MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
    if (!AddrOp.isReg())
      continue; // XXX - Break?

    Register Addr = AddrOp.getReg();
    if (!Addr)
      break;

    ++NumVAddrRegs;
    NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
  }

  // The legalizer preprocessed the intrinsic arguments. If we aren't using
  // NSA, these should have been packed into a single value in the first
  // address register
  const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
  if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
    LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
    return false;
  }

  if (IsTexFail)
    ++NumVDataDwords;

  int Opcode = -1;
  if (IsGFX10Plus) {
    Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
                                   UseNSA ? AMDGPU::MIMGEncGfx10NSA
                                          : AMDGPU::MIMGEncGfx10Default,
                                   NumVDataDwords, NumVAddrDwords);
  } else {
    if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
                                     NumVDataDwords, NumVAddrDwords);
    if (Opcode == -1)
      Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
                                     NumVDataDwords, NumVAddrDwords);
  }
  assert(Opcode != -1);

  auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
    .cloneMemRefs(MI);

  if (VDataOut) {
    if (BaseOpcode->AtomicX2) {
      const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;

      Register TmpReg = MRI->createVirtualRegister(
        Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
      unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
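      // The X2 atomic defines a wider temporary; if the original result has
      // uses, copy the relevant low portion back out of it.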
AMDGPU::sub0_sub1 : AMDGPU::sub0; 1680 1681 MIB.addDef(TmpReg); 1682 if (!MRI->use_empty(VDataOut)) { 1683 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) 1684 .addReg(TmpReg, RegState::Kill, SubReg); 1685 } 1686 1687 } else { 1688 MIB.addDef(VDataOut); // vdata output 1689 } 1690 } 1691 1692 if (VDataIn) 1693 MIB.addReg(VDataIn); // vdata input 1694 1695 for (int I = 0; I != NumVAddrRegs; ++I) { 1696 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); 1697 if (SrcOp.isReg()) { 1698 assert(SrcOp.getReg() != 0); 1699 MIB.addReg(SrcOp.getReg()); 1700 } 1701 } 1702 1703 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); 1704 if (BaseOpcode->Sampler) 1705 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); 1706 1707 MIB.addImm(DMask); // dmask 1708 1709 if (IsGFX10Plus) 1710 MIB.addImm(DimInfo->Encoding); 1711 MIB.addImm(Unorm); 1712 1713 MIB.addImm(CPol); 1714 MIB.addImm(IsA16 && // a16 or r128 1715 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); 1716 if (IsGFX10Plus) 1717 MIB.addImm(IsA16 ? -1 : 0); 1718 1719 MIB.addImm(TFE); // tfe 1720 MIB.addImm(LWE); // lwe 1721 if (!IsGFX10Plus) 1722 MIB.addImm(DimInfo->DA ? -1 : 0); 1723 if (BaseOpcode->HasD16) 1724 MIB.addImm(IsD16 ? -1 : 0); 1725 1726 if (IsTexFail) { 1727 // An image load instruction with TFE/LWE only conditionally writes to its 1728 // result registers. Initialize them to zero so that we always get well 1729 // defined result values. 1730 assert(VDataOut && !VDataIn); 1731 Register Tied = MRI->cloneVirtualRegister(VDataOut); 1732 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1733 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero) 1734 .addImm(0); 1735 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4); 1736 if (STI.usePRTStrictNull()) { 1737 // With enable-prt-strict-null enabled, initialize all result registers to 1738 // zero. 1739 auto RegSeq = 1740 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1741 for (auto Sub : Parts) 1742 RegSeq.addReg(Zero).addImm(Sub); 1743 } else { 1744 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE 1745 // result register. 
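      // Rough sketch of what is built in this case, assuming a four-dword
      // result for illustration:
      //   Tied = REG_SEQUENCE undef, sub0, undef, sub1, undef, sub2, zero, sub3
      // Only the trailing TFE/LWE status dword is guaranteed to be zero.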
1746 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1747 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); 1748 auto RegSeq = 1749 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1750 for (auto Sub : Parts.drop_back(1)) 1751 RegSeq.addReg(Undef).addImm(Sub); 1752 RegSeq.addReg(Zero).addImm(Parts.back()); 1753 } 1754 MIB.addReg(Tied, RegState::Implicit); 1755 MIB->tieOperands(0, MIB->getNumOperands() - 1); 1756 } 1757 1758 MI.eraseFromParent(); 1759 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1760 } 1761 1762 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( 1763 MachineInstr &I) const { 1764 unsigned IntrinsicID = I.getIntrinsicID(); 1765 switch (IntrinsicID) { 1766 case Intrinsic::amdgcn_end_cf: 1767 return selectEndCfIntrinsic(I); 1768 case Intrinsic::amdgcn_ds_ordered_add: 1769 case Intrinsic::amdgcn_ds_ordered_swap: 1770 return selectDSOrderedIntrinsic(I, IntrinsicID); 1771 case Intrinsic::amdgcn_ds_gws_init: 1772 case Intrinsic::amdgcn_ds_gws_barrier: 1773 case Intrinsic::amdgcn_ds_gws_sema_v: 1774 case Intrinsic::amdgcn_ds_gws_sema_br: 1775 case Intrinsic::amdgcn_ds_gws_sema_p: 1776 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1777 return selectDSGWSIntrinsic(I, IntrinsicID); 1778 case Intrinsic::amdgcn_ds_append: 1779 return selectDSAppendConsume(I, true); 1780 case Intrinsic::amdgcn_ds_consume: 1781 return selectDSAppendConsume(I, false); 1782 case Intrinsic::amdgcn_s_barrier: 1783 return selectSBarrier(I); 1784 case Intrinsic::amdgcn_global_atomic_fadd: 1785 return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3)); 1786 default: { 1787 return selectImpl(I, *CoverageInfo); 1788 } 1789 } 1790 } 1791 1792 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { 1793 if (selectImpl(I, *CoverageInfo)) 1794 return true; 1795 1796 MachineBasicBlock *BB = I.getParent(); 1797 const DebugLoc &DL = I.getDebugLoc(); 1798 1799 Register DstReg = I.getOperand(0).getReg(); 1800 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 1801 assert(Size <= 32 || Size == 64); 1802 const MachineOperand &CCOp = I.getOperand(1); 1803 Register CCReg = CCOp.getReg(); 1804 if (!isVCC(CCReg, *MRI)) { 1805 unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 : 1806 AMDGPU::S_CSELECT_B32; 1807 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 1808 .addReg(CCReg); 1809 1810 // The generic constrainSelectedInstRegOperands doesn't work for the scc register 1811 // bank, because it does not cover the register class that we used to represent 1812 // for it. So we need to manually set the register class here. 1813 if (!MRI->getRegClassOrNull(CCReg)) 1814 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI)); 1815 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg) 1816 .add(I.getOperand(2)) 1817 .add(I.getOperand(3)); 1818 1819 bool Ret = false; 1820 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1821 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); 1822 I.eraseFromParent(); 1823 return Ret; 1824 } 1825 1826 // Wide VGPR select should have been split in RegBankSelect. 
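  // For the VGPR case below: v_cndmask_b32 returns src1 when the per-lane
  // condition bit is set, so the G_SELECT false value (operand 3) is placed in
  // src0 and the true value (operand 2) in src1.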
1827 if (Size > 32) 1828 return false; 1829 1830 MachineInstr *Select = 1831 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1832 .addImm(0) 1833 .add(I.getOperand(3)) 1834 .addImm(0) 1835 .add(I.getOperand(2)) 1836 .add(I.getOperand(1)); 1837 1838 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1839 I.eraseFromParent(); 1840 return Ret; 1841 } 1842 1843 static int sizeToSubRegIndex(unsigned Size) { 1844 switch (Size) { 1845 case 32: 1846 return AMDGPU::sub0; 1847 case 64: 1848 return AMDGPU::sub0_sub1; 1849 case 96: 1850 return AMDGPU::sub0_sub1_sub2; 1851 case 128: 1852 return AMDGPU::sub0_sub1_sub2_sub3; 1853 case 256: 1854 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; 1855 default: 1856 if (Size < 32) 1857 return AMDGPU::sub0; 1858 if (Size > 256) 1859 return -1; 1860 return sizeToSubRegIndex(PowerOf2Ceil(Size)); 1861 } 1862 } 1863 1864 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { 1865 Register DstReg = I.getOperand(0).getReg(); 1866 Register SrcReg = I.getOperand(1).getReg(); 1867 const LLT DstTy = MRI->getType(DstReg); 1868 const LLT SrcTy = MRI->getType(SrcReg); 1869 const LLT S1 = LLT::scalar(1); 1870 1871 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 1872 const RegisterBank *DstRB; 1873 if (DstTy == S1) { 1874 // This is a special case. We don't treat s1 for legalization artifacts as 1875 // vcc booleans. 1876 DstRB = SrcRB; 1877 } else { 1878 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1879 if (SrcRB != DstRB) 1880 return false; 1881 } 1882 1883 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 1884 1885 unsigned DstSize = DstTy.getSizeInBits(); 1886 unsigned SrcSize = SrcTy.getSizeInBits(); 1887 1888 const TargetRegisterClass *SrcRC 1889 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI); 1890 const TargetRegisterClass *DstRC 1891 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI); 1892 if (!SrcRC || !DstRC) 1893 return false; 1894 1895 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 1896 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { 1897 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); 1898 return false; 1899 } 1900 1901 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) { 1902 MachineBasicBlock *MBB = I.getParent(); 1903 const DebugLoc &DL = I.getDebugLoc(); 1904 1905 Register LoReg = MRI->createVirtualRegister(DstRC); 1906 Register HiReg = MRI->createVirtualRegister(DstRC); 1907 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) 1908 .addReg(SrcReg, 0, AMDGPU::sub0); 1909 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) 1910 .addReg(SrcReg, 0, AMDGPU::sub1); 1911 1912 if (IsVALU && STI.hasSDWA()) { 1913 // Write the low 16-bits of the high element into the high 16-bits of the 1914 // low element. 
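      // Roughly: Dst = (Hi << 16) | (Lo & 0xffff). The SDWA mov below does this
      // in one instruction by writing only WORD_1 of Dst and preserving WORD_0,
      // which is tied to LoReg; the non-SDWA else path further down
      // materializes the same expression with a shift, an and, and an or.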
1915 MachineInstr *MovSDWA = 1916 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 1917 .addImm(0) // $src0_modifiers 1918 .addReg(HiReg) // $src0 1919 .addImm(0) // $clamp 1920 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 1921 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 1922 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 1923 .addReg(LoReg, RegState::Implicit); 1924 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 1925 } else { 1926 Register TmpReg0 = MRI->createVirtualRegister(DstRC); 1927 Register TmpReg1 = MRI->createVirtualRegister(DstRC); 1928 Register ImmReg = MRI->createVirtualRegister(DstRC); 1929 if (IsVALU) { 1930 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) 1931 .addImm(16) 1932 .addReg(HiReg); 1933 } else { 1934 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) 1935 .addReg(HiReg) 1936 .addImm(16); 1937 } 1938 1939 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1940 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 1941 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; 1942 1943 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) 1944 .addImm(0xffff); 1945 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) 1946 .addReg(LoReg) 1947 .addReg(ImmReg); 1948 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) 1949 .addReg(TmpReg0) 1950 .addReg(TmpReg1); 1951 } 1952 1953 I.eraseFromParent(); 1954 return true; 1955 } 1956 1957 if (!DstTy.isScalar()) 1958 return false; 1959 1960 if (SrcSize > 32) { 1961 int SubRegIdx = sizeToSubRegIndex(DstSize); 1962 if (SubRegIdx == -1) 1963 return false; 1964 1965 // Deal with weird cases where the class only partially supports the subreg 1966 // index. 1967 const TargetRegisterClass *SrcWithSubRC 1968 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); 1969 if (!SrcWithSubRC) 1970 return false; 1971 1972 if (SrcWithSubRC != SrcRC) { 1973 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) 1974 return false; 1975 } 1976 1977 I.getOperand(1).setSubReg(SubRegIdx); 1978 } 1979 1980 I.setDesc(TII.get(TargetOpcode::COPY)); 1981 return true; 1982 } 1983 1984 /// \returns true if a bitmask for \p Size bits will be an inline immediate. 1985 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { 1986 Mask = maskTrailingOnes<unsigned>(Size); 1987 int SignedMask = static_cast<int>(Mask); 1988 return SignedMask >= -16 && SignedMask <= 64; 1989 } 1990 1991 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. 1992 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( 1993 Register Reg, const MachineRegisterInfo &MRI, 1994 const TargetRegisterInfo &TRI) const { 1995 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 1996 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) 1997 return RB; 1998 1999 // Ignore the type, since we don't use vcc in artifacts. 
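  // Note (editorial): this helper is used by selectG_SZA_EXT below so that an
  // artifact source already constrained to a 32-bit scalar class is treated as
  // a plain SGPR/VGPR value rather than a VCC boolean.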
2000 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) 2001 return &RBI.getRegBankFromRegClass(*RC, LLT()); 2002 return nullptr; 2003 } 2004 2005 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { 2006 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; 2007 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; 2008 const DebugLoc &DL = I.getDebugLoc(); 2009 MachineBasicBlock &MBB = *I.getParent(); 2010 const Register DstReg = I.getOperand(0).getReg(); 2011 const Register SrcReg = I.getOperand(1).getReg(); 2012 2013 const LLT DstTy = MRI->getType(DstReg); 2014 const LLT SrcTy = MRI->getType(SrcReg); 2015 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? 2016 I.getOperand(2).getImm() : SrcTy.getSizeInBits(); 2017 const unsigned DstSize = DstTy.getSizeInBits(); 2018 if (!DstTy.isScalar()) 2019 return false; 2020 2021 // Artifact casts should never use vcc. 2022 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); 2023 2024 // FIXME: This should probably be illegal and split earlier. 2025 if (I.getOpcode() == AMDGPU::G_ANYEXT) { 2026 if (DstSize <= 32) 2027 return selectCOPY(I); 2028 2029 const TargetRegisterClass *SrcRC = 2030 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI); 2031 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 2032 const TargetRegisterClass *DstRC = 2033 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 2034 2035 Register UndefReg = MRI->createVirtualRegister(SrcRC); 2036 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2037 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2038 .addReg(SrcReg) 2039 .addImm(AMDGPU::sub0) 2040 .addReg(UndefReg) 2041 .addImm(AMDGPU::sub1); 2042 I.eraseFromParent(); 2043 2044 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && 2045 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); 2046 } 2047 2048 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { 2049 // 64-bit should have been split up in RegBankSelect 2050 2051 // Try to use an and with a mask if it will save code size. 2052 unsigned Mask; 2053 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2054 MachineInstr *ExtI = 2055 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) 2056 .addImm(Mask) 2057 .addReg(SrcReg); 2058 I.eraseFromParent(); 2059 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2060 } 2061 2062 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 2063 MachineInstr *ExtI = 2064 BuildMI(MBB, I, DL, TII.get(BFE), DstReg) 2065 .addReg(SrcReg) 2066 .addImm(0) // Offset 2067 .addImm(SrcSize); // Width 2068 I.eraseFromParent(); 2069 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2070 } 2071 2072 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { 2073 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? 2074 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; 2075 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) 2076 return false; 2077 2078 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { 2079 const unsigned SextOpc = SrcSize == 8 ? 2080 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; 2081 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) 2082 .addReg(SrcReg); 2083 I.eraseFromParent(); 2084 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2085 } 2086 2087 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; 2088 const unsigned BFE32 = Signed ? 
AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 2089 2090 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. 2091 if (DstSize > 32 && (SrcSize <= 32 || InReg)) { 2092 // We need a 64-bit register source, but the high bits don't matter. 2093 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); 2094 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2095 unsigned SubReg = InReg ? AMDGPU::sub0 : 0; 2096 2097 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2098 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) 2099 .addReg(SrcReg, 0, SubReg) 2100 .addImm(AMDGPU::sub0) 2101 .addReg(UndefReg) 2102 .addImm(AMDGPU::sub1); 2103 2104 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) 2105 .addReg(ExtReg) 2106 .addImm(SrcSize << 16); 2107 2108 I.eraseFromParent(); 2109 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); 2110 } 2111 2112 unsigned Mask; 2113 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2114 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) 2115 .addReg(SrcReg) 2116 .addImm(Mask); 2117 } else { 2118 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) 2119 .addReg(SrcReg) 2120 .addImm(SrcSize << 16); 2121 } 2122 2123 I.eraseFromParent(); 2124 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2125 } 2126 2127 return false; 2128 } 2129 2130 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { 2131 MachineBasicBlock *BB = I.getParent(); 2132 MachineOperand &ImmOp = I.getOperand(1); 2133 Register DstReg = I.getOperand(0).getReg(); 2134 unsigned Size = MRI->getType(DstReg).getSizeInBits(); 2135 2136 // The AMDGPU backend only supports Imm operands and not CImm or FPImm. 2137 if (ImmOp.isFPImm()) { 2138 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); 2139 ImmOp.ChangeToImmediate(Imm.getZExtValue()); 2140 } else if (ImmOp.isCImm()) { 2141 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); 2142 } else { 2143 llvm_unreachable("Not supported by g_constants"); 2144 } 2145 2146 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2147 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; 2148 2149 unsigned Opcode; 2150 if (DstRB->getID() == AMDGPU::VCCRegBankID) { 2151 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2152 } else { 2153 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 2154 2155 // We should never produce s1 values on banks other than VCC. If the user of 2156 // this already constrained the register, we may incorrectly think it's VCC 2157 // if it wasn't originally. 2158 if (Size == 1) 2159 return false; 2160 } 2161 2162 if (Size != 64) { 2163 I.setDesc(TII.get(Opcode)); 2164 I.addImplicitDefUseOperands(*MF); 2165 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 2166 } 2167 2168 const DebugLoc &DL = I.getDebugLoc(); 2169 2170 APInt Imm(Size, I.getOperand(1).getImm()); 2171 2172 MachineInstr *ResInst; 2173 if (IsSgpr && TII.isInlineConstant(Imm)) { 2174 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 2175 .addImm(I.getOperand(1).getImm()); 2176 } else { 2177 const TargetRegisterClass *RC = IsSgpr ? 
2178 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; 2179 Register LoReg = MRI->createVirtualRegister(RC); 2180 Register HiReg = MRI->createVirtualRegister(RC); 2181 2182 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) 2183 .addImm(Imm.trunc(32).getZExtValue()); 2184 2185 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) 2186 .addImm(Imm.ashr(32).getZExtValue()); 2187 2188 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2189 .addReg(LoReg) 2190 .addImm(AMDGPU::sub0) 2191 .addReg(HiReg) 2192 .addImm(AMDGPU::sub1); 2193 } 2194 2195 // We can't call constrainSelectedInstRegOperands here, because it doesn't 2196 // work for target independent opcodes 2197 I.eraseFromParent(); 2198 const TargetRegisterClass *DstRC = 2199 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); 2200 if (!DstRC) 2201 return true; 2202 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); 2203 } 2204 2205 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { 2206 // Only manually handle the f64 SGPR case. 2207 // 2208 // FIXME: This is a workaround for 2.5 different tablegen problems. Because 2209 // the bit ops theoretically have a second result due to the implicit def of 2210 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing 2211 // that is easy by disabling the check. The result works, but uses a 2212 // nonsensical sreg32orlds_and_sreg_1 regclass. 2213 // 2214 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to 2215 // the variadic REG_SEQUENCE operands. 2216 2217 Register Dst = MI.getOperand(0).getReg(); 2218 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2219 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2220 MRI->getType(Dst) != LLT::scalar(64)) 2221 return false; 2222 2223 Register Src = MI.getOperand(1).getReg(); 2224 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); 2225 if (Fabs) 2226 Src = Fabs->getOperand(1).getReg(); 2227 2228 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2229 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2230 return false; 2231 2232 MachineBasicBlock *BB = MI.getParent(); 2233 const DebugLoc &DL = MI.getDebugLoc(); 2234 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2235 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2236 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2237 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2238 2239 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2240 .addReg(Src, 0, AMDGPU::sub0); 2241 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2242 .addReg(Src, 0, AMDGPU::sub1); 2243 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2244 .addImm(0x80000000); 2245 2246 // Set or toggle sign bit. 2247 unsigned Opc = Fabs ? 
AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32; 2248 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg) 2249 .addReg(HiReg) 2250 .addReg(ConstReg); 2251 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 2252 .addReg(LoReg) 2253 .addImm(AMDGPU::sub0) 2254 .addReg(OpReg) 2255 .addImm(AMDGPU::sub1); 2256 MI.eraseFromParent(); 2257 return true; 2258 } 2259 2260 // FIXME: This is a workaround for the same tablegen problems as G_FNEG 2261 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const { 2262 Register Dst = MI.getOperand(0).getReg(); 2263 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 2264 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 2265 MRI->getType(Dst) != LLT::scalar(64)) 2266 return false; 2267 2268 Register Src = MI.getOperand(1).getReg(); 2269 MachineBasicBlock *BB = MI.getParent(); 2270 const DebugLoc &DL = MI.getDebugLoc(); 2271 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2272 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2273 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2274 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2275 2276 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 2277 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 2278 return false; 2279 2280 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 2281 .addReg(Src, 0, AMDGPU::sub0); 2282 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 2283 .addReg(Src, 0, AMDGPU::sub1); 2284 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 2285 .addImm(0x7fffffff); 2286 2287 // Clear sign bit. 2288 // TODO: Should this used S_BITSET0_*? 2289 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg) 2290 .addReg(HiReg) 2291 .addReg(ConstReg); 2292 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 2293 .addReg(LoReg) 2294 .addImm(AMDGPU::sub0) 2295 .addReg(OpReg) 2296 .addImm(AMDGPU::sub1); 2297 2298 MI.eraseFromParent(); 2299 return true; 2300 } 2301 2302 static bool isConstant(const MachineInstr &MI) { 2303 return MI.getOpcode() == TargetOpcode::G_CONSTANT; 2304 } 2305 2306 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load, 2307 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const { 2308 2309 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg()); 2310 2311 assert(PtrMI); 2312 2313 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD) 2314 return; 2315 2316 GEPInfo GEPInfo(*PtrMI); 2317 2318 for (unsigned i = 1; i != 3; ++i) { 2319 const MachineOperand &GEPOp = PtrMI->getOperand(i); 2320 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg()); 2321 assert(OpDef); 2322 if (i == 2 && isConstant(*OpDef)) { 2323 // TODO: Could handle constant base + variable offset, but a combine 2324 // probably should have commuted it. 
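      // Example of what this records (hypothetical MIR): for
      //   %p:sgpr(p4) = G_PTR_ADD %base, %c   with   %c = G_CONSTANT i64 16
      // the constant offset operand sets GEPInfo.Imm = 16, and %base is
      // appended to SgprParts or VgprParts according to its register bank.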
2325 assert(GEPInfo.Imm == 0); 2326 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue(); 2327 continue; 2328 } 2329 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); 2330 if (OpBank->getID() == AMDGPU::SGPRRegBankID) 2331 GEPInfo.SgprParts.push_back(GEPOp.getReg()); 2332 else 2333 GEPInfo.VgprParts.push_back(GEPOp.getReg()); 2334 } 2335 2336 AddrInfo.push_back(GEPInfo); 2337 getAddrModeInfo(*PtrMI, MRI, AddrInfo); 2338 } 2339 2340 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const { 2341 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; 2342 } 2343 2344 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { 2345 if (!MI.hasOneMemOperand()) 2346 return false; 2347 2348 const MachineMemOperand *MMO = *MI.memoperands_begin(); 2349 const Value *Ptr = MMO->getValue(); 2350 2351 // UndefValue means this is a load of a kernel input. These are uniform. 2352 // Sometimes LDS instructions have constant pointers. 2353 // If Ptr is null, then that means this mem operand contains a 2354 // PseudoSourceValue like GOT. 2355 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || 2356 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) 2357 return true; 2358 2359 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) 2360 return true; 2361 2362 const Instruction *I = dyn_cast<Instruction>(Ptr); 2363 return I && I->getMetadata("amdgpu.uniform"); 2364 } 2365 2366 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const { 2367 for (const GEPInfo &GEPInfo : AddrInfo) { 2368 if (!GEPInfo.VgprParts.empty()) 2369 return true; 2370 } 2371 return false; 2372 } 2373 2374 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const { 2375 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2376 unsigned AS = PtrTy.getAddressSpace(); 2377 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) && 2378 STI.ldsRequiresM0Init()) { 2379 MachineBasicBlock *BB = I.getParent(); 2380 2381 // If DS instructions require M0 initialization, insert it before selecting. 2382 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2383 .addImm(-1); 2384 } 2385 } 2386 2387 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW( 2388 MachineInstr &I) const { 2389 if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) { 2390 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2391 unsigned AS = PtrTy.getAddressSpace(); 2392 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 2393 return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2)); 2394 } 2395 2396 initM0(I); 2397 return selectImpl(I, *CoverageInfo); 2398 } 2399 2400 // TODO: No rtn optimization. 2401 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG( 2402 MachineInstr &MI) const { 2403 Register PtrReg = MI.getOperand(1).getReg(); 2404 const LLT PtrTy = MRI->getType(PtrReg); 2405 if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS || 2406 STI.useFlatForGlobal()) 2407 return selectImpl(MI, *CoverageInfo); 2408 2409 Register DstReg = MI.getOperand(0).getReg(); 2410 const LLT Ty = MRI->getType(DstReg); 2411 const bool Is64 = Ty.getSizeInBits() == 64; 2412 const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; 2413 Register TmpReg = MRI->createVirtualRegister( 2414 Is64 ? 
&AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); 2415 2416 const DebugLoc &DL = MI.getDebugLoc(); 2417 MachineBasicBlock *BB = MI.getParent(); 2418 2419 Register VAddr, RSrcReg, SOffset; 2420 int64_t Offset = 0; 2421 2422 unsigned Opcode; 2423 if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) { 2424 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN : 2425 AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN; 2426 } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr, 2427 RSrcReg, SOffset, Offset)) { 2428 Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN : 2429 AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN; 2430 } else 2431 return selectImpl(MI, *CoverageInfo); 2432 2433 auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg) 2434 .addReg(MI.getOperand(2).getReg()); 2435 2436 if (VAddr) 2437 MIB.addReg(VAddr); 2438 2439 MIB.addReg(RSrcReg); 2440 if (SOffset) 2441 MIB.addReg(SOffset); 2442 else 2443 MIB.addImm(0); 2444 2445 MIB.addImm(Offset); 2446 MIB.addImm(AMDGPU::CPol::GLC); 2447 MIB.cloneMemRefs(MI); 2448 2449 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg) 2450 .addReg(TmpReg, RegState::Kill, SubReg); 2451 2452 MI.eraseFromParent(); 2453 2454 MRI->setRegClass( 2455 DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass); 2456 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 2457 } 2458 2459 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) { 2460 if (Reg.isPhysical()) 2461 return false; 2462 2463 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg); 2464 const unsigned Opcode = MI.getOpcode(); 2465 2466 if (Opcode == AMDGPU::COPY) 2467 return isVCmpResult(MI.getOperand(1).getReg(), MRI); 2468 2469 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR || 2470 Opcode == AMDGPU::G_XOR) 2471 return isVCmpResult(MI.getOperand(1).getReg(), MRI) && 2472 isVCmpResult(MI.getOperand(2).getReg(), MRI); 2473 2474 if (Opcode == TargetOpcode::G_INTRINSIC) 2475 return MI.getIntrinsicID() == Intrinsic::amdgcn_class; 2476 2477 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP; 2478 } 2479 2480 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { 2481 MachineBasicBlock *BB = I.getParent(); 2482 MachineOperand &CondOp = I.getOperand(0); 2483 Register CondReg = CondOp.getReg(); 2484 const DebugLoc &DL = I.getDebugLoc(); 2485 2486 unsigned BrOpcode; 2487 Register CondPhysReg; 2488 const TargetRegisterClass *ConstrainRC; 2489 2490 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide 2491 // whether the branch is uniform when selecting the instruction. In 2492 // GlobalISel, we should push that decision into RegBankSelect. Assume for now 2493 // RegBankSelect knows what it's doing if the branch condition is scc, even 2494 // though it currently does not. 2495 if (!isVCC(CondReg, *MRI)) { 2496 if (MRI->getType(CondReg) != LLT::scalar(32)) 2497 return false; 2498 2499 CondPhysReg = AMDGPU::SCC; 2500 BrOpcode = AMDGPU::S_CBRANCH_SCC1; 2501 ConstrainRC = &AMDGPU::SReg_32RegClass; 2502 } else { 2503 // FIXME: Should scc->vcc copies and with exec? 2504 2505 // Unless the value of CondReg is a result of a V_CMP* instruction then we 2506 // need to insert an and with exec. 2507 if (!isVCmpResult(CondReg, *MRI)) { 2508 const bool Is64 = STI.isWave64(); 2509 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 2510 const Register Exec = Is64 ? 
AMDGPU::EXEC : AMDGPU::EXEC_LO; 2511 2512 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC()); 2513 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg) 2514 .addReg(CondReg) 2515 .addReg(Exec); 2516 CondReg = TmpReg; 2517 } 2518 2519 CondPhysReg = TRI.getVCC(); 2520 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ; 2521 ConstrainRC = TRI.getBoolRC(); 2522 } 2523 2524 if (!MRI->getRegClassOrNull(CondReg)) 2525 MRI->setRegClass(CondReg, ConstrainRC); 2526 2527 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg) 2528 .addReg(CondReg); 2529 BuildMI(*BB, &I, DL, TII.get(BrOpcode)) 2530 .addMBB(I.getOperand(1).getMBB()); 2531 2532 I.eraseFromParent(); 2533 return true; 2534 } 2535 2536 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE( 2537 MachineInstr &I) const { 2538 Register DstReg = I.getOperand(0).getReg(); 2539 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2540 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2541 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32)); 2542 if (IsVGPR) 2543 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 2544 2545 return RBI.constrainGenericRegister( 2546 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); 2547 } 2548 2549 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { 2550 Register DstReg = I.getOperand(0).getReg(); 2551 Register SrcReg = I.getOperand(1).getReg(); 2552 Register MaskReg = I.getOperand(2).getReg(); 2553 LLT Ty = MRI->getType(DstReg); 2554 LLT MaskTy = MRI->getType(MaskReg); 2555 MachineBasicBlock *BB = I.getParent(); 2556 const DebugLoc &DL = I.getDebugLoc(); 2557 2558 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2559 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2560 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); 2561 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2562 if (DstRB != SrcRB) // Should only happen for hand written MIR. 2563 return false; 2564 2565 // Try to avoid emitting a bit operation when we only need to touch half of 2566 // the 64-bit pointer. 2567 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64); 2568 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); 2569 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); 2570 2571 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32; 2572 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32; 2573 2574 if (!IsVGPR && Ty.getSizeInBits() == 64 && 2575 !CanCopyLow32 && !CanCopyHi32) { 2576 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg) 2577 .addReg(SrcReg) 2578 .addReg(MaskReg); 2579 I.eraseFromParent(); 2580 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 2581 } 2582 2583 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 2584 const TargetRegisterClass &RegRC 2585 = IsVGPR ? 
AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2586 2587 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB, 2588 *MRI); 2589 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB, 2590 *MRI); 2591 const TargetRegisterClass *MaskRC = 2592 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI); 2593 2594 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2595 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2596 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) 2597 return false; 2598 2599 if (Ty.getSizeInBits() == 32) { 2600 assert(MaskTy.getSizeInBits() == 32 && 2601 "ptrmask should have been narrowed during legalize"); 2602 2603 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) 2604 .addReg(SrcReg) 2605 .addReg(MaskReg); 2606 I.eraseFromParent(); 2607 return true; 2608 } 2609 2610 Register HiReg = MRI->createVirtualRegister(&RegRC); 2611 Register LoReg = MRI->createVirtualRegister(&RegRC); 2612 2613 // Extract the subregisters from the source pointer. 2614 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) 2615 .addReg(SrcReg, 0, AMDGPU::sub0); 2616 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) 2617 .addReg(SrcReg, 0, AMDGPU::sub1); 2618 2619 Register MaskedLo, MaskedHi; 2620 2621 if (CanCopyLow32) { 2622 // If all the bits in the low half are 1, we only need a copy for it. 2623 MaskedLo = LoReg; 2624 } else { 2625 // Extract the mask subregister and apply the and. 2626 Register MaskLo = MRI->createVirtualRegister(&RegRC); 2627 MaskedLo = MRI->createVirtualRegister(&RegRC); 2628 2629 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) 2630 .addReg(MaskReg, 0, AMDGPU::sub0); 2631 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) 2632 .addReg(LoReg) 2633 .addReg(MaskLo); 2634 } 2635 2636 if (CanCopyHi32) { 2637 // If all the bits in the high half are 1, we only need a copy for it. 2638 MaskedHi = HiReg; 2639 } else { 2640 Register MaskHi = MRI->createVirtualRegister(&RegRC); 2641 MaskedHi = MRI->createVirtualRegister(&RegRC); 2642 2643 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) 2644 .addReg(MaskReg, 0, AMDGPU::sub1); 2645 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) 2646 .addReg(HiReg) 2647 .addReg(MaskHi); 2648 } 2649 2650 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2651 .addReg(MaskedLo) 2652 .addImm(AMDGPU::sub0) 2653 .addReg(MaskedHi) 2654 .addImm(AMDGPU::sub1); 2655 I.eraseFromParent(); 2656 return true; 2657 } 2658 2659 /// Return the register to use for the index value, and the subregister to use 2660 /// for the indirectly accessed register. 2661 static std::pair<Register, unsigned> 2662 computeIndirectRegIndex(MachineRegisterInfo &MRI, 2663 const SIRegisterInfo &TRI, 2664 const TargetRegisterClass *SuperRC, 2665 Register IdxReg, 2666 unsigned EltSize) { 2667 Register IdxBaseReg; 2668 int Offset; 2669 2670 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); 2671 if (IdxBaseReg == AMDGPU::NoRegister) { 2672 // This will happen if the index is a known constant. This should ordinarily 2673 // be legalized out, but handle it as a register just in case. 2674 assert(Offset == 0); 2675 IdxBaseReg = IdxReg; 2676 } 2677 2678 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); 2679 2680 // Skip out of bounds offsets, or else we would end up using an undefined 2681 // register. 
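  // e.g. extracting a 32-bit element from a <4 x s32> vector with an index of
  // base + 7: a 128-bit class split into dwords has no sub7, so fall back to
  // indexing with the original (un-split) index register and sub0.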
2682 if (static_cast<unsigned>(Offset) >= SubRegs.size()) 2683 return std::make_pair(IdxReg, SubRegs[0]); 2684 return std::make_pair(IdxBaseReg, SubRegs[Offset]); 2685 } 2686 2687 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( 2688 MachineInstr &MI) const { 2689 Register DstReg = MI.getOperand(0).getReg(); 2690 Register SrcReg = MI.getOperand(1).getReg(); 2691 Register IdxReg = MI.getOperand(2).getReg(); 2692 2693 LLT DstTy = MRI->getType(DstReg); 2694 LLT SrcTy = MRI->getType(SrcReg); 2695 2696 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2697 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2698 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2699 2700 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2701 // into a waterfall loop. 2702 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2703 return false; 2704 2705 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB, 2706 *MRI); 2707 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB, 2708 *MRI); 2709 if (!SrcRC || !DstRC) 2710 return false; 2711 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2712 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2713 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2714 return false; 2715 2716 MachineBasicBlock *BB = MI.getParent(); 2717 const DebugLoc &DL = MI.getDebugLoc(); 2718 const bool Is64 = DstTy.getSizeInBits() == 64; 2719 2720 unsigned SubReg; 2721 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg, 2722 DstTy.getSizeInBits() / 8); 2723 2724 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { 2725 if (DstTy.getSizeInBits() != 32 && !Is64) 2726 return false; 2727 2728 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2729 .addReg(IdxReg); 2730 2731 unsigned Opc = Is64 ? 
AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32; 2732 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg) 2733 .addReg(SrcReg, 0, SubReg) 2734 .addReg(SrcReg, RegState::Implicit); 2735 MI.eraseFromParent(); 2736 return true; 2737 } 2738 2739 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32) 2740 return false; 2741 2742 if (!STI.useVGPRIndexMode()) { 2743 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2744 .addReg(IdxReg); 2745 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg) 2746 .addReg(SrcReg, 0, SubReg) 2747 .addReg(SrcReg, RegState::Implicit); 2748 MI.eraseFromParent(); 2749 return true; 2750 } 2751 2752 const MCInstrDesc &GPRIDXDesc = 2753 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true); 2754 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2755 .addReg(SrcReg) 2756 .addReg(IdxReg) 2757 .addImm(SubReg); 2758 2759 MI.eraseFromParent(); 2760 return true; 2761 } 2762 2763 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd 2764 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT( 2765 MachineInstr &MI) const { 2766 Register DstReg = MI.getOperand(0).getReg(); 2767 Register VecReg = MI.getOperand(1).getReg(); 2768 Register ValReg = MI.getOperand(2).getReg(); 2769 Register IdxReg = MI.getOperand(3).getReg(); 2770 2771 LLT VecTy = MRI->getType(DstReg); 2772 LLT ValTy = MRI->getType(ValReg); 2773 unsigned VecSize = VecTy.getSizeInBits(); 2774 unsigned ValSize = ValTy.getSizeInBits(); 2775 2776 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); 2777 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); 2778 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2779 2780 assert(VecTy.getElementType() == ValTy); 2781 2782 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2783 // into a waterfall loop. 
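  // As with the extract case above, a VGPR index would have required a
  // waterfall loop. Here the SGPR index is either copied into m0 for the
  // movrel-style write pseudos, or passed directly as an operand when VGPR
  // index mode is available.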
2784 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2785 return false; 2786 2787 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB, 2788 *MRI); 2789 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB, 2790 *MRI); 2791 2792 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || 2793 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || 2794 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || 2795 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2796 return false; 2797 2798 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) 2799 return false; 2800 2801 unsigned SubReg; 2802 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, 2803 ValSize / 8); 2804 2805 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && 2806 STI.useVGPRIndexMode(); 2807 2808 MachineBasicBlock *BB = MI.getParent(); 2809 const DebugLoc &DL = MI.getDebugLoc(); 2810 2811 if (!IndexMode) { 2812 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2813 .addReg(IdxReg); 2814 2815 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo( 2816 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID); 2817 BuildMI(*BB, MI, DL, RegWriteOp, DstReg) 2818 .addReg(VecReg) 2819 .addReg(ValReg) 2820 .addImm(SubReg); 2821 MI.eraseFromParent(); 2822 return true; 2823 } 2824 2825 const MCInstrDesc &GPRIDXDesc = 2826 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 2827 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2828 .addReg(VecReg) 2829 .addReg(ValReg) 2830 .addReg(IdxReg) 2831 .addImm(SubReg); 2832 2833 MI.eraseFromParent(); 2834 return true; 2835 } 2836 2837 static bool isZeroOrUndef(int X) { 2838 return X == 0 || X == -1; 2839 } 2840 2841 static bool isOneOrUndef(int X) { 2842 return X == 1 || X == -1; 2843 } 2844 2845 static bool isZeroOrOneOrUndef(int X) { 2846 return X == 0 || X == 1 || X == -1; 2847 } 2848 2849 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single 2850 // 32-bit register. 2851 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1, 2852 ArrayRef<int> Mask) { 2853 NewMask[0] = Mask[0]; 2854 NewMask[1] = Mask[1]; 2855 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1])) 2856 return Src0; 2857 2858 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1); 2859 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1); 2860 2861 // Shift the mask inputs to be 0/1; 2862 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2; 2863 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2; 2864 return Src1; 2865 } 2866 2867 // This is only legal with VOP3P instructions as an aid to op_sel matching. 
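// A couple of examples of how the mask cases below play out (the operands are
// required to be v2s16): mask <1, 1> broadcasts the high half of src0 (via the
// SDWA mov or s_pack_hh), while mask <2, 3> is normalized to <0, 1> on src1
// and becomes a plain copy of src1.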
2868 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR( 2869 MachineInstr &MI) const { 2870 Register DstReg = MI.getOperand(0).getReg(); 2871 Register Src0Reg = MI.getOperand(1).getReg(); 2872 Register Src1Reg = MI.getOperand(2).getReg(); 2873 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask(); 2874 2875 const LLT V2S16 = LLT::fixed_vector(2, 16); 2876 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16) 2877 return false; 2878 2879 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask)) 2880 return false; 2881 2882 assert(ShufMask.size() == 2); 2883 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA"); 2884 2885 MachineBasicBlock *MBB = MI.getParent(); 2886 const DebugLoc &DL = MI.getDebugLoc(); 2887 2888 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2889 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 2890 const TargetRegisterClass &RC = IsVALU ? 2891 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2892 2893 // Handle the degenerate case which should have folded out. 2894 if (ShufMask[0] == -1 && ShufMask[1] == -1) { 2895 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg); 2896 2897 MI.eraseFromParent(); 2898 return RBI.constrainGenericRegister(DstReg, RC, *MRI); 2899 } 2900 2901 // A legal VOP3P mask only reads one of the sources. 2902 int Mask[2]; 2903 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask); 2904 2905 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) || 2906 !RBI.constrainGenericRegister(SrcVec, RC, *MRI)) 2907 return false; 2908 2909 // TODO: This also should have been folded out 2910 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) { 2911 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg) 2912 .addReg(SrcVec); 2913 2914 MI.eraseFromParent(); 2915 return true; 2916 } 2917 2918 if (Mask[0] == 1 && Mask[1] == -1) { 2919 if (IsVALU) { 2920 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 2921 .addImm(16) 2922 .addReg(SrcVec); 2923 } else { 2924 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 2925 .addReg(SrcVec) 2926 .addImm(16); 2927 } 2928 } else if (Mask[0] == -1 && Mask[1] == 0) { 2929 if (IsVALU) { 2930 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg) 2931 .addImm(16) 2932 .addReg(SrcVec); 2933 } else { 2934 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg) 2935 .addReg(SrcVec) 2936 .addImm(16); 2937 } 2938 } else if (Mask[0] == 0 && Mask[1] == 0) { 2939 if (IsVALU) { 2940 // Write low half of the register into the high half. 2941 MachineInstr *MovSDWA = 2942 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2943 .addImm(0) // $src0_modifiers 2944 .addReg(SrcVec) // $src0 2945 .addImm(0) // $clamp 2946 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 2947 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2948 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 2949 .addReg(SrcVec, RegState::Implicit); 2950 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2951 } else { 2952 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2953 .addReg(SrcVec) 2954 .addReg(SrcVec); 2955 } 2956 } else if (Mask[0] == 1 && Mask[1] == 1) { 2957 if (IsVALU) { 2958 // Write high half of the register into the low half. 
2959 MachineInstr *MovSDWA = 2960 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2961 .addImm(0) // $src0_modifiers 2962 .addReg(SrcVec) // $src0 2963 .addImm(0) // $clamp 2964 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel 2965 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2966 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel 2967 .addReg(SrcVec, RegState::Implicit); 2968 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2969 } else { 2970 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg) 2971 .addReg(SrcVec) 2972 .addReg(SrcVec); 2973 } 2974 } else if (Mask[0] == 1 && Mask[1] == 0) { 2975 if (IsVALU) { 2976 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg) 2977 .addReg(SrcVec) 2978 .addReg(SrcVec) 2979 .addImm(16); 2980 } else { 2981 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2982 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg) 2983 .addReg(SrcVec) 2984 .addImm(16); 2985 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2986 .addReg(TmpReg) 2987 .addReg(SrcVec); 2988 } 2989 } else 2990 llvm_unreachable("all shuffle masks should be handled"); 2991 2992 MI.eraseFromParent(); 2993 return true; 2994 } 2995 2996 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD( 2997 MachineInstr &MI) const { 2998 if (STI.hasGFX90AInsts()) 2999 return selectImpl(MI, *CoverageInfo); 3000 3001 MachineBasicBlock *MBB = MI.getParent(); 3002 const DebugLoc &DL = MI.getDebugLoc(); 3003 3004 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 3005 Function &F = MBB->getParent()->getFunction(); 3006 DiagnosticInfoUnsupported 3007 NoFpRet(F, "return versions of fp atomics not supported", 3008 MI.getDebugLoc(), DS_Error); 3009 F.getContext().diagnose(NoFpRet); 3010 return false; 3011 } 3012 3013 // FIXME: This is only needed because tablegen requires number of dst operands 3014 // in match and replace pattern to be the same. Otherwise patterns can be 3015 // exported from SDag path. 3016 MachineOperand &VDataIn = MI.getOperand(1); 3017 MachineOperand &VIndex = MI.getOperand(3); 3018 MachineOperand &VOffset = MI.getOperand(4); 3019 MachineOperand &SOffset = MI.getOperand(5); 3020 int16_t Offset = MI.getOperand(6).getImm(); 3021 3022 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI); 3023 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI); 3024 3025 unsigned Opcode; 3026 if (HasVOffset) { 3027 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN 3028 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN; 3029 } else { 3030 Opcode = HasVIndex ? 
AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN 3031 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET; 3032 } 3033 3034 if (MRI->getType(VDataIn.getReg()).isVector()) { 3035 switch (Opcode) { 3036 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN: 3037 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN; 3038 break; 3039 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN: 3040 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN; 3041 break; 3042 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN: 3043 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN; 3044 break; 3045 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET: 3046 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET; 3047 break; 3048 } 3049 } 3050 3051 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode)); 3052 I.add(VDataIn); 3053 3054 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN || 3055 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) { 3056 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); 3057 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 3058 .addReg(VIndex.getReg()) 3059 .addImm(AMDGPU::sub0) 3060 .addReg(VOffset.getReg()) 3061 .addImm(AMDGPU::sub1); 3062 3063 I.addReg(IdxReg); 3064 } else if (HasVIndex) { 3065 I.add(VIndex); 3066 } else if (HasVOffset) { 3067 I.add(VOffset); 3068 } 3069 3070 I.add(MI.getOperand(2)); // rsrc 3071 I.add(SOffset); 3072 I.addImm(Offset); 3073 I.addImm(MI.getOperand(7).getImm()); // cpol 3074 I.cloneMemRefs(MI); 3075 3076 MI.eraseFromParent(); 3077 3078 return true; 3079 } 3080 3081 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd( 3082 MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const { 3083 3084 if (STI.hasGFX90AInsts()) { 3085 // gfx90a adds return versions of the global atomic fadd instructions so no 3086 // special handling is required. 3087 return selectImpl(MI, *CoverageInfo); 3088 } 3089 3090 MachineBasicBlock *MBB = MI.getParent(); 3091 const DebugLoc &DL = MI.getDebugLoc(); 3092 3093 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 3094 Function &F = MBB->getParent()->getFunction(); 3095 DiagnosticInfoUnsupported 3096 NoFpRet(F, "return versions of fp atomics not supported", 3097 MI.getDebugLoc(), DS_Error); 3098 F.getContext().diagnose(NoFpRet); 3099 return false; 3100 } 3101 3102 // FIXME: This is only needed because tablegen requires number of dst operands 3103 // in match and replace pattern to be the same. Otherwise patterns can be 3104 // exported from SDag path. 3105 auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal); 3106 3107 Register Data = DataOp.getReg(); 3108 const unsigned Opc = MRI->getType(Data).isVector() ? 
3109 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32; 3110 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3111 .addReg(Addr.first) 3112 .addReg(Data) 3113 .addImm(Addr.second) 3114 .addImm(0) // cpol 3115 .cloneMemRefs(MI); 3116 3117 MI.eraseFromParent(); 3118 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3119 } 3120 3121 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{ 3122 MI.setDesc(TII.get(MI.getOperand(1).getImm())); 3123 MI.RemoveOperand(1); 3124 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3125 return true; 3126 } 3127 3128 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const { 3129 Register DstReg = MI.getOperand(0).getReg(); 3130 Register SrcReg = MI.getOperand(1).getReg(); 3131 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 3132 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 3133 MachineBasicBlock *MBB = MI.getParent(); 3134 const DebugLoc &DL = MI.getDebugLoc(); 3135 3136 if (IsVALU) { 3137 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 3138 .addImm(Subtarget->getWavefrontSizeLog2()) 3139 .addReg(SrcReg); 3140 } else { 3141 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 3142 .addReg(SrcReg) 3143 .addImm(Subtarget->getWavefrontSizeLog2()); 3144 } 3145 3146 const TargetRegisterClass &RC = 3147 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 3148 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 3149 return false; 3150 3151 MI.eraseFromParent(); 3152 return true; 3153 } 3154 3155 bool AMDGPUInstructionSelector::select(MachineInstr &I) { 3156 if (I.isPHI()) 3157 return selectPHI(I); 3158 3159 if (!I.isPreISelOpcode()) { 3160 if (I.isCopy()) 3161 return selectCOPY(I); 3162 return true; 3163 } 3164 3165 switch (I.getOpcode()) { 3166 case TargetOpcode::G_AND: 3167 case TargetOpcode::G_OR: 3168 case TargetOpcode::G_XOR: 3169 if (selectImpl(I, *CoverageInfo)) 3170 return true; 3171 return selectG_AND_OR_XOR(I); 3172 case TargetOpcode::G_ADD: 3173 case TargetOpcode::G_SUB: 3174 if (selectImpl(I, *CoverageInfo)) 3175 return true; 3176 return selectG_ADD_SUB(I); 3177 case TargetOpcode::G_UADDO: 3178 case TargetOpcode::G_USUBO: 3179 case TargetOpcode::G_UADDE: 3180 case TargetOpcode::G_USUBE: 3181 return selectG_UADDO_USUBO_UADDE_USUBE(I); 3182 case TargetOpcode::G_INTTOPTR: 3183 case TargetOpcode::G_BITCAST: 3184 case TargetOpcode::G_PTRTOINT: 3185 return selectCOPY(I); 3186 case TargetOpcode::G_CONSTANT: 3187 case TargetOpcode::G_FCONSTANT: 3188 return selectG_CONSTANT(I); 3189 case TargetOpcode::G_FNEG: 3190 if (selectImpl(I, *CoverageInfo)) 3191 return true; 3192 return selectG_FNEG(I); 3193 case TargetOpcode::G_FABS: 3194 if (selectImpl(I, *CoverageInfo)) 3195 return true; 3196 return selectG_FABS(I); 3197 case TargetOpcode::G_EXTRACT: 3198 return selectG_EXTRACT(I); 3199 case TargetOpcode::G_MERGE_VALUES: 3200 case TargetOpcode::G_BUILD_VECTOR: 3201 case TargetOpcode::G_CONCAT_VECTORS: 3202 return selectG_MERGE_VALUES(I); 3203 case TargetOpcode::G_UNMERGE_VALUES: 3204 return selectG_UNMERGE_VALUES(I); 3205 case TargetOpcode::G_BUILD_VECTOR_TRUNC: 3206 return selectG_BUILD_VECTOR_TRUNC(I); 3207 case TargetOpcode::G_PTR_ADD: 3208 return selectG_PTR_ADD(I); 3209 case TargetOpcode::G_IMPLICIT_DEF: 3210 return selectG_IMPLICIT_DEF(I); 3211 case TargetOpcode::G_FREEZE: 3212 return selectCOPY(I); 3213 case TargetOpcode::G_INSERT: 3214 return selectG_INSERT(I); 3215 case TargetOpcode::G_INTRINSIC: 3216 return 
selectG_INTRINSIC(I); 3217 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 3218 return selectG_INTRINSIC_W_SIDE_EFFECTS(I); 3219 case TargetOpcode::G_ICMP: 3220 if (selectG_ICMP(I)) 3221 return true; 3222 return selectImpl(I, *CoverageInfo); 3223 case TargetOpcode::G_LOAD: 3224 case TargetOpcode::G_STORE: 3225 case TargetOpcode::G_ATOMIC_CMPXCHG: 3226 case TargetOpcode::G_ATOMICRMW_XCHG: 3227 case TargetOpcode::G_ATOMICRMW_ADD: 3228 case TargetOpcode::G_ATOMICRMW_SUB: 3229 case TargetOpcode::G_ATOMICRMW_AND: 3230 case TargetOpcode::G_ATOMICRMW_OR: 3231 case TargetOpcode::G_ATOMICRMW_XOR: 3232 case TargetOpcode::G_ATOMICRMW_MIN: 3233 case TargetOpcode::G_ATOMICRMW_MAX: 3234 case TargetOpcode::G_ATOMICRMW_UMIN: 3235 case TargetOpcode::G_ATOMICRMW_UMAX: 3236 case TargetOpcode::G_ATOMICRMW_FADD: 3237 case AMDGPU::G_AMDGPU_ATOMIC_INC: 3238 case AMDGPU::G_AMDGPU_ATOMIC_DEC: 3239 case AMDGPU::G_AMDGPU_ATOMIC_FMIN: 3240 case AMDGPU::G_AMDGPU_ATOMIC_FMAX: 3241 return selectG_LOAD_STORE_ATOMICRMW(I); 3242 case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG: 3243 return selectG_AMDGPU_ATOMIC_CMPXCHG(I); 3244 case TargetOpcode::G_SELECT: 3245 return selectG_SELECT(I); 3246 case TargetOpcode::G_TRUNC: 3247 return selectG_TRUNC(I); 3248 case TargetOpcode::G_SEXT: 3249 case TargetOpcode::G_ZEXT: 3250 case TargetOpcode::G_ANYEXT: 3251 case TargetOpcode::G_SEXT_INREG: 3252 if (selectImpl(I, *CoverageInfo)) 3253 return true; 3254 return selectG_SZA_EXT(I); 3255 case TargetOpcode::G_BRCOND: 3256 return selectG_BRCOND(I); 3257 case TargetOpcode::G_GLOBAL_VALUE: 3258 return selectG_GLOBAL_VALUE(I); 3259 case TargetOpcode::G_PTRMASK: 3260 return selectG_PTRMASK(I); 3261 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 3262 return selectG_EXTRACT_VECTOR_ELT(I); 3263 case TargetOpcode::G_INSERT_VECTOR_ELT: 3264 return selectG_INSERT_VECTOR_ELT(I); 3265 case TargetOpcode::G_SHUFFLE_VECTOR: 3266 return selectG_SHUFFLE_VECTOR(I); 3267 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: 3268 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: 3269 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: 3270 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { 3271 const AMDGPU::ImageDimIntrinsicInfo *Intr 3272 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); 3273 assert(Intr && "not an image intrinsic with image pseudo"); 3274 return selectImageIntrinsic(I, Intr); 3275 } 3276 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: 3277 return selectBVHIntrinsic(I); 3278 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: 3279 return selectAMDGPU_BUFFER_ATOMIC_FADD(I); 3280 case AMDGPU::G_SBFX: 3281 case AMDGPU::G_UBFX: 3282 return selectG_SBFX_UBFX(I); 3283 case AMDGPU::G_SI_CALL: 3284 I.setDesc(TII.get(AMDGPU::SI_CALL)); 3285 return true; 3286 case AMDGPU::G_AMDGPU_WAVE_ADDRESS: 3287 return selectWaveAddress(I); 3288 default: 3289 return selectImpl(I, *CoverageInfo); 3290 } 3291 return false; 3292 } 3293 3294 InstructionSelector::ComplexRendererFns 3295 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { 3296 return {{ 3297 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3298 }}; 3299 3300 } 3301 3302 std::pair<Register, unsigned> 3303 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root, 3304 bool AllowAbs) const { 3305 Register Src = Root.getReg(); 3306 Register OrigSrc = Src; 3307 unsigned Mods = 0; 3308 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); 3309 3310 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) { 3311 Src = MI->getOperand(1).getReg(); 3312 Mods |= SISrcMods::NEG; 3313 MI = getDefIgnoringCopies(Src, *MRI); 3314 } 3315 3316 if (AllowAbs && 
MI && MI->getOpcode() == AMDGPU::G_FABS) { 3317 Src = MI->getOperand(1).getReg(); 3318 Mods |= SISrcMods::ABS; 3319 } 3320 3321 if (Mods != 0 && 3322 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { 3323 MachineInstr *UseMI = Root.getParent(); 3324 3325 // If we looked through copies to find source modifiers on an SGPR operand, 3326 // we now have an SGPR register source. To avoid potentially violating the 3327 // constant bus restriction, we need to insert a copy to a VGPR. 3328 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc); 3329 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(), 3330 TII.get(AMDGPU::COPY), VGPRSrc) 3331 .addReg(Src); 3332 Src = VGPRSrc; 3333 } 3334 3335 return std::make_pair(Src, Mods); 3336 } 3337 3338 /// 3339 /// This will select either an SGPR or VGPR operand and will save us from 3340 /// having to write an extra tablegen pattern. 3341 InstructionSelector::ComplexRendererFns 3342 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { 3343 return {{ 3344 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3345 }}; 3346 } 3347 3348 InstructionSelector::ComplexRendererFns 3349 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { 3350 Register Src; 3351 unsigned Mods; 3352 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3353 3354 return {{ 3355 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3356 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3357 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3358 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3359 }}; 3360 } 3361 3362 InstructionSelector::ComplexRendererFns 3363 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { 3364 Register Src; 3365 unsigned Mods; 3366 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3367 3368 return {{ 3369 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3370 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3371 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3372 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3373 }}; 3374 } 3375 3376 InstructionSelector::ComplexRendererFns 3377 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { 3378 return {{ 3379 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, 3380 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3381 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3382 }}; 3383 } 3384 3385 InstructionSelector::ComplexRendererFns 3386 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { 3387 Register Src; 3388 unsigned Mods; 3389 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3390 3391 return {{ 3392 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3393 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3394 }}; 3395 } 3396 3397 InstructionSelector::ComplexRendererFns 3398 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { 3399 Register Src; 3400 unsigned Mods; 3401 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3402 3403 return {{ 3404 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3405 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3406 }}; 3407 } 3408 3409 InstructionSelector::ComplexRendererFns 3410 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { 3411 Register Reg = Root.getReg(); 3412 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); 3413 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG 
|| 3414 Def->getOpcode() == AMDGPU::G_FABS)) 3415 return {}; 3416 return {{ 3417 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3418 }}; 3419 } 3420 3421 std::pair<Register, unsigned> 3422 AMDGPUInstructionSelector::selectVOP3PModsImpl( 3423 Register Src, const MachineRegisterInfo &MRI) const { 3424 unsigned Mods = 0; 3425 MachineInstr *MI = MRI.getVRegDef(Src); 3426 3427 if (MI && MI->getOpcode() == AMDGPU::G_FNEG && 3428 // It's possible to see an f32 fneg here, but unlikely. 3429 // TODO: Treat f32 fneg as only high bit. 3430 MRI.getType(Src) == LLT::fixed_vector(2, 16)) { 3431 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 3432 Src = MI->getOperand(1).getReg(); 3433 MI = MRI.getVRegDef(Src); 3434 } 3435 3436 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. 3437 3438 // Packed instructions do not have abs modifiers. 3439 Mods |= SISrcMods::OP_SEL_1; 3440 3441 return std::make_pair(Src, Mods); 3442 } 3443 3444 InstructionSelector::ComplexRendererFns 3445 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const { 3446 MachineRegisterInfo &MRI 3447 = Root.getParent()->getParent()->getParent()->getRegInfo(); 3448 3449 Register Src; 3450 unsigned Mods; 3451 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI); 3452 3453 return {{ 3454 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3455 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3456 }}; 3457 } 3458 3459 InstructionSelector::ComplexRendererFns 3460 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const { 3461 Register Src; 3462 unsigned Mods; 3463 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3464 if (!isKnownNeverNaN(Src, *MRI)) 3465 return None; 3466 3467 return {{ 3468 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3469 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3470 }}; 3471 } 3472 3473 InstructionSelector::ComplexRendererFns 3474 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const { 3475 // FIXME: Handle op_sel 3476 return {{ 3477 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }, 3478 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods 3479 }}; 3480 } 3481 3482 InstructionSelector::ComplexRendererFns 3483 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const { 3484 SmallVector<GEPInfo, 4> AddrInfo; 3485 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); 3486 3487 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3488 return None; 3489 3490 const GEPInfo &GEPInfo = AddrInfo[0]; 3491 Optional<int64_t> EncodedImm = 3492 AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false); 3493 if (!EncodedImm) 3494 return None; 3495 3496 unsigned PtrReg = GEPInfo.SgprParts[0]; 3497 return {{ 3498 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3499 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } 3500 }}; 3501 } 3502 3503 InstructionSelector::ComplexRendererFns 3504 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { 3505 SmallVector<GEPInfo, 4> AddrInfo; 3506 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); 3507 3508 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3509 return None; 3510 3511 const GEPInfo &GEPInfo = AddrInfo[0]; 3512 Register PtrReg = GEPInfo.SgprParts[0]; 3513 Optional<int64_t> EncodedImm = 3514 AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm); 3515 if (!EncodedImm) 3516 return None; 3517 3518 return {{ 3519 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3520 
[=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } 3521 }}; 3522 } 3523 3524 InstructionSelector::ComplexRendererFns 3525 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const { 3526 MachineInstr *MI = Root.getParent(); 3527 MachineBasicBlock *MBB = MI->getParent(); 3528 3529 SmallVector<GEPInfo, 4> AddrInfo; 3530 getAddrModeInfo(*MI, *MRI, AddrInfo); 3531 3532 // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits, 3533 // then we can select all ptr + 32-bit offsets not just immediate offsets. 3534 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3535 return None; 3536 3537 const GEPInfo &GEPInfo = AddrInfo[0]; 3538 // SGPR offset is unsigned. 3539 if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm)) 3540 return None; 3541 3542 // If we make it this far we have a load with an 32-bit immediate offset. 3543 // It is OK to select this using a sgpr offset, because we have already 3544 // failed trying to select this load into one of the _IMM variants since 3545 // the _IMM Patterns are considered before the _SGPR patterns. 3546 Register PtrReg = GEPInfo.SgprParts[0]; 3547 Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 3548 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg) 3549 .addImm(GEPInfo.Imm); 3550 return {{ 3551 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3552 [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); } 3553 }}; 3554 } 3555 3556 std::pair<Register, int> 3557 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root, 3558 uint64_t FlatVariant) const { 3559 MachineInstr *MI = Root.getParent(); 3560 3561 auto Default = std::make_pair(Root.getReg(), 0); 3562 3563 if (!STI.hasFlatInstOffsets()) 3564 return Default; 3565 3566 Register PtrBase; 3567 int64_t ConstOffset; 3568 std::tie(PtrBase, ConstOffset) = 3569 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 3570 if (ConstOffset == 0) 3571 return Default; 3572 3573 unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace(); 3574 if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant)) 3575 return Default; 3576 3577 return std::make_pair(PtrBase, ConstOffset); 3578 } 3579 3580 InstructionSelector::ComplexRendererFns 3581 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const { 3582 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT); 3583 3584 return {{ 3585 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3586 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3587 }}; 3588 } 3589 3590 InstructionSelector::ComplexRendererFns 3591 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const { 3592 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal); 3593 3594 return {{ 3595 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3596 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3597 }}; 3598 } 3599 3600 InstructionSelector::ComplexRendererFns 3601 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const { 3602 auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch); 3603 3604 return {{ 3605 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); }, 3606 [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); }, 3607 }}; 3608 } 3609 3610 /// Match a zero extend from a 32-bit value to 64-bits. 
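/// Returns the 32-bit source register when \p Reg is either a G_ZEXT from s32
/// or the legalized G_MERGE_VALUES (s32 x), (s32 0) form, and an invalid
/// Register() otherwise.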
static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
  Register ZExtSrc;
  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
    return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();

  // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
    return Register();

  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt()))
    return Def->getOperand(1).getReg();

  return Register();
}

// Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
  Register Addr = Root.getReg();
  Register PtrBase;
  int64_t ConstOffset;
  int64_t ImmOffset = 0;

  // Match the immediate offset first, which canonically is moved as low as
  // possible.
  std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);

  if (ConstOffset != 0) {
    if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
                              SIInstrFlags::FlatGlobal)) {
      Addr = PtrBase;
      ImmOffset = ConstOffset;
    } else {
      auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
      if (!PtrBaseDef)
        return None;

      if (isSGPR(PtrBaseDef->Reg)) {
        if (ConstOffset > 0) {
          // Offset is too large.
          //
          // saddr + large_offset -> saddr +
          //                         (voffset = large_offset & ~MaxOffset) +
          //                         (large_offset & MaxOffset);
          int64_t SplitImmOffset, RemainderOffset;
          std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
              ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);

          if (isUInt<32>(RemainderOffset)) {
            MachineInstr *MI = Root.getParent();
            MachineBasicBlock *MBB = MI->getParent();
            Register HighBits =
                MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);

            BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
                    HighBits)
                .addImm(RemainderOffset);

            return {{
                [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
                [=](MachineInstrBuilder &MIB) {
                  MIB.addReg(HighBits);
                }, // voffset
                [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
            }};
          }
        }

        // We are adding a 64-bit SGPR and a constant. If the constant bus
        // limit is 1 we would need to perform 1 or 2 extra moves for each half
        // of the constant, and it is better to do a scalar add and then issue
        // a single VALU instruction to materialize zero. Otherwise it takes
        // fewer instructions to perform VALU adds with immediates or inline
        // literals.
        unsigned NumLiterals =
            !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
            !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
        if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
          return None;
      }
    }
  }

  auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
  if (!AddrDef)
    return None;

  // Match the variable offset.
  if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
    // Look through the SGPR->VGPR copy.
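    // RegBankSelect typically places the G_PTR_ADD on VGPRs, so an SGPR base
    // is normally only reachable through a cross-bank COPY at this point.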
3702 Register SAddr = 3703 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); 3704 3705 if (SAddr && isSGPR(SAddr)) { 3706 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); 3707 3708 // It's possible voffset is an SGPR here, but the copy to VGPR will be 3709 // inserted later. 3710 if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { 3711 return {{[=](MachineInstrBuilder &MIB) { // saddr 3712 MIB.addReg(SAddr); 3713 }, 3714 [=](MachineInstrBuilder &MIB) { // voffset 3715 MIB.addReg(VOffset); 3716 }, 3717 [=](MachineInstrBuilder &MIB) { // offset 3718 MIB.addImm(ImmOffset); 3719 }}}; 3720 } 3721 } 3722 } 3723 3724 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and 3725 // drop this. 3726 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF || 3727 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg)) 3728 return None; 3729 3730 // It's cheaper to materialize a single 32-bit zero for vaddr than the two 3731 // moves required to copy a 64-bit SGPR to VGPR. 3732 MachineInstr *MI = Root.getParent(); 3733 MachineBasicBlock *MBB = MI->getParent(); 3734 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3735 3736 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset) 3737 .addImm(0); 3738 3739 return {{ 3740 [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr 3741 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset 3742 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3743 }}; 3744 } 3745 3746 InstructionSelector::ComplexRendererFns 3747 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const { 3748 Register Addr = Root.getReg(); 3749 Register PtrBase; 3750 int64_t ConstOffset; 3751 int64_t ImmOffset = 0; 3752 3753 // Match the immediate offset first, which canonically is moved as low as 3754 // possible. 
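  // Note: getPtrBaseWithConstantOffset only peels a single constant off the
  // G_PTR_ADD that defines Addr, so this relies on earlier combines having
  // already pushed the offset to that position.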
3755 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 3756 3757 if (ConstOffset != 0 && 3758 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, 3759 SIInstrFlags::FlatScratch)) { 3760 Addr = PtrBase; 3761 ImmOffset = ConstOffset; 3762 } 3763 3764 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3765 if (!AddrDef) 3766 return None; 3767 3768 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { 3769 int FI = AddrDef->MI->getOperand(1).getIndex(); 3770 return {{ 3771 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr 3772 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3773 }}; 3774 } 3775 3776 Register SAddr = AddrDef->Reg; 3777 3778 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 3779 Register LHS = AddrDef->MI->getOperand(1).getReg(); 3780 Register RHS = AddrDef->MI->getOperand(2).getReg(); 3781 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); 3782 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI); 3783 3784 if (LHSDef && RHSDef && 3785 LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX && 3786 isSGPR(RHSDef->Reg)) { 3787 int FI = LHSDef->MI->getOperand(1).getIndex(); 3788 MachineInstr &I = *Root.getParent(); 3789 MachineBasicBlock *BB = I.getParent(); 3790 const DebugLoc &DL = I.getDebugLoc(); 3791 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 3792 3793 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr) 3794 .addFrameIndex(FI) 3795 .addReg(RHSDef->Reg); 3796 } 3797 } 3798 3799 if (!isSGPR(SAddr)) 3800 return None; 3801 3802 return {{ 3803 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr 3804 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3805 }}; 3806 } 3807 3808 InstructionSelector::ComplexRendererFns 3809 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const { 3810 MachineInstr *MI = Root.getParent(); 3811 MachineBasicBlock *MBB = MI->getParent(); 3812 MachineFunction *MF = MBB->getParent(); 3813 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3814 3815 int64_t Offset = 0; 3816 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) && 3817 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) { 3818 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3819 3820 // TODO: Should this be inside the render function? The iterator seems to 3821 // move. 3822 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), 3823 HighBits) 3824 .addImm(Offset & ~4095); 3825 3826 return {{[=](MachineInstrBuilder &MIB) { // rsrc 3827 MIB.addReg(Info->getScratchRSrcReg()); 3828 }, 3829 [=](MachineInstrBuilder &MIB) { // vaddr 3830 MIB.addReg(HighBits); 3831 }, 3832 [=](MachineInstrBuilder &MIB) { // soffset 3833 // Use constant zero for soffset and rely on eliminateFrameIndex 3834 // to choose the appropriate frame register if need be. 3835 MIB.addImm(0); 3836 }, 3837 [=](MachineInstrBuilder &MIB) { // offset 3838 MIB.addImm(Offset & 4095); 3839 }}}; 3840 } 3841 3842 assert(Offset == 0 || Offset == -1); 3843 3844 // Try to fold a frame index directly into the MUBUF vaddr field, and any 3845 // offsets. 
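  // e.g. (ptr_add (frame_index %stack.N), C) can become vaddr = frame index
  // with C folded into the immediate offset field when that offset is legal.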
  Optional<int> FI;
  Register VAddr = Root.getReg();
  if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
    Register PtrBase;
    int64_t ConstOffset;
    std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
    if (ConstOffset != 0) {
      if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
          (!STI.privateMemoryResourceIsRangeChecked() ||
           KnownBits->signBitIsZero(PtrBase))) {
        const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
        if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
          FI = PtrBaseDef->getOperand(1).getIndex();
        else
          VAddr = PtrBase;
        Offset = ConstOffset;
      }
    } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
      FI = RootDef->getOperand(1).getIndex();
    }
  }

  return {{[=](MachineInstrBuilder &MIB) { // rsrc
             MIB.addReg(Info->getScratchRSrcReg());
           },
           [=](MachineInstrBuilder &MIB) { // vaddr
             if (FI.hasValue())
               MIB.addFrameIndex(FI.getValue());
             else
               MIB.addReg(VAddr);
           },
           [=](MachineInstrBuilder &MIB) { // soffset
             // Use constant zero for soffset and rely on eliminateFrameIndex
             // to choose the appropriate frame register if need be.
             MIB.addImm(0);
           },
           [=](MachineInstrBuilder &MIB) { // offset
             MIB.addImm(Offset);
           }}};
}

bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
                                                int64_t Offset) const {
  if (!isUInt<16>(Offset))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
  return KnownBits->signBitIsZero(Base);
}

bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
                                                 int64_t Offset1,
                                                 unsigned Size) const {
  if (Offset0 % Size != 0 || Offset1 % Size != 0)
    return false;
  if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
    return false;

  if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
    return true;

  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
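  // Conservatively require the sign bit of the base to be known zero instead.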
3913 return KnownBits->signBitIsZero(Base); 3914 } 3915 3916 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI, 3917 unsigned ShAmtBits) const { 3918 assert(MI.getOpcode() == TargetOpcode::G_AND); 3919 3920 Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI); 3921 if (!RHS) 3922 return false; 3923 3924 if (RHS->countTrailingOnes() >= ShAmtBits) 3925 return true; 3926 3927 const APInt &LHSKnownZeros = 3928 KnownBits->getKnownZeroes(MI.getOperand(1).getReg()); 3929 return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits; 3930 } 3931 3932 InstructionSelector::ComplexRendererFns 3933 AMDGPUInstructionSelector::selectMUBUFScratchOffset( 3934 MachineOperand &Root) const { 3935 MachineInstr *MI = Root.getParent(); 3936 MachineBasicBlock *MBB = MI->getParent(); 3937 3938 int64_t Offset = 0; 3939 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) || 3940 !SIInstrInfo::isLegalMUBUFImmOffset(Offset)) 3941 return {}; 3942 3943 const MachineFunction *MF = MBB->getParent(); 3944 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3945 3946 return {{ 3947 [=](MachineInstrBuilder &MIB) { // rsrc 3948 MIB.addReg(Info->getScratchRSrcReg()); 3949 }, 3950 [=](MachineInstrBuilder &MIB) { // soffset 3951 MIB.addImm(0); 3952 }, 3953 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset 3954 }}; 3955 } 3956 3957 std::pair<Register, unsigned> 3958 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const { 3959 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 3960 if (!RootDef) 3961 return std::make_pair(Root.getReg(), 0); 3962 3963 int64_t ConstAddr = 0; 3964 3965 Register PtrBase; 3966 int64_t Offset; 3967 std::tie(PtrBase, Offset) = 3968 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 3969 3970 if (Offset) { 3971 if (isDSOffsetLegal(PtrBase, Offset)) { 3972 // (add n0, c0) 3973 return std::make_pair(PtrBase, Offset); 3974 } 3975 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 3976 // TODO 3977 3978 3979 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 3980 // TODO 3981 3982 } 3983 3984 return std::make_pair(Root.getReg(), 0); 3985 } 3986 3987 InstructionSelector::ComplexRendererFns 3988 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const { 3989 Register Reg; 3990 unsigned Offset; 3991 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root); 3992 return {{ 3993 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3994 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } 3995 }}; 3996 } 3997 3998 InstructionSelector::ComplexRendererFns 3999 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const { 4000 return selectDSReadWrite2(Root, 4); 4001 } 4002 4003 InstructionSelector::ComplexRendererFns 4004 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const { 4005 return selectDSReadWrite2(Root, 8); 4006 } 4007 4008 InstructionSelector::ComplexRendererFns 4009 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root, 4010 unsigned Size) const { 4011 Register Reg; 4012 unsigned Offset; 4013 std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size); 4014 return {{ 4015 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 4016 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, 4017 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); } 4018 }}; 4019 } 4020 4021 std::pair<Register, unsigned> 4022 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root, 4023 unsigned Size) const 
{ 4024 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 4025 if (!RootDef) 4026 return std::make_pair(Root.getReg(), 0); 4027 4028 int64_t ConstAddr = 0; 4029 4030 Register PtrBase; 4031 int64_t Offset; 4032 std::tie(PtrBase, Offset) = 4033 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 4034 4035 if (Offset) { 4036 int64_t OffsetValue0 = Offset; 4037 int64_t OffsetValue1 = Offset + Size; 4038 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) { 4039 // (add n0, c0) 4040 return std::make_pair(PtrBase, OffsetValue0 / Size); 4041 } 4042 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 4043 // TODO 4044 4045 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 4046 // TODO 4047 4048 } 4049 4050 return std::make_pair(Root.getReg(), 0); 4051 } 4052 4053 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return 4054 /// the base value with the constant offset. There may be intervening copies 4055 /// between \p Root and the identified constant. Returns \p Root, 0 if this does 4056 /// not match the pattern. 4057 std::pair<Register, int64_t> 4058 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset( 4059 Register Root, const MachineRegisterInfo &MRI) const { 4060 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI); 4061 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) 4062 return {Root, 0}; 4063 4064 MachineOperand &RHS = RootI->getOperand(2); 4065 Optional<ValueAndVReg> MaybeOffset = 4066 getIConstantVRegValWithLookThrough(RHS.getReg(), MRI); 4067 if (!MaybeOffset) 4068 return {Root, 0}; 4069 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()}; 4070 } 4071 4072 static void addZeroImm(MachineInstrBuilder &MIB) { 4073 MIB.addImm(0); 4074 } 4075 4076 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p 4077 /// BasePtr is not valid, a null base pointer will be used. 4078 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4079 uint32_t FormatLo, uint32_t FormatHi, 4080 Register BasePtr) { 4081 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4082 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4083 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4084 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4085 4086 B.buildInstr(AMDGPU::S_MOV_B32) 4087 .addDef(RSrc2) 4088 .addImm(FormatLo); 4089 B.buildInstr(AMDGPU::S_MOV_B32) 4090 .addDef(RSrc3) 4091 .addImm(FormatHi); 4092 4093 // Build the half of the subregister with the constants before building the 4094 // full 128-bit register. If we are building multiple resource descriptors, 4095 // this will allow CSEing of the 2-component register. 
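  // The constant half only depends on the format words, so descriptors built
  // with the same format constants can share this REG_SEQUENCE.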
4096 B.buildInstr(AMDGPU::REG_SEQUENCE) 4097 .addDef(RSrcHi) 4098 .addReg(RSrc2) 4099 .addImm(AMDGPU::sub0) 4100 .addReg(RSrc3) 4101 .addImm(AMDGPU::sub1); 4102 4103 Register RSrcLo = BasePtr; 4104 if (!BasePtr) { 4105 RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4106 B.buildInstr(AMDGPU::S_MOV_B64) 4107 .addDef(RSrcLo) 4108 .addImm(0); 4109 } 4110 4111 B.buildInstr(AMDGPU::REG_SEQUENCE) 4112 .addDef(RSrc) 4113 .addReg(RSrcLo) 4114 .addImm(AMDGPU::sub0_sub1) 4115 .addReg(RSrcHi) 4116 .addImm(AMDGPU::sub2_sub3); 4117 4118 return RSrc; 4119 } 4120 4121 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4122 const SIInstrInfo &TII, Register BasePtr) { 4123 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); 4124 4125 // FIXME: Why are half the "default" bits ignored based on the addressing 4126 // mode? 4127 return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr); 4128 } 4129 4130 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4131 const SIInstrInfo &TII, Register BasePtr) { 4132 uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat(); 4133 4134 // FIXME: Why are half the "default" bits ignored based on the addressing 4135 // mode? 4136 return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr); 4137 } 4138 4139 AMDGPUInstructionSelector::MUBUFAddressData 4140 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const { 4141 MUBUFAddressData Data; 4142 Data.N0 = Src; 4143 4144 Register PtrBase; 4145 int64_t Offset; 4146 4147 std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI); 4148 if (isUInt<32>(Offset)) { 4149 Data.N0 = PtrBase; 4150 Data.Offset = Offset; 4151 } 4152 4153 if (MachineInstr *InputAdd 4154 = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) { 4155 Data.N2 = InputAdd->getOperand(1).getReg(); 4156 Data.N3 = InputAdd->getOperand(2).getReg(); 4157 4158 // FIXME: Need to fix extra SGPR->VGPRcopies inserted 4159 // FIXME: Don't know this was defined by operand 0 4160 // 4161 // TODO: Remove this when we have copy folding optimizations after 4162 // RegBankSelect. 4163 Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg(); 4164 Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg(); 4165 } 4166 4167 return Data; 4168 } 4169 4170 /// Return if the addr64 mubuf mode should be used for the given address. 4171 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const { 4172 // (ptr_add N2, N3) -> addr64, or 4173 // (ptr_add (ptr_add N2, N3), C1) -> addr64 4174 if (Addr.N2) 4175 return true; 4176 4177 const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI); 4178 return N0Bank->getID() == AMDGPU::VGPRRegBankID; 4179 } 4180 4181 /// Split an immediate offset \p ImmOffset depending on whether it fits in the 4182 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable 4183 /// component. 4184 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset( 4185 MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const { 4186 if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset)) 4187 return; 4188 4189 // Illegal offset, store it in soffset. 
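  // The hardware adds soffset on top of the 12-bit immediate offset field, so
  // moving the whole value into an SGPR leaves the effective address unchanged.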
4190 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 4191 B.buildInstr(AMDGPU::S_MOV_B32) 4192 .addDef(SOffset) 4193 .addImm(ImmOffset); 4194 ImmOffset = 0; 4195 } 4196 4197 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl( 4198 MachineOperand &Root, Register &VAddr, Register &RSrcReg, 4199 Register &SOffset, int64_t &Offset) const { 4200 // FIXME: Predicates should stop this from reaching here. 4201 // addr64 bit was removed for volcanic islands. 4202 if (!STI.hasAddr64() || STI.useFlatForGlobal()) 4203 return false; 4204 4205 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4206 if (!shouldUseAddr64(AddrData)) 4207 return false; 4208 4209 Register N0 = AddrData.N0; 4210 Register N2 = AddrData.N2; 4211 Register N3 = AddrData.N3; 4212 Offset = AddrData.Offset; 4213 4214 // Base pointer for the SRD. 4215 Register SRDPtr; 4216 4217 if (N2) { 4218 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4219 assert(N3); 4220 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4221 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the 4222 // addr64, and construct the default resource from a 0 address. 4223 VAddr = N0; 4224 } else { 4225 SRDPtr = N3; 4226 VAddr = N2; 4227 } 4228 } else { 4229 // N2 is not divergent. 4230 SRDPtr = N2; 4231 VAddr = N3; 4232 } 4233 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4234 // Use the default null pointer in the resource 4235 VAddr = N0; 4236 } else { 4237 // N0 -> offset, or 4238 // (N0 + C1) -> offset 4239 SRDPtr = N0; 4240 } 4241 4242 MachineIRBuilder B(*Root.getParent()); 4243 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr); 4244 splitIllegalMUBUFOffset(B, SOffset, Offset); 4245 return true; 4246 } 4247 4248 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl( 4249 MachineOperand &Root, Register &RSrcReg, Register &SOffset, 4250 int64_t &Offset) const { 4251 4252 // FIXME: Pattern should not reach here. 4253 if (STI.useFlatForGlobal()) 4254 return false; 4255 4256 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4257 if (shouldUseAddr64(AddrData)) 4258 return false; 4259 4260 // N0 -> offset, or 4261 // (N0 + C1) -> offset 4262 Register SRDPtr = AddrData.N0; 4263 Offset = AddrData.Offset; 4264 4265 // TODO: Look through extensions for 32-bit soffset. 4266 MachineIRBuilder B(*Root.getParent()); 4267 4268 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr); 4269 splitIllegalMUBUFOffset(B, SOffset, Offset); 4270 return true; 4271 } 4272 4273 InstructionSelector::ComplexRendererFns 4274 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const { 4275 Register VAddr; 4276 Register RSrcReg; 4277 Register SOffset; 4278 int64_t Offset = 0; 4279 4280 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4281 return {}; 4282 4283 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4284 // pattern. 
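  // The renderers below follow the MUBUF addr64 operand order:
  // rsrc, vaddr, soffset, offset, cpol, tfe, swz.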
4285 return {{ 4286 [=](MachineInstrBuilder &MIB) { // rsrc 4287 MIB.addReg(RSrcReg); 4288 }, 4289 [=](MachineInstrBuilder &MIB) { // vaddr 4290 MIB.addReg(VAddr); 4291 }, 4292 [=](MachineInstrBuilder &MIB) { // soffset 4293 if (SOffset) 4294 MIB.addReg(SOffset); 4295 else 4296 MIB.addImm(0); 4297 }, 4298 [=](MachineInstrBuilder &MIB) { // offset 4299 MIB.addImm(Offset); 4300 }, 4301 addZeroImm, // cpol 4302 addZeroImm, // tfe 4303 addZeroImm // swz 4304 }}; 4305 } 4306 4307 InstructionSelector::ComplexRendererFns 4308 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const { 4309 Register RSrcReg; 4310 Register SOffset; 4311 int64_t Offset = 0; 4312 4313 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4314 return {}; 4315 4316 return {{ 4317 [=](MachineInstrBuilder &MIB) { // rsrc 4318 MIB.addReg(RSrcReg); 4319 }, 4320 [=](MachineInstrBuilder &MIB) { // soffset 4321 if (SOffset) 4322 MIB.addReg(SOffset); 4323 else 4324 MIB.addImm(0); 4325 }, 4326 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4327 addZeroImm, // cpol 4328 addZeroImm, // tfe 4329 addZeroImm, // swz 4330 }}; 4331 } 4332 4333 InstructionSelector::ComplexRendererFns 4334 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const { 4335 Register VAddr; 4336 Register RSrcReg; 4337 Register SOffset; 4338 int64_t Offset = 0; 4339 4340 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4341 return {}; 4342 4343 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4344 // pattern. 4345 return {{ 4346 [=](MachineInstrBuilder &MIB) { // rsrc 4347 MIB.addReg(RSrcReg); 4348 }, 4349 [=](MachineInstrBuilder &MIB) { // vaddr 4350 MIB.addReg(VAddr); 4351 }, 4352 [=](MachineInstrBuilder &MIB) { // soffset 4353 if (SOffset) 4354 MIB.addReg(SOffset); 4355 else 4356 MIB.addImm(0); 4357 }, 4358 [=](MachineInstrBuilder &MIB) { // offset 4359 MIB.addImm(Offset); 4360 }, 4361 [=](MachineInstrBuilder &MIB) { 4362 MIB.addImm(AMDGPU::CPol::GLC); // cpol 4363 } 4364 }}; 4365 } 4366 4367 InstructionSelector::ComplexRendererFns 4368 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const { 4369 Register RSrcReg; 4370 Register SOffset; 4371 int64_t Offset = 0; 4372 4373 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4374 return {}; 4375 4376 return {{ 4377 [=](MachineInstrBuilder &MIB) { // rsrc 4378 MIB.addReg(RSrcReg); 4379 }, 4380 [=](MachineInstrBuilder &MIB) { // soffset 4381 if (SOffset) 4382 MIB.addReg(SOffset); 4383 else 4384 MIB.addImm(0); 4385 }, 4386 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4387 [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol 4388 }}; 4389 } 4390 4391 /// Get an immediate that must be 32-bits, and treated as zero extended. 4392 static Optional<uint64_t> getConstantZext32Val(Register Reg, 4393 const MachineRegisterInfo &MRI) { 4394 // getIConstantVRegVal sexts any values, so see if that matters. 
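  // Reject anything that does not fit in 32 bits, then reinterpret the low 32
  // bits as an unsigned value.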
4395 Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); 4396 if (!OffsetVal || !isInt<32>(*OffsetVal)) 4397 return None; 4398 return Lo_32(*OffsetVal); 4399 } 4400 4401 InstructionSelector::ComplexRendererFns 4402 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const { 4403 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4404 if (!OffsetVal) 4405 return {}; 4406 4407 Optional<int64_t> EncodedImm = 4408 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true); 4409 if (!EncodedImm) 4410 return {}; 4411 4412 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4413 } 4414 4415 InstructionSelector::ComplexRendererFns 4416 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const { 4417 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); 4418 4419 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4420 if (!OffsetVal) 4421 return {}; 4422 4423 Optional<int64_t> EncodedImm 4424 = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); 4425 if (!EncodedImm) 4426 return {}; 4427 4428 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4429 } 4430 4431 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB, 4432 const MachineInstr &MI, 4433 int OpIdx) const { 4434 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4435 "Expected G_CONSTANT"); 4436 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue()); 4437 } 4438 4439 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB, 4440 const MachineInstr &MI, 4441 int OpIdx) const { 4442 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4443 "Expected G_CONSTANT"); 4444 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue()); 4445 } 4446 4447 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB, 4448 const MachineInstr &MI, 4449 int OpIdx) const { 4450 assert(OpIdx == -1); 4451 4452 const MachineOperand &Op = MI.getOperand(1); 4453 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) 4454 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); 4455 else { 4456 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"); 4457 MIB.addImm(Op.getCImm()->getSExtValue()); 4458 } 4459 } 4460 4461 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB, 4462 const MachineInstr &MI, 4463 int OpIdx) const { 4464 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4465 "Expected G_CONSTANT"); 4466 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation()); 4467 } 4468 4469 /// This only really exists to satisfy DAG type checking machinery, so is a 4470 /// no-op here. 
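/// It simply re-emits the already-selected immediate operand unchanged.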
4471 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB, 4472 const MachineInstr &MI, 4473 int OpIdx) const { 4474 MIB.addImm(MI.getOperand(OpIdx).getImm()); 4475 } 4476 4477 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB, 4478 const MachineInstr &MI, 4479 int OpIdx) const { 4480 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4481 MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL); 4482 } 4483 4484 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB, 4485 const MachineInstr &MI, 4486 int OpIdx) const { 4487 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4488 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1); 4489 } 4490 4491 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB, 4492 const MachineInstr &MI, 4493 int OpIdx) const { 4494 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4495 MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC); 4496 } 4497 4498 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB, 4499 const MachineInstr &MI, 4500 int OpIdx) const { 4501 MIB.addFrameIndex((MI.getOperand(1).getIndex())); 4502 } 4503 4504 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const { 4505 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm()); 4506 } 4507 4508 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const { 4509 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm()); 4510 } 4511 4512 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const { 4513 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm()); 4514 } 4515 4516 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const { 4517 return TII.isInlineConstant(Imm); 4518 } 4519
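// Note: the isInlineImmediate* helpers above just forward to AMDGPUBaseInfo /
// SIInstrInfo so the inline-immediate checks stay defined in one place.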