1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 /// \file 9 /// This file implements the targeting of the InstructionSelector class for 10 /// AMDGPU. 11 /// \todo This should be generated by TableGen. 12 //===----------------------------------------------------------------------===// 13 14 #include "AMDGPUInstructionSelector.h" 15 #include "AMDGPU.h" 16 #include "AMDGPUGlobalISelUtils.h" 17 #include "AMDGPUInstrInfo.h" 18 #include "AMDGPURegisterBankInfo.h" 19 #include "AMDGPUTargetMachine.h" 20 #include "SIMachineFunctionInfo.h" 21 #include "Utils/AMDGPUBaseInfo.h" 22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h" 23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" 24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" 25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" 26 #include "llvm/CodeGen/MachineFrameInfo.h" 27 #include "llvm/IR/DiagnosticInfo.h" 28 #include "llvm/IR/IntrinsicsAMDGPU.h" 29 30 #define DEBUG_TYPE "amdgpu-isel" 31 32 using namespace llvm; 33 using namespace MIPatternMatch; 34 35 static cl::opt<bool> AllowRiskySelect( 36 "amdgpu-global-isel-risky-select", 37 cl::desc("Allow GlobalISel to select cases that are likely to not work yet"), 38 cl::init(false), 39 cl::ReallyHidden); 40 41 #define GET_GLOBALISEL_IMPL 42 #define AMDGPUSubtarget GCNSubtarget 43 #include "AMDGPUGenGlobalISel.inc" 44 #undef GET_GLOBALISEL_IMPL 45 #undef AMDGPUSubtarget 46 47 AMDGPUInstructionSelector::AMDGPUInstructionSelector( 48 const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI, 49 const AMDGPUTargetMachine &TM) 50 : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM), 51 STI(STI), 52 EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG), 53 #define GET_GLOBALISEL_PREDICATES_INIT 54 #include "AMDGPUGenGlobalISel.inc" 55 #undef GET_GLOBALISEL_PREDICATES_INIT 56 #define GET_GLOBALISEL_TEMPORARIES_INIT 57 #include "AMDGPUGenGlobalISel.inc" 58 #undef GET_GLOBALISEL_TEMPORARIES_INIT 59 { 60 } 61 62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; } 63 64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB, 65 CodeGenCoverage &CoverageInfo, 66 ProfileSummaryInfo *PSI, 67 BlockFrequencyInfo *BFI) { 68 MRI = &MF.getRegInfo(); 69 Subtarget = &MF.getSubtarget<GCNSubtarget>(); 70 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI); 71 } 72 73 bool AMDGPUInstructionSelector::isVCC(Register Reg, 74 const MachineRegisterInfo &MRI) const { 75 // The verifier is oblivious to s1 being a valid value for wavesize registers. 
76 if (Reg.isPhysical()) 77 return false; 78 79 auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 80 const TargetRegisterClass *RC = 81 RegClassOrBank.dyn_cast<const TargetRegisterClass*>(); 82 if (RC) { 83 const LLT Ty = MRI.getType(Reg); 84 return RC->hasSuperClassEq(TRI.getBoolRC()) && 85 Ty.isValid() && Ty.getSizeInBits() == 1; 86 } 87 88 const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>(); 89 return RB->getID() == AMDGPU::VCCRegBankID; 90 } 91 92 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI, 93 unsigned NewOpc) const { 94 MI.setDesc(TII.get(NewOpc)); 95 MI.removeOperand(1); // Remove intrinsic ID. 96 MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 97 98 MachineOperand &Dst = MI.getOperand(0); 99 MachineOperand &Src = MI.getOperand(1); 100 101 // TODO: This should be legalized to s32 if needed 102 if (MRI->getType(Dst.getReg()) == LLT::scalar(1)) 103 return false; 104 105 const TargetRegisterClass *DstRC 106 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 107 const TargetRegisterClass *SrcRC 108 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 109 if (!DstRC || DstRC != SrcRC) 110 return false; 111 112 return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) && 113 RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI); 114 } 115 116 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const { 117 const DebugLoc &DL = I.getDebugLoc(); 118 MachineBasicBlock *BB = I.getParent(); 119 I.setDesc(TII.get(TargetOpcode::COPY)); 120 121 const MachineOperand &Src = I.getOperand(1); 122 MachineOperand &Dst = I.getOperand(0); 123 Register DstReg = Dst.getReg(); 124 Register SrcReg = Src.getReg(); 125 126 if (isVCC(DstReg, *MRI)) { 127 if (SrcReg == AMDGPU::SCC) { 128 const TargetRegisterClass *RC 129 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 130 if (!RC) 131 return true; 132 return RBI.constrainGenericRegister(DstReg, *RC, *MRI); 133 } 134 135 if (!isVCC(SrcReg, *MRI)) { 136 // TODO: Should probably leave the copy and let copyPhysReg expand it. 137 if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI)) 138 return false; 139 140 const TargetRegisterClass *SrcRC 141 = TRI.getConstrainedRegClassForOperand(Src, *MRI); 142 143 Optional<ValueAndVReg> ConstVal = 144 getIConstantVRegValWithLookThrough(SrcReg, *MRI, true); 145 if (ConstVal) { 146 unsigned MovOpc = 147 STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 148 BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg) 149 .addImm(ConstVal->Value.getBoolValue() ? -1 : 0); 150 } else { 151 Register MaskedReg = MRI->createVirtualRegister(SrcRC); 152 153 // We can't trust the high bits at this point, so clear them. 154 155 // TODO: Skip masking high bits if def is known boolean. 156 157 unsigned AndOpc = 158 TRI.isSGPRClass(SrcRC) ? 
AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32; 159 BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg) 160 .addImm(1) 161 .addReg(SrcReg); 162 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg) 163 .addImm(0) 164 .addReg(MaskedReg); 165 } 166 167 if (!MRI->getRegClassOrNull(SrcReg)) 168 MRI->setRegClass(SrcReg, SrcRC); 169 I.eraseFromParent(); 170 return true; 171 } 172 173 const TargetRegisterClass *RC = 174 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 175 if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 176 return false; 177 178 return true; 179 } 180 181 for (const MachineOperand &MO : I.operands()) { 182 if (MO.getReg().isPhysical()) 183 continue; 184 185 const TargetRegisterClass *RC = 186 TRI.getConstrainedRegClassForOperand(MO, *MRI); 187 if (!RC) 188 continue; 189 RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI); 190 } 191 return true; 192 } 193 194 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const { 195 const Register DefReg = I.getOperand(0).getReg(); 196 const LLT DefTy = MRI->getType(DefReg); 197 if (DefTy == LLT::scalar(1)) { 198 if (!AllowRiskySelect) { 199 LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n"); 200 return false; 201 } 202 203 LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n"); 204 } 205 206 // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy) 207 208 const RegClassOrRegBank &RegClassOrBank = 209 MRI->getRegClassOrRegBank(DefReg); 210 211 const TargetRegisterClass *DefRC 212 = RegClassOrBank.dyn_cast<const TargetRegisterClass *>(); 213 if (!DefRC) { 214 if (!DefTy.isValid()) { 215 LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n"); 216 return false; 217 } 218 219 const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>(); 220 DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI); 221 if (!DefRC) { 222 LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n"); 223 return false; 224 } 225 } 226 227 // TODO: Verify that all registers have the same bank 228 I.setDesc(TII.get(TargetOpcode::PHI)); 229 return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI); 230 } 231 232 MachineOperand 233 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO, 234 const TargetRegisterClass &SubRC, 235 unsigned SubIdx) const { 236 237 MachineInstr *MI = MO.getParent(); 238 MachineBasicBlock *BB = MO.getParent()->getParent(); 239 Register DstReg = MRI->createVirtualRegister(&SubRC); 240 241 if (MO.isReg()) { 242 unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx); 243 Register Reg = MO.getReg(); 244 BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg) 245 .addReg(Reg, 0, ComposedSubIdx); 246 247 return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(), 248 MO.isKill(), MO.isDead(), MO.isUndef(), 249 MO.isEarlyClobber(), 0, MO.isDebug(), 250 MO.isInternalRead()); 251 } 252 253 assert(MO.isImm()); 254 255 APInt Imm(64, MO.getImm()); 256 257 switch (SubIdx) { 258 default: 259 llvm_unreachable("do not know to split immediate with this sub index."); 260 case AMDGPU::sub0: 261 return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue()); 262 case AMDGPU::sub1: 263 return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue()); 264 } 265 } 266 267 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) { 268 switch (Opc) { 269 case AMDGPU::G_AND: 270 return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 271 case AMDGPU::G_OR: 272 return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32; 273 case AMDGPU::G_XOR: 274 return Is64 ? 
AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32; 275 default: 276 llvm_unreachable("not a bit op"); 277 } 278 } 279 280 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const { 281 Register DstReg = I.getOperand(0).getReg(); 282 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 283 284 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 285 if (DstRB->getID() != AMDGPU::SGPRRegBankID && 286 DstRB->getID() != AMDGPU::VCCRegBankID) 287 return false; 288 289 bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID && 290 STI.isWave64()); 291 I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64))); 292 293 // Dead implicit-def of scc 294 I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef 295 true, // isImp 296 false, // isKill 297 true)); // isDead 298 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 299 } 300 301 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const { 302 MachineBasicBlock *BB = I.getParent(); 303 MachineFunction *MF = BB->getParent(); 304 Register DstReg = I.getOperand(0).getReg(); 305 const DebugLoc &DL = I.getDebugLoc(); 306 LLT Ty = MRI->getType(DstReg); 307 if (Ty.isVector()) 308 return false; 309 310 unsigned Size = Ty.getSizeInBits(); 311 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 312 const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID; 313 const bool Sub = I.getOpcode() == TargetOpcode::G_SUB; 314 315 if (Size == 32) { 316 if (IsSALU) { 317 const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32; 318 MachineInstr *Add = 319 BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 320 .add(I.getOperand(1)) 321 .add(I.getOperand(2)); 322 I.eraseFromParent(); 323 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 324 } 325 326 if (STI.hasAddNoCarry()) { 327 const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64; 328 I.setDesc(TII.get(Opc)); 329 I.addOperand(*MF, MachineOperand::CreateImm(0)); 330 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 331 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 332 } 333 334 const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64; 335 336 Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass()); 337 MachineInstr *Add 338 = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg) 339 .addDef(UnusedCarry, RegState::Dead) 340 .add(I.getOperand(1)) 341 .add(I.getOperand(2)) 342 .addImm(0); 343 I.eraseFromParent(); 344 return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI); 345 } 346 347 assert(!Sub && "illegal sub should not reach here"); 348 349 const TargetRegisterClass &RC 350 = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass; 351 const TargetRegisterClass &HalfRC 352 = IsSALU ? 
AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass; 353 354 MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0)); 355 MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0)); 356 MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1)); 357 MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1)); 358 359 Register DstLo = MRI->createVirtualRegister(&HalfRC); 360 Register DstHi = MRI->createVirtualRegister(&HalfRC); 361 362 if (IsSALU) { 363 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo) 364 .add(Lo1) 365 .add(Lo2); 366 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi) 367 .add(Hi1) 368 .add(Hi2); 369 } else { 370 const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass(); 371 Register CarryReg = MRI->createVirtualRegister(CarryRC); 372 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo) 373 .addDef(CarryReg) 374 .add(Lo1) 375 .add(Lo2) 376 .addImm(0); 377 MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi) 378 .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead) 379 .add(Hi1) 380 .add(Hi2) 381 .addReg(CarryReg, RegState::Kill) 382 .addImm(0); 383 384 if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI)) 385 return false; 386 } 387 388 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 389 .addReg(DstLo) 390 .addImm(AMDGPU::sub0) 391 .addReg(DstHi) 392 .addImm(AMDGPU::sub1); 393 394 395 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 396 return false; 397 398 I.eraseFromParent(); 399 return true; 400 } 401 402 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE( 403 MachineInstr &I) const { 404 MachineBasicBlock *BB = I.getParent(); 405 MachineFunction *MF = BB->getParent(); 406 const DebugLoc &DL = I.getDebugLoc(); 407 Register Dst0Reg = I.getOperand(0).getReg(); 408 Register Dst1Reg = I.getOperand(1).getReg(); 409 const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO || 410 I.getOpcode() == AMDGPU::G_UADDE; 411 const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE || 412 I.getOpcode() == AMDGPU::G_USUBE; 413 414 if (isVCC(Dst1Reg, *MRI)) { 415 unsigned NoCarryOpc = 416 IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64; 417 unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64; 418 I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc)); 419 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 420 I.addOperand(*MF, MachineOperand::CreateImm(0)); 421 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 422 } 423 424 Register Src0Reg = I.getOperand(2).getReg(); 425 Register Src1Reg = I.getOperand(3).getReg(); 426 427 if (HasCarryIn) { 428 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 429 .addReg(I.getOperand(4).getReg()); 430 } 431 432 unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32; 433 unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32; 434 435 BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? 
CarryOpc : NoCarryOpc), Dst0Reg) 436 .add(I.getOperand(2)) 437 .add(I.getOperand(3)); 438 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg) 439 .addReg(AMDGPU::SCC); 440 441 if (!MRI->getRegClassOrNull(Dst1Reg)) 442 MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass); 443 444 if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) || 445 !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) || 446 !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI)) 447 return false; 448 449 if (HasCarryIn && 450 !RBI.constrainGenericRegister(I.getOperand(4).getReg(), 451 AMDGPU::SReg_32RegClass, *MRI)) 452 return false; 453 454 I.eraseFromParent(); 455 return true; 456 } 457 458 // TODO: We should probably legalize these to only using 32-bit results. 459 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const { 460 MachineBasicBlock *BB = I.getParent(); 461 Register DstReg = I.getOperand(0).getReg(); 462 Register SrcReg = I.getOperand(1).getReg(); 463 LLT DstTy = MRI->getType(DstReg); 464 LLT SrcTy = MRI->getType(SrcReg); 465 const unsigned SrcSize = SrcTy.getSizeInBits(); 466 unsigned DstSize = DstTy.getSizeInBits(); 467 468 // TODO: Should handle any multiple of 32 offset. 469 unsigned Offset = I.getOperand(2).getImm(); 470 if (Offset % 32 != 0 || DstSize > 128) 471 return false; 472 473 // 16-bit operations really use 32-bit registers. 474 // FIXME: Probably should not allow 16-bit G_EXTRACT results. 475 if (DstSize == 16) 476 DstSize = 32; 477 478 const TargetRegisterClass *DstRC = 479 TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI); 480 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 481 return false; 482 483 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 484 const TargetRegisterClass *SrcRC = 485 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); 486 if (!SrcRC) 487 return false; 488 unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32, 489 DstSize / 32); 490 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg); 491 if (!SrcRC) 492 return false; 493 494 SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I, 495 *SrcRC, I.getOperand(1)); 496 const DebugLoc &DL = I.getDebugLoc(); 497 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg) 498 .addReg(SrcReg, 0, SubReg); 499 500 I.eraseFromParent(); 501 return true; 502 } 503 504 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const { 505 MachineBasicBlock *BB = MI.getParent(); 506 Register DstReg = MI.getOperand(0).getReg(); 507 LLT DstTy = MRI->getType(DstReg); 508 LLT SrcTy = MRI->getType(MI.getOperand(1).getReg()); 509 510 const unsigned SrcSize = SrcTy.getSizeInBits(); 511 if (SrcSize < 32) 512 return selectImpl(MI, *CoverageInfo); 513 514 const DebugLoc &DL = MI.getDebugLoc(); 515 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 516 const unsigned DstSize = DstTy.getSizeInBits(); 517 const TargetRegisterClass *DstRC = 518 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 519 if (!DstRC) 520 return false; 521 522 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8); 523 MachineInstrBuilder MIB = 524 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg); 525 for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) { 526 MachineOperand &Src = MI.getOperand(I + 1); 527 MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef())); 528 MIB.addImm(SubRegs[I]); 529 530 const TargetRegisterClass *SrcRC 531 = 
TRI.getConstrainedRegClassForOperand(Src, *MRI); 532 if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI)) 533 return false; 534 } 535 536 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 537 return false; 538 539 MI.eraseFromParent(); 540 return true; 541 } 542 543 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const { 544 MachineBasicBlock *BB = MI.getParent(); 545 const int NumDst = MI.getNumOperands() - 1; 546 547 MachineOperand &Src = MI.getOperand(NumDst); 548 549 Register SrcReg = Src.getReg(); 550 Register DstReg0 = MI.getOperand(0).getReg(); 551 LLT DstTy = MRI->getType(DstReg0); 552 LLT SrcTy = MRI->getType(SrcReg); 553 554 const unsigned DstSize = DstTy.getSizeInBits(); 555 const unsigned SrcSize = SrcTy.getSizeInBits(); 556 const DebugLoc &DL = MI.getDebugLoc(); 557 const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI); 558 559 const TargetRegisterClass *SrcRC = 560 TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI); 561 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 562 return false; 563 564 // Note we could have mixed SGPR and VGPR destination banks for an SGPR 565 // source, and this relies on the fact that the same subregister indices are 566 // used for both. 567 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8); 568 for (int I = 0, E = NumDst; I != E; ++I) { 569 MachineOperand &Dst = MI.getOperand(I); 570 BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg()) 571 .addReg(SrcReg, 0, SubRegs[I]); 572 573 // Make sure the subregister index is valid for the source register. 574 SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]); 575 if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI)) 576 return false; 577 578 const TargetRegisterClass *DstRC = 579 TRI.getConstrainedRegClassForOperand(Dst, *MRI); 580 if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI)) 581 return false; 582 } 583 584 MI.eraseFromParent(); 585 return true; 586 } 587 588 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC( 589 MachineInstr &MI) const { 590 if (selectImpl(MI, *CoverageInfo)) 591 return true; 592 593 const LLT S32 = LLT::scalar(32); 594 const LLT V2S16 = LLT::fixed_vector(2, 16); 595 596 Register Dst = MI.getOperand(0).getReg(); 597 if (MRI->getType(Dst) != V2S16) 598 return false; 599 600 const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI); 601 if (DstBank->getID() != AMDGPU::SGPRRegBankID) 602 return false; 603 604 Register Src0 = MI.getOperand(1).getReg(); 605 Register Src1 = MI.getOperand(2).getReg(); 606 if (MRI->getType(Src0) != S32) 607 return false; 608 609 const DebugLoc &DL = MI.getDebugLoc(); 610 MachineBasicBlock *BB = MI.getParent(); 611 612 auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true); 613 if (ConstSrc1) { 614 auto ConstSrc0 = 615 getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true); 616 if (ConstSrc0) { 617 const int64_t K0 = ConstSrc0->Value.getSExtValue(); 618 const int64_t K1 = ConstSrc1->Value.getSExtValue(); 619 uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff; 620 uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff; 621 622 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst) 623 .addImm(Lo16 | (Hi16 << 16)); 624 MI.eraseFromParent(); 625 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI); 626 } 627 } 628 629 // TODO: This should probably be a combine somewhere 630 // (build_vector_trunc $src0, undef -> copy $src0 631 MachineInstr *Src1Def = 
getDefIgnoringCopies(Src1, *MRI); 632 if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) { 633 MI.setDesc(TII.get(AMDGPU::COPY)); 634 MI.removeOperand(2); 635 return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) && 636 RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI); 637 } 638 639 Register ShiftSrc0; 640 Register ShiftSrc1; 641 642 // With multiple uses of the shift, this will duplicate the shift and 643 // increase register pressure. 644 // 645 // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16) 646 // => (S_PACK_HH_B32_B16 $src0, $src1) 647 // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16)) 648 // => (S_PACK_LH_B32_B16 $src0, $src1) 649 // (build_vector_trunc $src0, $src1) 650 // => (S_PACK_LL_B32_B16 $src0, $src1) 651 652 bool Shift0 = mi_match( 653 Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16)))); 654 655 bool Shift1 = mi_match( 656 Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16)))); 657 658 unsigned Opc = AMDGPU::S_PACK_LL_B32_B16; 659 if (Shift0 && Shift1) { 660 Opc = AMDGPU::S_PACK_HH_B32_B16; 661 MI.getOperand(1).setReg(ShiftSrc0); 662 MI.getOperand(2).setReg(ShiftSrc1); 663 } else if (Shift1) { 664 Opc = AMDGPU::S_PACK_LH_B32_B16; 665 MI.getOperand(2).setReg(ShiftSrc1); 666 } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) { 667 // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16 668 auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst) 669 .addReg(ShiftSrc0) 670 .addImm(16); 671 672 MI.eraseFromParent(); 673 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 674 } 675 676 MI.setDesc(TII.get(Opc)); 677 return constrainSelectedInstRegOperands(MI, TII, TRI, RBI); 678 } 679 680 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const { 681 return selectG_ADD_SUB(I); 682 } 683 684 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const { 685 const MachineOperand &MO = I.getOperand(0); 686 687 // FIXME: Interface for getConstrainedRegClassForOperand needs work. The 688 // regbank check here is to know why getConstrainedRegClassForOperand failed. 689 const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI); 690 if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) || 691 (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) { 692 I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF)); 693 return true; 694 } 695 696 return false; 697 } 698 699 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const { 700 MachineBasicBlock *BB = I.getParent(); 701 702 Register DstReg = I.getOperand(0).getReg(); 703 Register Src0Reg = I.getOperand(1).getReg(); 704 Register Src1Reg = I.getOperand(2).getReg(); 705 LLT Src1Ty = MRI->getType(Src1Reg); 706 707 unsigned DstSize = MRI->getType(DstReg).getSizeInBits(); 708 unsigned InsSize = Src1Ty.getSizeInBits(); 709 710 int64_t Offset = I.getOperand(3).getImm(); 711 712 // FIXME: These cases should have been illegal and unnecessary to check here. 713 if (Offset % 32 != 0 || InsSize % 32 != 0) 714 return false; 715 716 // Currently not handled by getSubRegFromChannel. 
717 if (InsSize > 128) 718 return false; 719 720 unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32); 721 if (SubReg == AMDGPU::NoSubRegister) 722 return false; 723 724 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 725 const TargetRegisterClass *DstRC = 726 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 727 if (!DstRC) 728 return false; 729 730 const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI); 731 const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI); 732 const TargetRegisterClass *Src0RC = 733 TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI); 734 const TargetRegisterClass *Src1RC = 735 TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI); 736 737 // Deal with weird cases where the class only partially supports the subreg 738 // index. 739 Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg); 740 if (!Src0RC || !Src1RC) 741 return false; 742 743 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 744 !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) || 745 !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI)) 746 return false; 747 748 const DebugLoc &DL = I.getDebugLoc(); 749 BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg) 750 .addReg(Src0Reg) 751 .addReg(Src1Reg) 752 .addImm(SubReg); 753 754 I.eraseFromParent(); 755 return true; 756 } 757 758 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const { 759 Register DstReg = MI.getOperand(0).getReg(); 760 Register SrcReg = MI.getOperand(1).getReg(); 761 Register OffsetReg = MI.getOperand(2).getReg(); 762 Register WidthReg = MI.getOperand(3).getReg(); 763 764 assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID && 765 "scalar BFX instructions are expanded in regbankselect"); 766 assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 && 767 "64-bit vector BFX instructions are expanded in regbankselect"); 768 769 const DebugLoc &DL = MI.getDebugLoc(); 770 MachineBasicBlock *MBB = MI.getParent(); 771 772 bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX; 773 unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 774 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg) 775 .addReg(SrcReg) 776 .addReg(OffsetReg) 777 .addReg(WidthReg); 778 MI.eraseFromParent(); 779 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 780 } 781 782 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const { 783 if (STI.getLDSBankCount() != 16) 784 return selectImpl(MI, *CoverageInfo); 785 786 Register Dst = MI.getOperand(0).getReg(); 787 Register Src0 = MI.getOperand(2).getReg(); 788 Register M0Val = MI.getOperand(6).getReg(); 789 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) || 790 !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) || 791 !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI)) 792 return false; 793 794 // This requires 2 instructions. It is possible to write a pattern to support 795 // this, but the generated isel emitter doesn't correctly deal with multiple 796 // output instructions using the same physical register input. The copy to m0 797 // is incorrectly placed before the second instruction. 798 // 799 // TODO: Match source modifiers. 
800 801 Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 802 const DebugLoc &DL = MI.getDebugLoc(); 803 MachineBasicBlock *MBB = MI.getParent(); 804 805 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 806 .addReg(M0Val); 807 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov) 808 .addImm(2) 809 .addImm(MI.getOperand(4).getImm()) // $attr 810 .addImm(MI.getOperand(3).getImm()); // $attrchan 811 812 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst) 813 .addImm(0) // $src0_modifiers 814 .addReg(Src0) // $src0 815 .addImm(MI.getOperand(4).getImm()) // $attr 816 .addImm(MI.getOperand(3).getImm()) // $attrchan 817 .addImm(0) // $src2_modifiers 818 .addReg(InterpMov) // $src2 - 2 f16 values selected by high 819 .addImm(MI.getOperand(5).getImm()) // $high 820 .addImm(0) // $clamp 821 .addImm(0); // $omod 822 823 MI.eraseFromParent(); 824 return true; 825 } 826 827 // Writelane is special in that it can use SGPR and M0 (which would normally 828 // count as using the constant bus twice - but in this case it is allowed since 829 // the lane selector doesn't count as a use of the constant bus). However, it is 830 // still required to abide by the 1 SGPR rule. Fix this up if we might have 831 // multiple SGPRs. 832 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const { 833 // With a constant bus limit of at least 2, there's no issue. 834 if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1) 835 return selectImpl(MI, *CoverageInfo); 836 837 MachineBasicBlock *MBB = MI.getParent(); 838 const DebugLoc &DL = MI.getDebugLoc(); 839 Register VDst = MI.getOperand(0).getReg(); 840 Register Val = MI.getOperand(2).getReg(); 841 Register LaneSelect = MI.getOperand(3).getReg(); 842 Register VDstIn = MI.getOperand(4).getReg(); 843 844 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst); 845 846 Optional<ValueAndVReg> ConstSelect = 847 getIConstantVRegValWithLookThrough(LaneSelect, *MRI); 848 if (ConstSelect) { 849 // The selector has to be an inline immediate, so we can use whatever for 850 // the other operands. 851 MIB.addReg(Val); 852 MIB.addImm(ConstSelect->Value.getSExtValue() & 853 maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2())); 854 } else { 855 Optional<ValueAndVReg> ConstVal = 856 getIConstantVRegValWithLookThrough(Val, *MRI); 857 858 // If the value written is an inline immediate, we can get away without a 859 // copy to m0. 860 if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(), 861 STI.hasInv2PiInlineImm())) { 862 MIB.addImm(ConstVal->Value.getSExtValue()); 863 MIB.addReg(LaneSelect); 864 } else { 865 MIB.addReg(Val); 866 867 // If the lane selector was originally in a VGPR and copied with 868 // readfirstlane, there's a hazard to read the same SGPR from the 869 // VALU. Constrain to a different SGPR to help avoid needing a nop later. 870 RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI); 871 872 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 873 .addReg(LaneSelect); 874 MIB.addReg(AMDGPU::M0); 875 } 876 } 877 878 MIB.addReg(VDstIn); 879 880 MI.eraseFromParent(); 881 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 882 } 883 884 // We need to handle this here because tablegen doesn't support matching 885 // instructions with multiple outputs. 
886 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const { 887 Register Dst0 = MI.getOperand(0).getReg(); 888 Register Dst1 = MI.getOperand(1).getReg(); 889 890 LLT Ty = MRI->getType(Dst0); 891 unsigned Opc; 892 if (Ty == LLT::scalar(32)) 893 Opc = AMDGPU::V_DIV_SCALE_F32_e64; 894 else if (Ty == LLT::scalar(64)) 895 Opc = AMDGPU::V_DIV_SCALE_F64_e64; 896 else 897 return false; 898 899 // TODO: Match source modifiers. 900 901 const DebugLoc &DL = MI.getDebugLoc(); 902 MachineBasicBlock *MBB = MI.getParent(); 903 904 Register Numer = MI.getOperand(3).getReg(); 905 Register Denom = MI.getOperand(4).getReg(); 906 unsigned ChooseDenom = MI.getOperand(5).getImm(); 907 908 Register Src0 = ChooseDenom != 0 ? Numer : Denom; 909 910 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0) 911 .addDef(Dst1) 912 .addImm(0) // $src0_modifiers 913 .addUse(Src0) // $src0 914 .addImm(0) // $src1_modifiers 915 .addUse(Denom) // $src1 916 .addImm(0) // $src2_modifiers 917 .addUse(Numer) // $src2 918 .addImm(0) // $clamp 919 .addImm(0); // $omod 920 921 MI.eraseFromParent(); 922 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 923 } 924 925 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const { 926 unsigned IntrinsicID = I.getIntrinsicID(); 927 switch (IntrinsicID) { 928 case Intrinsic::amdgcn_if_break: { 929 MachineBasicBlock *BB = I.getParent(); 930 931 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick 932 // SelectionDAG uses for wave32 vs wave64. 933 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK)) 934 .add(I.getOperand(0)) 935 .add(I.getOperand(2)) 936 .add(I.getOperand(3)); 937 938 Register DstReg = I.getOperand(0).getReg(); 939 Register Src0Reg = I.getOperand(2).getReg(); 940 Register Src1Reg = I.getOperand(3).getReg(); 941 942 I.eraseFromParent(); 943 944 for (Register Reg : { DstReg, Src0Reg, Src1Reg }) 945 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 946 947 return true; 948 } 949 case Intrinsic::amdgcn_interp_p1_f16: 950 return selectInterpP1F16(I); 951 case Intrinsic::amdgcn_wqm: 952 return constrainCopyLikeIntrin(I, AMDGPU::WQM); 953 case Intrinsic::amdgcn_softwqm: 954 return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM); 955 case Intrinsic::amdgcn_strict_wwm: 956 case Intrinsic::amdgcn_wwm: 957 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM); 958 case Intrinsic::amdgcn_strict_wqm: 959 return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM); 960 case Intrinsic::amdgcn_writelane: 961 return selectWritelane(I); 962 case Intrinsic::amdgcn_div_scale: 963 return selectDivScale(I); 964 case Intrinsic::amdgcn_icmp: 965 return selectIntrinsicIcmp(I); 966 case Intrinsic::amdgcn_ballot: 967 return selectBallot(I); 968 case Intrinsic::amdgcn_reloc_constant: 969 return selectRelocConstant(I); 970 case Intrinsic::amdgcn_groupstaticsize: 971 return selectGroupStaticSize(I); 972 case Intrinsic::returnaddress: 973 return selectReturnAddress(I); 974 default: 975 return selectImpl(I, *CoverageInfo); 976 } 977 } 978 979 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) { 980 if (Size != 32 && Size != 64) 981 return -1; 982 switch (P) { 983 default: 984 llvm_unreachable("Unknown condition code!"); 985 case CmpInst::ICMP_NE: 986 return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64; 987 case CmpInst::ICMP_EQ: 988 return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64; 989 case CmpInst::ICMP_SGT: 990 return Size == 32 ? 
AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64; 991 case CmpInst::ICMP_SGE: 992 return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64; 993 case CmpInst::ICMP_SLT: 994 return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64; 995 case CmpInst::ICMP_SLE: 996 return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64; 997 case CmpInst::ICMP_UGT: 998 return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64; 999 case CmpInst::ICMP_UGE: 1000 return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64; 1001 case CmpInst::ICMP_ULT: 1002 return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64; 1003 case CmpInst::ICMP_ULE: 1004 return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64; 1005 } 1006 } 1007 1008 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P, 1009 unsigned Size) const { 1010 if (Size == 64) { 1011 if (!STI.hasScalarCompareEq64()) 1012 return -1; 1013 1014 switch (P) { 1015 case CmpInst::ICMP_NE: 1016 return AMDGPU::S_CMP_LG_U64; 1017 case CmpInst::ICMP_EQ: 1018 return AMDGPU::S_CMP_EQ_U64; 1019 default: 1020 return -1; 1021 } 1022 } 1023 1024 if (Size != 32) 1025 return -1; 1026 1027 switch (P) { 1028 case CmpInst::ICMP_NE: 1029 return AMDGPU::S_CMP_LG_U32; 1030 case CmpInst::ICMP_EQ: 1031 return AMDGPU::S_CMP_EQ_U32; 1032 case CmpInst::ICMP_SGT: 1033 return AMDGPU::S_CMP_GT_I32; 1034 case CmpInst::ICMP_SGE: 1035 return AMDGPU::S_CMP_GE_I32; 1036 case CmpInst::ICMP_SLT: 1037 return AMDGPU::S_CMP_LT_I32; 1038 case CmpInst::ICMP_SLE: 1039 return AMDGPU::S_CMP_LE_I32; 1040 case CmpInst::ICMP_UGT: 1041 return AMDGPU::S_CMP_GT_U32; 1042 case CmpInst::ICMP_UGE: 1043 return AMDGPU::S_CMP_GE_U32; 1044 case CmpInst::ICMP_ULT: 1045 return AMDGPU::S_CMP_LT_U32; 1046 case CmpInst::ICMP_ULE: 1047 return AMDGPU::S_CMP_LE_U32; 1048 default: 1049 llvm_unreachable("Unknown condition code!"); 1050 } 1051 } 1052 1053 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const { 1054 MachineBasicBlock *BB = I.getParent(); 1055 const DebugLoc &DL = I.getDebugLoc(); 1056 1057 Register SrcReg = I.getOperand(2).getReg(); 1058 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1059 1060 auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate(); 1061 1062 Register CCReg = I.getOperand(0).getReg(); 1063 if (!isVCC(CCReg, *MRI)) { 1064 int Opcode = getS_CMPOpcode(Pred, Size); 1065 if (Opcode == -1) 1066 return false; 1067 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode)) 1068 .add(I.getOperand(2)) 1069 .add(I.getOperand(3)); 1070 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg) 1071 .addReg(AMDGPU::SCC); 1072 bool Ret = 1073 constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) && 1074 RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI); 1075 I.eraseFromParent(); 1076 return Ret; 1077 } 1078 1079 int Opcode = getV_CMPOpcode(Pred, Size); 1080 if (Opcode == -1) 1081 return false; 1082 1083 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), 1084 I.getOperand(0).getReg()) 1085 .add(I.getOperand(2)) 1086 .add(I.getOperand(3)); 1087 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), 1088 *TRI.getBoolRC(), *MRI); 1089 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1090 I.eraseFromParent(); 1091 return Ret; 1092 } 1093 1094 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const { 1095 Register Dst = I.getOperand(0).getReg(); 1096 if (isVCC(Dst, *MRI)) 1097 return false; 1098 1099 if 
(MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize()) 1100 return false; 1101 1102 MachineBasicBlock *BB = I.getParent(); 1103 const DebugLoc &DL = I.getDebugLoc(); 1104 Register SrcReg = I.getOperand(2).getReg(); 1105 unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI); 1106 1107 auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm()); 1108 if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) { 1109 MachineInstr *ICmp = 1110 BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst); 1111 1112 if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), 1113 *TRI.getBoolRC(), *MRI)) 1114 return false; 1115 I.eraseFromParent(); 1116 return true; 1117 } 1118 1119 int Opcode = getV_CMPOpcode(Pred, Size); 1120 if (Opcode == -1) 1121 return false; 1122 1123 MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst) 1124 .add(I.getOperand(2)) 1125 .add(I.getOperand(3)); 1126 RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(), 1127 *MRI); 1128 bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI); 1129 I.eraseFromParent(); 1130 return Ret; 1131 } 1132 1133 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const { 1134 MachineBasicBlock *BB = I.getParent(); 1135 const DebugLoc &DL = I.getDebugLoc(); 1136 Register DstReg = I.getOperand(0).getReg(); 1137 const unsigned Size = MRI->getType(DstReg).getSizeInBits(); 1138 const bool Is64 = Size == 64; 1139 1140 if (Size != STI.getWavefrontSize()) 1141 return false; 1142 1143 Optional<ValueAndVReg> Arg = 1144 getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI); 1145 1146 if (Arg.hasValue()) { 1147 const int64_t Value = Arg.getValue().Value.getSExtValue(); 1148 if (Value == 0) { 1149 unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32; 1150 BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0); 1151 } else if (Value == -1) { // all ones 1152 Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO; 1153 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1154 } else 1155 return false; 1156 } else { 1157 Register SrcReg = I.getOperand(2).getReg(); 1158 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg); 1159 } 1160 1161 I.eraseFromParent(); 1162 return true; 1163 } 1164 1165 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const { 1166 Register DstReg = I.getOperand(0).getReg(); 1167 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 1168 const TargetRegisterClass *DstRC = 1169 TRI.getRegClassForSizeOnBank(32, *DstBank, *MRI); 1170 if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) 1171 return false; 1172 1173 const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID; 1174 1175 Module *M = MF->getFunction().getParent(); 1176 const MDNode *Metadata = I.getOperand(2).getMetadata(); 1177 auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString(); 1178 auto RelocSymbol = cast<GlobalVariable>( 1179 M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext()))); 1180 1181 MachineBasicBlock *BB = I.getParent(); 1182 BuildMI(*BB, &I, I.getDebugLoc(), 1183 TII.get(IsVALU ? 
AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg) 1184 .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO); 1185 1186 I.eraseFromParent(); 1187 return true; 1188 } 1189 1190 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const { 1191 Triple::OSType OS = MF->getTarget().getTargetTriple().getOS(); 1192 1193 Register DstReg = I.getOperand(0).getReg(); 1194 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1195 unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ? 1196 AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 1197 1198 MachineBasicBlock *MBB = I.getParent(); 1199 const DebugLoc &DL = I.getDebugLoc(); 1200 1201 auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg); 1202 1203 if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) { 1204 const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>(); 1205 MIB.addImm(MFI->getLDSSize()); 1206 } else { 1207 Module *M = MF->getFunction().getParent(); 1208 const GlobalValue *GV 1209 = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize); 1210 MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO); 1211 } 1212 1213 I.eraseFromParent(); 1214 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1215 } 1216 1217 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const { 1218 MachineBasicBlock *MBB = I.getParent(); 1219 MachineFunction &MF = *MBB->getParent(); 1220 const DebugLoc &DL = I.getDebugLoc(); 1221 1222 MachineOperand &Dst = I.getOperand(0); 1223 Register DstReg = Dst.getReg(); 1224 unsigned Depth = I.getOperand(2).getImm(); 1225 1226 const TargetRegisterClass *RC 1227 = TRI.getConstrainedRegClassForOperand(Dst, *MRI); 1228 if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) || 1229 !RBI.constrainGenericRegister(DstReg, *RC, *MRI)) 1230 return false; 1231 1232 // Check for kernel and shader functions 1233 if (Depth != 0 || 1234 MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) { 1235 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 1236 .addImm(0); 1237 I.eraseFromParent(); 1238 return true; 1239 } 1240 1241 MachineFrameInfo &MFI = MF.getFrameInfo(); 1242 // There is a call to @llvm.returnaddress in this function 1243 MFI.setReturnAddressIsTaken(true); 1244 1245 // Get the return address reg and mark it as an implicit live-in 1246 Register ReturnAddrReg = TRI.getReturnAddressReg(MF); 1247 Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg, 1248 AMDGPU::SReg_64RegClass, DL); 1249 BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg) 1250 .addReg(LiveIn); 1251 I.eraseFromParent(); 1252 return true; 1253 } 1254 1255 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const { 1256 // FIXME: Manually selecting to avoid dealing with the SReg_1 trick 1257 // SelectionDAG uses for wave32 vs wave64. 
1258 MachineBasicBlock *BB = MI.getParent(); 1259 BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF)) 1260 .add(MI.getOperand(1)); 1261 1262 Register Reg = MI.getOperand(1).getReg(); 1263 MI.eraseFromParent(); 1264 1265 if (!MRI->getRegClassOrNull(Reg)) 1266 MRI->setRegClass(Reg, TRI.getWaveMaskRegClass()); 1267 return true; 1268 } 1269 1270 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic( 1271 MachineInstr &MI, Intrinsic::ID IntrID) const { 1272 MachineBasicBlock *MBB = MI.getParent(); 1273 MachineFunction *MF = MBB->getParent(); 1274 const DebugLoc &DL = MI.getDebugLoc(); 1275 1276 unsigned IndexOperand = MI.getOperand(7).getImm(); 1277 bool WaveRelease = MI.getOperand(8).getImm() != 0; 1278 bool WaveDone = MI.getOperand(9).getImm() != 0; 1279 1280 if (WaveDone && !WaveRelease) 1281 report_fatal_error("ds_ordered_count: wave_done requires wave_release"); 1282 1283 unsigned OrderedCountIndex = IndexOperand & 0x3f; 1284 IndexOperand &= ~0x3f; 1285 unsigned CountDw = 0; 1286 1287 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) { 1288 CountDw = (IndexOperand >> 24) & 0xf; 1289 IndexOperand &= ~(0xf << 24); 1290 1291 if (CountDw < 1 || CountDw > 4) { 1292 report_fatal_error( 1293 "ds_ordered_count: dword count must be between 1 and 4"); 1294 } 1295 } 1296 1297 if (IndexOperand) 1298 report_fatal_error("ds_ordered_count: bad index operand"); 1299 1300 unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1; 1301 unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF); 1302 1303 unsigned Offset0 = OrderedCountIndex << 2; 1304 unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) | 1305 (Instruction << 4); 1306 1307 if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) 1308 Offset1 |= (CountDw - 1) << 6; 1309 1310 unsigned Offset = Offset0 | (Offset1 << 8); 1311 1312 Register M0Val = MI.getOperand(2).getReg(); 1313 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1314 .addReg(M0Val); 1315 1316 Register DstReg = MI.getOperand(0).getReg(); 1317 Register ValReg = MI.getOperand(3).getReg(); 1318 MachineInstrBuilder DS = 1319 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg) 1320 .addReg(ValReg) 1321 .addImm(Offset) 1322 .cloneMemRefs(MI); 1323 1324 if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI)) 1325 return false; 1326 1327 bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI); 1328 MI.eraseFromParent(); 1329 return Ret; 1330 } 1331 1332 static unsigned gwsIntrinToOpcode(unsigned IntrID) { 1333 switch (IntrID) { 1334 case Intrinsic::amdgcn_ds_gws_init: 1335 return AMDGPU::DS_GWS_INIT; 1336 case Intrinsic::amdgcn_ds_gws_barrier: 1337 return AMDGPU::DS_GWS_BARRIER; 1338 case Intrinsic::amdgcn_ds_gws_sema_v: 1339 return AMDGPU::DS_GWS_SEMA_V; 1340 case Intrinsic::amdgcn_ds_gws_sema_br: 1341 return AMDGPU::DS_GWS_SEMA_BR; 1342 case Intrinsic::amdgcn_ds_gws_sema_p: 1343 return AMDGPU::DS_GWS_SEMA_P; 1344 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1345 return AMDGPU::DS_GWS_SEMA_RELEASE_ALL; 1346 default: 1347 llvm_unreachable("not a gws intrinsic"); 1348 } 1349 } 1350 1351 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI, 1352 Intrinsic::ID IID) const { 1353 if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all && 1354 !STI.hasGWSSemaReleaseAll()) 1355 return false; 1356 1357 // intrinsic ID, vsrc, offset 1358 const bool HasVSrc = MI.getNumOperands() == 3; 1359 assert(HasVSrc || MI.getNumOperands() == 2); 1360 1361 Register BaseOffset = MI.getOperand(HasVSrc ? 
2 : 1).getReg(); 1362 const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI); 1363 if (OffsetRB->getID() != AMDGPU::SGPRRegBankID) 1364 return false; 1365 1366 MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1367 assert(OffsetDef); 1368 1369 unsigned ImmOffset; 1370 1371 MachineBasicBlock *MBB = MI.getParent(); 1372 const DebugLoc &DL = MI.getDebugLoc(); 1373 1374 MachineInstr *Readfirstlane = nullptr; 1375 1376 // If we legalized the VGPR input, strip out the readfirstlane to analyze the 1377 // incoming offset, in case there's an add of a constant. We'll have to put it 1378 // back later. 1379 if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) { 1380 Readfirstlane = OffsetDef; 1381 BaseOffset = OffsetDef->getOperand(1).getReg(); 1382 OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI); 1383 } 1384 1385 if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) { 1386 // If we have a constant offset, try to use the 0 in m0 as the base. 1387 // TODO: Look into changing the default m0 initialization value. If the 1388 // default -1 only set the low 16-bits, we could leave it as-is and add 1 to 1389 // the immediate offset. 1390 1391 ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue(); 1392 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 1393 .addImm(0); 1394 } else { 1395 std::tie(BaseOffset, ImmOffset) = 1396 AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset); 1397 1398 if (Readfirstlane) { 1399 // We have the constant offset now, so put the readfirstlane back on the 1400 // variable component. 1401 if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI)) 1402 return false; 1403 1404 Readfirstlane->getOperand(1).setReg(BaseOffset); 1405 BaseOffset = Readfirstlane->getOperand(0).getReg(); 1406 } else { 1407 if (!RBI.constrainGenericRegister(BaseOffset, 1408 AMDGPU::SReg_32RegClass, *MRI)) 1409 return false; 1410 } 1411 1412 Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 1413 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base) 1414 .addReg(BaseOffset) 1415 .addImm(16); 1416 1417 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1418 .addReg(M0Base); 1419 } 1420 1421 // The resource id offset is computed as (<isa opaque base> + M0[21:16] + 1422 // offset field) % 64. Some versions of the programming guide omit the m0 1423 // part, or claim it's from offset 0. 1424 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID))); 1425 1426 if (HasVSrc) { 1427 Register VSrc = MI.getOperand(1).getReg(); 1428 1429 if (STI.needsAlignedVGPRs()) { 1430 // Add implicit aligned super-reg to force alignment on the data operand. 
1431 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1432 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); 1433 Register NewVR = 1434 MRI->createVirtualRegister(&AMDGPU::VReg_64_Align2RegClass); 1435 BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), NewVR) 1436 .addReg(VSrc, 0, MI.getOperand(1).getSubReg()) 1437 .addImm(AMDGPU::sub0) 1438 .addReg(Undef) 1439 .addImm(AMDGPU::sub1); 1440 MIB.addReg(NewVR, 0, AMDGPU::sub0); 1441 MIB.addReg(NewVR, RegState::Implicit); 1442 } else { 1443 MIB.addReg(VSrc); 1444 } 1445 1446 if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI)) 1447 return false; 1448 } 1449 1450 MIB.addImm(ImmOffset) 1451 .cloneMemRefs(MI); 1452 1453 MI.eraseFromParent(); 1454 return true; 1455 } 1456 1457 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI, 1458 bool IsAppend) const { 1459 Register PtrBase = MI.getOperand(2).getReg(); 1460 LLT PtrTy = MRI->getType(PtrBase); 1461 bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS; 1462 1463 unsigned Offset; 1464 std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2)); 1465 1466 // TODO: Should this try to look through readfirstlane like GWS? 1467 if (!isDSOffsetLegal(PtrBase, Offset)) { 1468 PtrBase = MI.getOperand(2).getReg(); 1469 Offset = 0; 1470 } 1471 1472 MachineBasicBlock *MBB = MI.getParent(); 1473 const DebugLoc &DL = MI.getDebugLoc(); 1474 const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME; 1475 1476 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 1477 .addReg(PtrBase); 1478 if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI)) 1479 return false; 1480 1481 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg()) 1482 .addImm(Offset) 1483 .addImm(IsGDS ? -1 : 0) 1484 .cloneMemRefs(MI); 1485 MI.eraseFromParent(); 1486 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1487 } 1488 1489 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const { 1490 if (TM.getOptLevel() > CodeGenOpt::None) { 1491 unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second; 1492 if (WGSize <= STI.getWavefrontSize()) { 1493 MachineBasicBlock *MBB = MI.getParent(); 1494 const DebugLoc &DL = MI.getDebugLoc(); 1495 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER)); 1496 MI.eraseFromParent(); 1497 return true; 1498 } 1499 } 1500 return selectImpl(MI, *CoverageInfo); 1501 } 1502 1503 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE, 1504 bool &IsTexFail) { 1505 if (TexFailCtrl) 1506 IsTexFail = true; 1507 1508 TFE = (TexFailCtrl & 0x1) ? true : false; 1509 TexFailCtrl &= ~(uint64_t)0x1; 1510 LWE = (TexFailCtrl & 0x2) ? 
true : false; 1511 TexFailCtrl &= ~(uint64_t)0x2; 1512 1513 return TexFailCtrl == 0; 1514 } 1515 1516 bool AMDGPUInstructionSelector::selectImageIntrinsic( 1517 MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const { 1518 MachineBasicBlock *MBB = MI.getParent(); 1519 const DebugLoc &DL = MI.getDebugLoc(); 1520 1521 const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode = 1522 AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode); 1523 1524 const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim); 1525 unsigned IntrOpcode = Intr->BaseOpcode; 1526 const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI); 1527 1528 const unsigned ArgOffset = MI.getNumExplicitDefs() + 1; 1529 1530 Register VDataIn, VDataOut; 1531 LLT VDataTy; 1532 int NumVDataDwords = -1; 1533 bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 || 1534 MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16; 1535 1536 bool Unorm; 1537 if (!BaseOpcode->Sampler) 1538 Unorm = true; 1539 else 1540 Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0; 1541 1542 bool TFE; 1543 bool LWE; 1544 bool IsTexFail = false; 1545 if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(), 1546 TFE, LWE, IsTexFail)) 1547 return false; 1548 1549 const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm(); 1550 const bool IsA16 = (Flags & 1) != 0; 1551 const bool IsG16 = (Flags & 2) != 0; 1552 1553 // A16 implies 16 bit gradients if subtarget doesn't support G16 1554 if (IsA16 && !STI.hasG16() && !IsG16) 1555 return false; 1556 1557 unsigned DMask = 0; 1558 unsigned DMaskLanes = 0; 1559 1560 if (BaseOpcode->Atomic) { 1561 VDataOut = MI.getOperand(0).getReg(); 1562 VDataIn = MI.getOperand(2).getReg(); 1563 LLT Ty = MRI->getType(VDataIn); 1564 1565 // Be careful to allow atomic swap on 16-bit element vectors. 1566 const bool Is64Bit = BaseOpcode->AtomicX2 ? 1567 Ty.getSizeInBits() == 128 : 1568 Ty.getSizeInBits() == 64; 1569 1570 if (BaseOpcode->AtomicX2) { 1571 assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister); 1572 1573 DMask = Is64Bit ? 0xf : 0x3; 1574 NumVDataDwords = Is64Bit ? 4 : 2; 1575 } else { 1576 DMask = Is64Bit ? 0x3 : 0x1; 1577 NumVDataDwords = Is64Bit ? 2 : 1; 1578 } 1579 } else { 1580 DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm(); 1581 DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask); 1582 1583 if (BaseOpcode->Store) { 1584 VDataIn = MI.getOperand(1).getReg(); 1585 VDataTy = MRI->getType(VDataIn); 1586 NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32; 1587 } else { 1588 VDataOut = MI.getOperand(0).getReg(); 1589 VDataTy = MRI->getType(VDataOut); 1590 NumVDataDwords = DMaskLanes; 1591 1592 if (IsD16 && !STI.hasUnpackedD16VMem()) 1593 NumVDataDwords = (DMaskLanes + 1) / 2; 1594 } 1595 } 1596 1597 // Set G16 opcode 1598 if (IsG16 && !IsA16) { 1599 const AMDGPU::MIMGG16MappingInfo *G16MappingInfo = 1600 AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode); 1601 assert(G16MappingInfo); 1602 IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16 1603 } 1604 1605 // TODO: Check this in verifier. 
1606 assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this"); 1607 1608 unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm(); 1609 if (BaseOpcode->Atomic) 1610 CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization 1611 if (CPol & ~AMDGPU::CPol::ALL) 1612 return false; 1613 1614 int NumVAddrRegs = 0; 1615 int NumVAddrDwords = 0; 1616 for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) { 1617 // Skip the $noregs and 0s inserted during legalization. 1618 MachineOperand &AddrOp = MI.getOperand(ArgOffset + I); 1619 if (!AddrOp.isReg()) 1620 continue; // XXX - Break? 1621 1622 Register Addr = AddrOp.getReg(); 1623 if (!Addr) 1624 break; 1625 1626 ++NumVAddrRegs; 1627 NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32; 1628 } 1629 1630 // The legalizer preprocessed the intrinsic arguments. If we aren't using 1631 // NSA, these should have been packed into a single value in the first 1632 // address register 1633 const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs; 1634 if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) { 1635 LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n"); 1636 return false; 1637 } 1638 1639 if (IsTexFail) 1640 ++NumVDataDwords; 1641 1642 int Opcode = -1; 1643 if (IsGFX10Plus) { 1644 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, 1645 UseNSA ? AMDGPU::MIMGEncGfx10NSA 1646 : AMDGPU::MIMGEncGfx10Default, 1647 NumVDataDwords, NumVAddrDwords); 1648 } else { 1649 if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 1650 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8, 1651 NumVDataDwords, NumVAddrDwords); 1652 if (Opcode == -1) 1653 Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6, 1654 NumVDataDwords, NumVAddrDwords); 1655 } 1656 assert(Opcode != -1); 1657 1658 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode)) 1659 .cloneMemRefs(MI); 1660 1661 if (VDataOut) { 1662 if (BaseOpcode->AtomicX2) { 1663 const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64; 1664 1665 Register TmpReg = MRI->createVirtualRegister( 1666 Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass); 1667 unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0; 1668 1669 MIB.addDef(TmpReg); 1670 if (!MRI->use_empty(VDataOut)) { 1671 BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut) 1672 .addReg(TmpReg, RegState::Kill, SubReg); 1673 } 1674 1675 } else { 1676 MIB.addDef(VDataOut); // vdata output 1677 } 1678 } 1679 1680 if (VDataIn) 1681 MIB.addReg(VDataIn); // vdata input 1682 1683 for (int I = 0; I != NumVAddrRegs; ++I) { 1684 MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I); 1685 if (SrcOp.isReg()) { 1686 assert(SrcOp.getReg() != 0); 1687 MIB.addReg(SrcOp.getReg()); 1688 } 1689 } 1690 1691 MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg()); 1692 if (BaseOpcode->Sampler) 1693 MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg()); 1694 1695 MIB.addImm(DMask); // dmask 1696 1697 if (IsGFX10Plus) 1698 MIB.addImm(DimInfo->Encoding); 1699 MIB.addImm(Unorm); 1700 1701 MIB.addImm(CPol); 1702 MIB.addImm(IsA16 && // a16 or r128 1703 STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0); 1704 if (IsGFX10Plus) 1705 MIB.addImm(IsA16 ? -1 : 0); 1706 1707 MIB.addImm(TFE); // tfe 1708 MIB.addImm(LWE); // lwe 1709 if (!IsGFX10Plus) 1710 MIB.addImm(DimInfo->DA ? -1 : 0); 1711 if (BaseOpcode->HasD16) 1712 MIB.addImm(IsD16 ? 
-1 : 0); 1713 1714 if (IsTexFail) { 1715 // An image load instruction with TFE/LWE only conditionally writes to its 1716 // result registers. Initialize them to zero so that we always get well 1717 // defined result values. 1718 assert(VDataOut && !VDataIn); 1719 Register Tied = MRI->cloneVirtualRegister(VDataOut); 1720 Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1721 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero) 1722 .addImm(0); 1723 auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4); 1724 if (STI.usePRTStrictNull()) { 1725 // With enable-prt-strict-null enabled, initialize all result registers to 1726 // zero. 1727 auto RegSeq = 1728 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1729 for (auto Sub : Parts) 1730 RegSeq.addReg(Zero).addImm(Sub); 1731 } else { 1732 // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE 1733 // result register. 1734 Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 1735 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef); 1736 auto RegSeq = 1737 BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied); 1738 for (auto Sub : Parts.drop_back(1)) 1739 RegSeq.addReg(Undef).addImm(Sub); 1740 RegSeq.addReg(Zero).addImm(Parts.back()); 1741 } 1742 MIB.addReg(Tied, RegState::Implicit); 1743 MIB->tieOperands(0, MIB->getNumOperands() - 1); 1744 } 1745 1746 MI.eraseFromParent(); 1747 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 1748 } 1749 1750 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS( 1751 MachineInstr &I) const { 1752 unsigned IntrinsicID = I.getIntrinsicID(); 1753 switch (IntrinsicID) { 1754 case Intrinsic::amdgcn_end_cf: 1755 return selectEndCfIntrinsic(I); 1756 case Intrinsic::amdgcn_ds_ordered_add: 1757 case Intrinsic::amdgcn_ds_ordered_swap: 1758 return selectDSOrderedIntrinsic(I, IntrinsicID); 1759 case Intrinsic::amdgcn_ds_gws_init: 1760 case Intrinsic::amdgcn_ds_gws_barrier: 1761 case Intrinsic::amdgcn_ds_gws_sema_v: 1762 case Intrinsic::amdgcn_ds_gws_sema_br: 1763 case Intrinsic::amdgcn_ds_gws_sema_p: 1764 case Intrinsic::amdgcn_ds_gws_sema_release_all: 1765 return selectDSGWSIntrinsic(I, IntrinsicID); 1766 case Intrinsic::amdgcn_ds_append: 1767 return selectDSAppendConsume(I, true); 1768 case Intrinsic::amdgcn_ds_consume: 1769 return selectDSAppendConsume(I, false); 1770 case Intrinsic::amdgcn_s_barrier: 1771 return selectSBarrier(I); 1772 case Intrinsic::amdgcn_global_atomic_fadd: 1773 return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3)); 1774 default: { 1775 return selectImpl(I, *CoverageInfo); 1776 } 1777 } 1778 } 1779 1780 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const { 1781 if (selectImpl(I, *CoverageInfo)) 1782 return true; 1783 1784 MachineBasicBlock *BB = I.getParent(); 1785 const DebugLoc &DL = I.getDebugLoc(); 1786 1787 Register DstReg = I.getOperand(0).getReg(); 1788 unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI); 1789 assert(Size <= 32 || Size == 64); 1790 const MachineOperand &CCOp = I.getOperand(1); 1791 Register CCReg = CCOp.getReg(); 1792 if (!isVCC(CCReg, *MRI)) { 1793 unsigned SelectOpcode = Size == 64 ? 
AMDGPU::S_CSELECT_B64 : 1794 AMDGPU::S_CSELECT_B32; 1795 MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC) 1796 .addReg(CCReg); 1797 1798 // The generic constrainSelectedInstRegOperands doesn't work for the scc register 1799 // bank, because it does not cover the register class that we used to represent 1800 // for it. So we need to manually set the register class here. 1801 if (!MRI->getRegClassOrNull(CCReg)) 1802 MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI)); 1803 MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg) 1804 .add(I.getOperand(2)) 1805 .add(I.getOperand(3)); 1806 1807 bool Ret = false; 1808 Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1809 Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI); 1810 I.eraseFromParent(); 1811 return Ret; 1812 } 1813 1814 // Wide VGPR select should have been split in RegBankSelect. 1815 if (Size > 32) 1816 return false; 1817 1818 MachineInstr *Select = 1819 BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg) 1820 .addImm(0) 1821 .add(I.getOperand(3)) 1822 .addImm(0) 1823 .add(I.getOperand(2)) 1824 .add(I.getOperand(1)); 1825 1826 bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI); 1827 I.eraseFromParent(); 1828 return Ret; 1829 } 1830 1831 static int sizeToSubRegIndex(unsigned Size) { 1832 switch (Size) { 1833 case 32: 1834 return AMDGPU::sub0; 1835 case 64: 1836 return AMDGPU::sub0_sub1; 1837 case 96: 1838 return AMDGPU::sub0_sub1_sub2; 1839 case 128: 1840 return AMDGPU::sub0_sub1_sub2_sub3; 1841 case 256: 1842 return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7; 1843 default: 1844 if (Size < 32) 1845 return AMDGPU::sub0; 1846 if (Size > 256) 1847 return -1; 1848 return sizeToSubRegIndex(PowerOf2Ceil(Size)); 1849 } 1850 } 1851 1852 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const { 1853 Register DstReg = I.getOperand(0).getReg(); 1854 Register SrcReg = I.getOperand(1).getReg(); 1855 const LLT DstTy = MRI->getType(DstReg); 1856 const LLT SrcTy = MRI->getType(SrcReg); 1857 const LLT S1 = LLT::scalar(1); 1858 1859 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 1860 const RegisterBank *DstRB; 1861 if (DstTy == S1) { 1862 // This is a special case. We don't treat s1 for legalization artifacts as 1863 // vcc booleans. 
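// (For instance, a G_TRUNC from s32 to s1 whose operands were assigned the
// SGPR bank is selected as a plain COPY below instead of being forced onto
// the VCC bank.)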
1864 DstRB = SrcRB; 1865 } else { 1866 DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 1867 if (SrcRB != DstRB) 1868 return false; 1869 } 1870 1871 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 1872 1873 unsigned DstSize = DstTy.getSizeInBits(); 1874 unsigned SrcSize = SrcTy.getSizeInBits(); 1875 1876 const TargetRegisterClass *SrcRC 1877 = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI); 1878 const TargetRegisterClass *DstRC 1879 = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI); 1880 if (!SrcRC || !DstRC) 1881 return false; 1882 1883 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 1884 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) { 1885 LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n"); 1886 return false; 1887 } 1888 1889 if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) { 1890 MachineBasicBlock *MBB = I.getParent(); 1891 const DebugLoc &DL = I.getDebugLoc(); 1892 1893 Register LoReg = MRI->createVirtualRegister(DstRC); 1894 Register HiReg = MRI->createVirtualRegister(DstRC); 1895 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg) 1896 .addReg(SrcReg, 0, AMDGPU::sub0); 1897 BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg) 1898 .addReg(SrcReg, 0, AMDGPU::sub1); 1899 1900 if (IsVALU && STI.hasSDWA()) { 1901 // Write the low 16-bits of the high element into the high 16-bits of the 1902 // low element. 1903 MachineInstr *MovSDWA = 1904 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 1905 .addImm(0) // $src0_modifiers 1906 .addReg(HiReg) // $src0 1907 .addImm(0) // $clamp 1908 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 1909 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 1910 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 1911 .addReg(LoReg, RegState::Implicit); 1912 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 1913 } else { 1914 Register TmpReg0 = MRI->createVirtualRegister(DstRC); 1915 Register TmpReg1 = MRI->createVirtualRegister(DstRC); 1916 Register ImmReg = MRI->createVirtualRegister(DstRC); 1917 if (IsVALU) { 1918 BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0) 1919 .addImm(16) 1920 .addReg(HiReg); 1921 } else { 1922 BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0) 1923 .addReg(HiReg) 1924 .addImm(16); 1925 } 1926 1927 unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32; 1928 unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 1929 unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32; 1930 1931 BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg) 1932 .addImm(0xffff); 1933 BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1) 1934 .addReg(LoReg) 1935 .addReg(ImmReg); 1936 BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg) 1937 .addReg(TmpReg0) 1938 .addReg(TmpReg1); 1939 } 1940 1941 I.eraseFromParent(); 1942 return true; 1943 } 1944 1945 if (!DstTy.isScalar()) 1946 return false; 1947 1948 if (SrcSize > 32) { 1949 int SubRegIdx = sizeToSubRegIndex(DstSize); 1950 if (SubRegIdx == -1) 1951 return false; 1952 1953 // Deal with weird cases where the class only partially supports the subreg 1954 // index. 
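// (getSubClassWithSubReg returns the largest subclass of SrcRC for which
// SubRegIdx is actually valid, e.g. a 96-bit source truncated to 64 bits
// needs a class supporting sub0_sub1; if no such subclass exists the
// truncate is rejected rather than emitting an invalid subregister access.)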
1955 const TargetRegisterClass *SrcWithSubRC 1956 = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx); 1957 if (!SrcWithSubRC) 1958 return false; 1959 1960 if (SrcWithSubRC != SrcRC) { 1961 if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI)) 1962 return false; 1963 } 1964 1965 I.getOperand(1).setSubReg(SubRegIdx); 1966 } 1967 1968 I.setDesc(TII.get(TargetOpcode::COPY)); 1969 return true; 1970 } 1971 1972 /// \returns true if a bitmask for \p Size bits will be an inline immediate. 1973 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) { 1974 Mask = maskTrailingOnes<unsigned>(Size); 1975 int SignedMask = static_cast<int>(Mask); 1976 return SignedMask >= -16 && SignedMask <= 64; 1977 } 1978 1979 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1. 1980 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank( 1981 Register Reg, const MachineRegisterInfo &MRI, 1982 const TargetRegisterInfo &TRI) const { 1983 const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg); 1984 if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>()) 1985 return RB; 1986 1987 // Ignore the type, since we don't use vcc in artifacts. 1988 if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>()) 1989 return &RBI.getRegBankFromRegClass(*RC, LLT()); 1990 return nullptr; 1991 } 1992 1993 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const { 1994 bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG; 1995 bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg; 1996 const DebugLoc &DL = I.getDebugLoc(); 1997 MachineBasicBlock &MBB = *I.getParent(); 1998 const Register DstReg = I.getOperand(0).getReg(); 1999 const Register SrcReg = I.getOperand(1).getReg(); 2000 2001 const LLT DstTy = MRI->getType(DstReg); 2002 const LLT SrcTy = MRI->getType(SrcReg); 2003 const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ? 2004 I.getOperand(2).getImm() : SrcTy.getSizeInBits(); 2005 const unsigned DstSize = DstTy.getSizeInBits(); 2006 if (!DstTy.isScalar()) 2007 return false; 2008 2009 // Artifact casts should never use vcc. 2010 const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI); 2011 2012 // FIXME: This should probably be illegal and split earlier. 2013 if (I.getOpcode() == AMDGPU::G_ANYEXT) { 2014 if (DstSize <= 32) 2015 return selectCOPY(I); 2016 2017 const TargetRegisterClass *SrcRC = 2018 TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank, *MRI); 2019 const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI); 2020 const TargetRegisterClass *DstRC = 2021 TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI); 2022 2023 Register UndefReg = MRI->createVirtualRegister(SrcRC); 2024 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2025 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2026 .addReg(SrcReg) 2027 .addImm(AMDGPU::sub0) 2028 .addReg(UndefReg) 2029 .addImm(AMDGPU::sub1); 2030 I.eraseFromParent(); 2031 2032 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) && 2033 RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI); 2034 } 2035 2036 if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) { 2037 // 64-bit should have been split up in RegBankSelect 2038 2039 // Try to use an and with a mask if it will save code size. 
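// (For example, a zero-extend from s4 becomes V_AND_B32 with the inline
// immediate 15, while a zero-extend from s16 falls through to V_BFE_U32
// because 0xffff is not an inline immediate.)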
2040 unsigned Mask; 2041 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2042 MachineInstr *ExtI = 2043 BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg) 2044 .addImm(Mask) 2045 .addReg(SrcReg); 2046 I.eraseFromParent(); 2047 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2048 } 2049 2050 const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64; 2051 MachineInstr *ExtI = 2052 BuildMI(MBB, I, DL, TII.get(BFE), DstReg) 2053 .addReg(SrcReg) 2054 .addImm(0) // Offset 2055 .addImm(SrcSize); // Width 2056 I.eraseFromParent(); 2057 return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI); 2058 } 2059 2060 if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) { 2061 const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ? 2062 AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass; 2063 if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI)) 2064 return false; 2065 2066 if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) { 2067 const unsigned SextOpc = SrcSize == 8 ? 2068 AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16; 2069 BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg) 2070 .addReg(SrcReg); 2071 I.eraseFromParent(); 2072 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2073 } 2074 2075 const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64; 2076 const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32; 2077 2078 // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width. 2079 if (DstSize > 32 && (SrcSize <= 32 || InReg)) { 2080 // We need a 64-bit register source, but the high bits don't matter. 2081 Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass); 2082 Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2083 unsigned SubReg = InReg ? AMDGPU::sub0 : 0; 2084 2085 BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg); 2086 BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg) 2087 .addReg(SrcReg, 0, SubReg) 2088 .addImm(AMDGPU::sub0) 2089 .addReg(UndefReg) 2090 .addImm(AMDGPU::sub1); 2091 2092 BuildMI(MBB, I, DL, TII.get(BFE64), DstReg) 2093 .addReg(ExtReg) 2094 .addImm(SrcSize << 16); 2095 2096 I.eraseFromParent(); 2097 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI); 2098 } 2099 2100 unsigned Mask; 2101 if (!Signed && shouldUseAndMask(SrcSize, Mask)) { 2102 BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg) 2103 .addReg(SrcReg) 2104 .addImm(Mask); 2105 } else { 2106 BuildMI(MBB, I, DL, TII.get(BFE32), DstReg) 2107 .addReg(SrcReg) 2108 .addImm(SrcSize << 16); 2109 } 2110 2111 I.eraseFromParent(); 2112 return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI); 2113 } 2114 2115 return false; 2116 } 2117 2118 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const { 2119 MachineBasicBlock *BB = I.getParent(); 2120 MachineOperand &ImmOp = I.getOperand(1); 2121 Register DstReg = I.getOperand(0).getReg(); 2122 unsigned Size = MRI->getType(DstReg).getSizeInBits(); 2123 2124 // The AMDGPU backend only supports Imm operands and not CImm or FPImm. 
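// (For example, a G_FCONSTANT of float 1.0 is rewritten here into the plain
// immediate 0x3f800000 before being selected as a move.)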
2125 if (ImmOp.isFPImm()) { 2126 const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt(); 2127 ImmOp.ChangeToImmediate(Imm.getZExtValue()); 2128 } else if (ImmOp.isCImm()) { 2129 ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue()); 2130 } else { 2131 llvm_unreachable("Not supported by g_constants"); 2132 } 2133 2134 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2135 const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID; 2136 2137 unsigned Opcode; 2138 if (DstRB->getID() == AMDGPU::VCCRegBankID) { 2139 Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64; 2140 } else { 2141 Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32; 2142 2143 // We should never produce s1 values on banks other than VCC. If the user of 2144 // this already constrained the register, we may incorrectly think it's VCC 2145 // if it wasn't originally. 2146 if (Size == 1) 2147 return false; 2148 } 2149 2150 if (Size != 64) { 2151 I.setDesc(TII.get(Opcode)); 2152 I.addImplicitDefUseOperands(*MF); 2153 return constrainSelectedInstRegOperands(I, TII, TRI, RBI); 2154 } 2155 2156 const DebugLoc &DL = I.getDebugLoc(); 2157 2158 APInt Imm(Size, I.getOperand(1).getImm()); 2159 2160 MachineInstr *ResInst; 2161 if (IsSgpr && TII.isInlineConstant(Imm)) { 2162 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg) 2163 .addImm(I.getOperand(1).getImm()); 2164 } else { 2165 const TargetRegisterClass *RC = IsSgpr ? 2166 &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass; 2167 Register LoReg = MRI->createVirtualRegister(RC); 2168 Register HiReg = MRI->createVirtualRegister(RC); 2169 2170 BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg) 2171 .addImm(Imm.trunc(32).getZExtValue()); 2172 2173 BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg) 2174 .addImm(Imm.ashr(32).getZExtValue()); 2175 2176 ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2177 .addReg(LoReg) 2178 .addImm(AMDGPU::sub0) 2179 .addReg(HiReg) 2180 .addImm(AMDGPU::sub1); 2181 } 2182 2183 // We can't call constrainSelectedInstRegOperands here, because it doesn't 2184 // work for target independent opcodes 2185 I.eraseFromParent(); 2186 const TargetRegisterClass *DstRC = 2187 TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI); 2188 if (!DstRC) 2189 return true; 2190 return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI); 2191 } 2192 2193 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const { 2194 // Only manually handle the f64 SGPR case. 2195 // 2196 // FIXME: This is a workaround for 2.5 different tablegen problems. Because 2197 // the bit ops theoretically have a second result due to the implicit def of 2198 // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing 2199 // that is easy by disabling the check. The result works, but uses a 2200 // nonsensical sreg32orlds_and_sreg_1 regclass. 2201 // 2202 // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to 2203 // the variadic REG_SEQUENCE operands. 
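// (The manual expansion below copies the source's sub0/sub1 halves, flips or
// sets the sign bit of the high half with S_XOR_B32/S_OR_B32 against
// 0x80000000, and reassembles the result with REG_SEQUENCE; fneg(fabs(x))
// folds into a single S_OR_B32.)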
2204 
2205 Register Dst = MI.getOperand(0).getReg(); 
2206 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 
2207 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 
2208 MRI->getType(Dst) != LLT::scalar(64)) 
2209 return false; 
2210 
2211 Register Src = MI.getOperand(1).getReg(); 
2212 MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI); 
2213 if (Fabs) 
2214 Src = Fabs->getOperand(1).getReg(); 
2215 
2216 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 
2217 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 
2218 return false; 
2219 
2220 MachineBasicBlock *BB = MI.getParent(); 
2221 const DebugLoc &DL = MI.getDebugLoc(); 
2222 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2223 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2224 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2225 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2226 
2227 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 
2228 .addReg(Src, 0, AMDGPU::sub0); 
2229 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 
2230 .addReg(Src, 0, AMDGPU::sub1); 
2231 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 
2232 .addImm(0x80000000); 
2233 
2234 // Set or toggle sign bit. 
2235 unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32; 
2236 BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg) 
2237 .addReg(HiReg) 
2238 .addReg(ConstReg); 
2239 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 
2240 .addReg(LoReg) 
2241 .addImm(AMDGPU::sub0) 
2242 .addReg(OpReg) 
2243 .addImm(AMDGPU::sub1); 
2244 MI.eraseFromParent(); 
2245 return true; 
2246 } 
2247 
2248 // FIXME: This is a workaround for the same tablegen problems as G_FNEG 
2249 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const { 
2250 Register Dst = MI.getOperand(0).getReg(); 
2251 const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI); 
2252 if (DstRB->getID() != AMDGPU::SGPRRegBankID || 
2253 MRI->getType(Dst) != LLT::scalar(64)) 
2254 return false; 
2255 
2256 Register Src = MI.getOperand(1).getReg(); 
2257 MachineBasicBlock *BB = MI.getParent(); 
2258 const DebugLoc &DL = MI.getDebugLoc(); 
2259 Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2260 Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2261 Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2262 Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 
2263 
2264 if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) || 
2265 !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI)) 
2266 return false; 
2267 
2268 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg) 
2269 .addReg(Src, 0, AMDGPU::sub0); 
2270 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg) 
2271 .addReg(Src, 0, AMDGPU::sub1); 
2272 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg) 
2273 .addImm(0x7fffffff); 
2274 
2275 // Clear sign bit. 
2276 // TODO: Should this use S_BITSET0_*?
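// (Only the high half needs the mask: the f64 sign bit is bit 31 of the sub1
// register, so ANDing that half with 0x7fffffff clears it while the low half
// is passed through unchanged.)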
2277 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg) 2278 .addReg(HiReg) 2279 .addReg(ConstReg); 2280 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst) 2281 .addReg(LoReg) 2282 .addImm(AMDGPU::sub0) 2283 .addReg(OpReg) 2284 .addImm(AMDGPU::sub1); 2285 2286 MI.eraseFromParent(); 2287 return true; 2288 } 2289 2290 static bool isConstant(const MachineInstr &MI) { 2291 return MI.getOpcode() == TargetOpcode::G_CONSTANT; 2292 } 2293 2294 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load, 2295 const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const { 2296 2297 const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg()); 2298 2299 assert(PtrMI); 2300 2301 if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD) 2302 return; 2303 2304 GEPInfo GEPInfo(*PtrMI); 2305 2306 for (unsigned i = 1; i != 3; ++i) { 2307 const MachineOperand &GEPOp = PtrMI->getOperand(i); 2308 const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg()); 2309 assert(OpDef); 2310 if (i == 2 && isConstant(*OpDef)) { 2311 // TODO: Could handle constant base + variable offset, but a combine 2312 // probably should have commuted it. 2313 assert(GEPInfo.Imm == 0); 2314 GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue(); 2315 continue; 2316 } 2317 const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI); 2318 if (OpBank->getID() == AMDGPU::SGPRRegBankID) 2319 GEPInfo.SgprParts.push_back(GEPOp.getReg()); 2320 else 2321 GEPInfo.VgprParts.push_back(GEPOp.getReg()); 2322 } 2323 2324 AddrInfo.push_back(GEPInfo); 2325 getAddrModeInfo(*PtrMI, MRI, AddrInfo); 2326 } 2327 2328 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const { 2329 return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID; 2330 } 2331 2332 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const { 2333 if (!MI.hasOneMemOperand()) 2334 return false; 2335 2336 const MachineMemOperand *MMO = *MI.memoperands_begin(); 2337 const Value *Ptr = MMO->getValue(); 2338 2339 // UndefValue means this is a load of a kernel input. These are uniform. 2340 // Sometimes LDS instructions have constant pointers. 2341 // If Ptr is null, then that means this mem operand contains a 2342 // PseudoSourceValue like GOT. 2343 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) || 2344 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr)) 2345 return true; 2346 2347 if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT) 2348 return true; 2349 2350 const Instruction *I = dyn_cast<Instruction>(Ptr); 2351 return I && I->getMetadata("amdgpu.uniform"); 2352 } 2353 2354 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const { 2355 for (const GEPInfo &GEPInfo : AddrInfo) { 2356 if (!GEPInfo.VgprParts.empty()) 2357 return true; 2358 } 2359 return false; 2360 } 2361 2362 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const { 2363 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2364 unsigned AS = PtrTy.getAddressSpace(); 2365 if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) && 2366 STI.ldsRequiresM0Init()) { 2367 MachineBasicBlock *BB = I.getParent(); 2368 2369 // If DS instructions require M0 initialization, insert it before selecting. 
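// (ldsRequiresM0Init() is true on pre-GFX9 subtargets, where DS instructions
// implicitly read M0 as an address limit for LDS/GDS; writing -1 effectively
// disables that clamp.)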
2370 BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0) 2371 .addImm(-1); 2372 } 2373 } 2374 2375 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW( 2376 MachineInstr &I) const { 2377 if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) { 2378 const LLT PtrTy = MRI->getType(I.getOperand(1).getReg()); 2379 unsigned AS = PtrTy.getAddressSpace(); 2380 if (AS == AMDGPUAS::GLOBAL_ADDRESS) 2381 return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2)); 2382 } 2383 2384 initM0(I); 2385 return selectImpl(I, *CoverageInfo); 2386 } 2387 2388 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) { 2389 if (Reg.isPhysical()) 2390 return false; 2391 2392 MachineInstr &MI = *MRI.getUniqueVRegDef(Reg); 2393 const unsigned Opcode = MI.getOpcode(); 2394 2395 if (Opcode == AMDGPU::COPY) 2396 return isVCmpResult(MI.getOperand(1).getReg(), MRI); 2397 2398 if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR || 2399 Opcode == AMDGPU::G_XOR) 2400 return isVCmpResult(MI.getOperand(1).getReg(), MRI) && 2401 isVCmpResult(MI.getOperand(2).getReg(), MRI); 2402 2403 if (Opcode == TargetOpcode::G_INTRINSIC) 2404 return MI.getIntrinsicID() == Intrinsic::amdgcn_class; 2405 2406 return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP; 2407 } 2408 2409 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const { 2410 MachineBasicBlock *BB = I.getParent(); 2411 MachineOperand &CondOp = I.getOperand(0); 2412 Register CondReg = CondOp.getReg(); 2413 const DebugLoc &DL = I.getDebugLoc(); 2414 2415 unsigned BrOpcode; 2416 Register CondPhysReg; 2417 const TargetRegisterClass *ConstrainRC; 2418 2419 // In SelectionDAG, we inspect the IR block for uniformity metadata to decide 2420 // whether the branch is uniform when selecting the instruction. In 2421 // GlobalISel, we should push that decision into RegBankSelect. Assume for now 2422 // RegBankSelect knows what it's doing if the branch condition is scc, even 2423 // though it currently does not. 2424 if (!isVCC(CondReg, *MRI)) { 2425 if (MRI->getType(CondReg) != LLT::scalar(32)) 2426 return false; 2427 2428 CondPhysReg = AMDGPU::SCC; 2429 BrOpcode = AMDGPU::S_CBRANCH_SCC1; 2430 ConstrainRC = &AMDGPU::SReg_32RegClass; 2431 } else { 2432 // FIXME: Should scc->vcc copies and with exec? 2433 2434 // Unless the value of CondReg is a result of a V_CMP* instruction then we 2435 // need to insert an and with exec. 2436 if (!isVCmpResult(CondReg, *MRI)) { 2437 const bool Is64 = STI.isWave64(); 2438 const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32; 2439 const Register Exec = Is64 ? 
AMDGPU::EXEC : AMDGPU::EXEC_LO; 2440 2441 Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC()); 2442 BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg) 2443 .addReg(CondReg) 2444 .addReg(Exec); 2445 CondReg = TmpReg; 2446 } 2447 2448 CondPhysReg = TRI.getVCC(); 2449 BrOpcode = AMDGPU::S_CBRANCH_VCCNZ; 2450 ConstrainRC = TRI.getBoolRC(); 2451 } 2452 2453 if (!MRI->getRegClassOrNull(CondReg)) 2454 MRI->setRegClass(CondReg, ConstrainRC); 2455 2456 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg) 2457 .addReg(CondReg); 2458 BuildMI(*BB, &I, DL, TII.get(BrOpcode)) 2459 .addMBB(I.getOperand(1).getMBB()); 2460 2461 I.eraseFromParent(); 2462 return true; 2463 } 2464 2465 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE( 2466 MachineInstr &I) const { 2467 Register DstReg = I.getOperand(0).getReg(); 2468 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2469 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2470 I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32)); 2471 if (IsVGPR) 2472 I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true)); 2473 2474 return RBI.constrainGenericRegister( 2475 DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI); 2476 } 2477 2478 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const { 2479 Register DstReg = I.getOperand(0).getReg(); 2480 Register SrcReg = I.getOperand(1).getReg(); 2481 Register MaskReg = I.getOperand(2).getReg(); 2482 LLT Ty = MRI->getType(DstReg); 2483 LLT MaskTy = MRI->getType(MaskReg); 2484 MachineBasicBlock *BB = I.getParent(); 2485 const DebugLoc &DL = I.getDebugLoc(); 2486 2487 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2488 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2489 const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI); 2490 const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID; 2491 if (DstRB != SrcRB) // Should only happen for hand written MIR. 2492 return false; 2493 2494 // Try to avoid emitting a bit operation when we only need to touch half of 2495 // the 64-bit pointer. 2496 APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zextOrSelf(64); 2497 const APInt MaskHi32 = APInt::getHighBitsSet(64, 32); 2498 const APInt MaskLo32 = APInt::getLowBitsSet(64, 32); 2499 2500 const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32; 2501 const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32; 2502 2503 if (!IsVGPR && Ty.getSizeInBits() == 64 && 2504 !CanCopyLow32 && !CanCopyHi32) { 2505 auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg) 2506 .addReg(SrcReg) 2507 .addReg(MaskReg); 2508 I.eraseFromParent(); 2509 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 2510 } 2511 2512 unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32; 2513 const TargetRegisterClass &RegRC 2514 = IsVGPR ? 
AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2515 2516 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB, 2517 *MRI); 2518 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB, 2519 *MRI); 2520 const TargetRegisterClass *MaskRC = 2521 TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB, *MRI); 2522 2523 if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2524 !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2525 !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI)) 2526 return false; 2527 2528 if (Ty.getSizeInBits() == 32) { 2529 assert(MaskTy.getSizeInBits() == 32 && 2530 "ptrmask should have been narrowed during legalize"); 2531 2532 BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg) 2533 .addReg(SrcReg) 2534 .addReg(MaskReg); 2535 I.eraseFromParent(); 2536 return true; 2537 } 2538 2539 Register HiReg = MRI->createVirtualRegister(&RegRC); 2540 Register LoReg = MRI->createVirtualRegister(&RegRC); 2541 2542 // Extract the subregisters from the source pointer. 2543 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg) 2544 .addReg(SrcReg, 0, AMDGPU::sub0); 2545 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg) 2546 .addReg(SrcReg, 0, AMDGPU::sub1); 2547 2548 Register MaskedLo, MaskedHi; 2549 2550 if (CanCopyLow32) { 2551 // If all the bits in the low half are 1, we only need a copy for it. 2552 MaskedLo = LoReg; 2553 } else { 2554 // Extract the mask subregister and apply the and. 2555 Register MaskLo = MRI->createVirtualRegister(&RegRC); 2556 MaskedLo = MRI->createVirtualRegister(&RegRC); 2557 2558 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo) 2559 .addReg(MaskReg, 0, AMDGPU::sub0); 2560 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo) 2561 .addReg(LoReg) 2562 .addReg(MaskLo); 2563 } 2564 2565 if (CanCopyHi32) { 2566 // If all the bits in the high half are 1, we only need a copy for it. 2567 MaskedHi = HiReg; 2568 } else { 2569 Register MaskHi = MRI->createVirtualRegister(&RegRC); 2570 MaskedHi = MRI->createVirtualRegister(&RegRC); 2571 2572 BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi) 2573 .addReg(MaskReg, 0, AMDGPU::sub1); 2574 BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi) 2575 .addReg(HiReg) 2576 .addReg(MaskHi); 2577 } 2578 2579 BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg) 2580 .addReg(MaskedLo) 2581 .addImm(AMDGPU::sub0) 2582 .addReg(MaskedHi) 2583 .addImm(AMDGPU::sub1); 2584 I.eraseFromParent(); 2585 return true; 2586 } 2587 2588 /// Return the register to use for the index value, and the subregister to use 2589 /// for the indirectly accessed register. 2590 static std::pair<Register, unsigned> 2591 computeIndirectRegIndex(MachineRegisterInfo &MRI, 2592 const SIRegisterInfo &TRI, 2593 const TargetRegisterClass *SuperRC, 2594 Register IdxReg, 2595 unsigned EltSize) { 2596 Register IdxBaseReg; 2597 int Offset; 2598 2599 std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg); 2600 if (IdxBaseReg == AMDGPU::NoRegister) { 2601 // This will happen if the index is a known constant. This should ordinarily 2602 // be legalized out, but handle it as a register just in case. 2603 assert(Offset == 0); 2604 IdxBaseReg = IdxReg; 2605 } 2606 2607 ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize); 2608 2609 // Skip out of bounds offsets, or else we would end up using an undefined 2610 // register. 
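// (For example, an offset of 5 into a four-element 32-bit vector would point
// past sub3; in that case keep the original IdxReg and SubRegs[0] rather than
// indexing past the end of the split parts.)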
2611 if (static_cast<unsigned>(Offset) >= SubRegs.size()) 2612 return std::make_pair(IdxReg, SubRegs[0]); 2613 return std::make_pair(IdxBaseReg, SubRegs[Offset]); 2614 } 2615 2616 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT( 2617 MachineInstr &MI) const { 2618 Register DstReg = MI.getOperand(0).getReg(); 2619 Register SrcReg = MI.getOperand(1).getReg(); 2620 Register IdxReg = MI.getOperand(2).getReg(); 2621 2622 LLT DstTy = MRI->getType(DstReg); 2623 LLT SrcTy = MRI->getType(SrcReg); 2624 2625 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2626 const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI); 2627 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2628 2629 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2630 // into a waterfall loop. 2631 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2632 return false; 2633 2634 const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB, 2635 *MRI); 2636 const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB, 2637 *MRI); 2638 if (!SrcRC || !DstRC) 2639 return false; 2640 if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) || 2641 !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) || 2642 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2643 return false; 2644 2645 MachineBasicBlock *BB = MI.getParent(); 2646 const DebugLoc &DL = MI.getDebugLoc(); 2647 const bool Is64 = DstTy.getSizeInBits() == 64; 2648 2649 unsigned SubReg; 2650 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg, 2651 DstTy.getSizeInBits() / 8); 2652 2653 if (SrcRB->getID() == AMDGPU::SGPRRegBankID) { 2654 if (DstTy.getSizeInBits() != 32 && !Is64) 2655 return false; 2656 2657 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2658 .addReg(IdxReg); 2659 2660 unsigned Opc = Is64 ? 
AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32; 2661 BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg) 2662 .addReg(SrcReg, 0, SubReg) 2663 .addReg(SrcReg, RegState::Implicit); 2664 MI.eraseFromParent(); 2665 return true; 2666 } 2667 2668 if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32) 2669 return false; 2670 2671 if (!STI.useVGPRIndexMode()) { 2672 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2673 .addReg(IdxReg); 2674 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg) 2675 .addReg(SrcReg, 0, SubReg) 2676 .addReg(SrcReg, RegState::Implicit); 2677 MI.eraseFromParent(); 2678 return true; 2679 } 2680 2681 const MCInstrDesc &GPRIDXDesc = 2682 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true); 2683 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2684 .addReg(SrcReg) 2685 .addReg(IdxReg) 2686 .addImm(SubReg); 2687 2688 MI.eraseFromParent(); 2689 return true; 2690 } 2691 2692 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd 2693 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT( 2694 MachineInstr &MI) const { 2695 Register DstReg = MI.getOperand(0).getReg(); 2696 Register VecReg = MI.getOperand(1).getReg(); 2697 Register ValReg = MI.getOperand(2).getReg(); 2698 Register IdxReg = MI.getOperand(3).getReg(); 2699 2700 LLT VecTy = MRI->getType(DstReg); 2701 LLT ValTy = MRI->getType(ValReg); 2702 unsigned VecSize = VecTy.getSizeInBits(); 2703 unsigned ValSize = ValTy.getSizeInBits(); 2704 2705 const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI); 2706 const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI); 2707 const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI); 2708 2709 assert(VecTy.getElementType() == ValTy); 2710 2711 // The index must be scalar. If it wasn't RegBankSelect should have moved this 2712 // into a waterfall loop. 
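// (A divergent index cannot be handled here: both M0-relative addressing and
// the GPR indexing mode take a scalar index, so RegBankSelect is expected to
// have rewritten divergent-index cases into a waterfall loop.)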
2713 if (IdxRB->getID() != AMDGPU::SGPRRegBankID) 2714 return false; 2715 2716 const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB, 2717 *MRI); 2718 const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB, 2719 *MRI); 2720 2721 if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) || 2722 !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) || 2723 !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) || 2724 !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI)) 2725 return false; 2726 2727 if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32) 2728 return false; 2729 2730 unsigned SubReg; 2731 std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg, 2732 ValSize / 8); 2733 2734 const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID && 2735 STI.useVGPRIndexMode(); 2736 2737 MachineBasicBlock *BB = MI.getParent(); 2738 const DebugLoc &DL = MI.getDebugLoc(); 2739 2740 if (!IndexMode) { 2741 BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0) 2742 .addReg(IdxReg); 2743 2744 const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo( 2745 VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID); 2746 BuildMI(*BB, MI, DL, RegWriteOp, DstReg) 2747 .addReg(VecReg) 2748 .addReg(ValReg) 2749 .addImm(SubReg); 2750 MI.eraseFromParent(); 2751 return true; 2752 } 2753 2754 const MCInstrDesc &GPRIDXDesc = 2755 TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false); 2756 BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg) 2757 .addReg(VecReg) 2758 .addReg(ValReg) 2759 .addReg(IdxReg) 2760 .addImm(SubReg); 2761 2762 MI.eraseFromParent(); 2763 return true; 2764 } 2765 2766 static bool isZeroOrUndef(int X) { 2767 return X == 0 || X == -1; 2768 } 2769 2770 static bool isOneOrUndef(int X) { 2771 return X == 1 || X == -1; 2772 } 2773 2774 static bool isZeroOrOneOrUndef(int X) { 2775 return X == 0 || X == 1 || X == -1; 2776 } 2777 2778 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single 2779 // 32-bit register. 2780 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1, 2781 ArrayRef<int> Mask) { 2782 NewMask[0] = Mask[0]; 2783 NewMask[1] = Mask[1]; 2784 if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1])) 2785 return Src0; 2786 2787 assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1); 2788 assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1); 2789 2790 // Shift the mask inputs to be 0/1; 2791 NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2; 2792 NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2; 2793 return Src1; 2794 } 2795 2796 // This is only legal with VOP3P instructions as an aid to op_sel matching. 
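// (For a <2 x s16> shuffle the mask has two entries drawn from
// {-1, 0, 1, 2, 3}, and a legal VOP3P mask only reads one of the two source
// registers; e.g. <1, 0> swaps the halves of src0 and is selected below as
// V_ALIGNBIT_B32 or an S_LSHR_B32 + S_PACK_LL_B32_B16 pair.)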
2797 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR( 2798 MachineInstr &MI) const { 2799 Register DstReg = MI.getOperand(0).getReg(); 2800 Register Src0Reg = MI.getOperand(1).getReg(); 2801 Register Src1Reg = MI.getOperand(2).getReg(); 2802 ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask(); 2803 2804 const LLT V2S16 = LLT::fixed_vector(2, 16); 2805 if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16) 2806 return false; 2807 2808 if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask)) 2809 return false; 2810 2811 assert(ShufMask.size() == 2); 2812 assert(STI.hasSDWA() && "no target has VOP3P but not SDWA"); 2813 2814 MachineBasicBlock *MBB = MI.getParent(); 2815 const DebugLoc &DL = MI.getDebugLoc(); 2816 2817 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 2818 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 2819 const TargetRegisterClass &RC = IsVALU ? 2820 AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 2821 2822 // Handle the degenerate case which should have folded out. 2823 if (ShufMask[0] == -1 && ShufMask[1] == -1) { 2824 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg); 2825 2826 MI.eraseFromParent(); 2827 return RBI.constrainGenericRegister(DstReg, RC, *MRI); 2828 } 2829 2830 // A legal VOP3P mask only reads one of the sources. 2831 int Mask[2]; 2832 Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask); 2833 2834 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) || 2835 !RBI.constrainGenericRegister(SrcVec, RC, *MRI)) 2836 return false; 2837 2838 // TODO: This also should have been folded out 2839 if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) { 2840 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg) 2841 .addReg(SrcVec); 2842 2843 MI.eraseFromParent(); 2844 return true; 2845 } 2846 2847 if (Mask[0] == 1 && Mask[1] == -1) { 2848 if (IsVALU) { 2849 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 2850 .addImm(16) 2851 .addReg(SrcVec); 2852 } else { 2853 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 2854 .addReg(SrcVec) 2855 .addImm(16); 2856 } 2857 } else if (Mask[0] == -1 && Mask[1] == 0) { 2858 if (IsVALU) { 2859 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg) 2860 .addImm(16) 2861 .addReg(SrcVec); 2862 } else { 2863 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg) 2864 .addReg(SrcVec) 2865 .addImm(16); 2866 } 2867 } else if (Mask[0] == 0 && Mask[1] == 0) { 2868 if (IsVALU) { 2869 // Write low half of the register into the high half. 2870 MachineInstr *MovSDWA = 2871 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2872 .addImm(0) // $src0_modifiers 2873 .addReg(SrcVec) // $src0 2874 .addImm(0) // $clamp 2875 .addImm(AMDGPU::SDWA::WORD_1) // $dst_sel 2876 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2877 .addImm(AMDGPU::SDWA::WORD_0) // $src0_sel 2878 .addReg(SrcVec, RegState::Implicit); 2879 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2880 } else { 2881 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2882 .addReg(SrcVec) 2883 .addReg(SrcVec); 2884 } 2885 } else if (Mask[0] == 1 && Mask[1] == 1) { 2886 if (IsVALU) { 2887 // Write high half of the register into the low half. 
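// (The SDWA mov selects WORD_1 of the source into WORD_0 of the destination
// and preserves the destination's upper word, which is why the original
// vector is also tied as an implicit use.)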
2888 MachineInstr *MovSDWA = 2889 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg) 2890 .addImm(0) // $src0_modifiers 2891 .addReg(SrcVec) // $src0 2892 .addImm(0) // $clamp 2893 .addImm(AMDGPU::SDWA::WORD_0) // $dst_sel 2894 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused 2895 .addImm(AMDGPU::SDWA::WORD_1) // $src0_sel 2896 .addReg(SrcVec, RegState::Implicit); 2897 MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1); 2898 } else { 2899 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg) 2900 .addReg(SrcVec) 2901 .addReg(SrcVec); 2902 } 2903 } else if (Mask[0] == 1 && Mask[1] == 0) { 2904 if (IsVALU) { 2905 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg) 2906 .addReg(SrcVec) 2907 .addReg(SrcVec) 2908 .addImm(16); 2909 } else { 2910 Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 2911 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg) 2912 .addReg(SrcVec) 2913 .addImm(16); 2914 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg) 2915 .addReg(TmpReg) 2916 .addReg(SrcVec); 2917 } 2918 } else 2919 llvm_unreachable("all shuffle masks should be handled"); 2920 2921 MI.eraseFromParent(); 2922 return true; 2923 } 2924 2925 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD( 2926 MachineInstr &MI) const { 2927 if (STI.hasGFX90AInsts()) 2928 return selectImpl(MI, *CoverageInfo); 2929 2930 MachineBasicBlock *MBB = MI.getParent(); 2931 const DebugLoc &DL = MI.getDebugLoc(); 2932 2933 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 2934 Function &F = MBB->getParent()->getFunction(); 2935 DiagnosticInfoUnsupported 2936 NoFpRet(F, "return versions of fp atomics not supported", 2937 MI.getDebugLoc(), DS_Error); 2938 F.getContext().diagnose(NoFpRet); 2939 return false; 2940 } 2941 2942 // FIXME: This is only needed because tablegen requires number of dst operands 2943 // in match and replace pattern to be the same. Otherwise patterns can be 2944 // exported from SDag path. 2945 MachineOperand &VDataIn = MI.getOperand(1); 2946 MachineOperand &VIndex = MI.getOperand(3); 2947 MachineOperand &VOffset = MI.getOperand(4); 2948 MachineOperand &SOffset = MI.getOperand(5); 2949 int16_t Offset = MI.getOperand(6).getImm(); 2950 2951 bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI); 2952 bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI); 2953 2954 unsigned Opcode; 2955 if (HasVOffset) { 2956 Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN 2957 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN; 2958 } else { 2959 Opcode = HasVIndex ? 
AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN 2960 : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET; 2961 } 2962 2963 if (MRI->getType(VDataIn.getReg()).isVector()) { 2964 switch (Opcode) { 2965 case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN: 2966 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN; 2967 break; 2968 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN: 2969 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN; 2970 break; 2971 case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN: 2972 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN; 2973 break; 2974 case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET: 2975 Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET; 2976 break; 2977 } 2978 } 2979 2980 auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode)); 2981 I.add(VDataIn); 2982 2983 if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN || 2984 Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) { 2985 Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class()); 2986 BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg) 2987 .addReg(VIndex.getReg()) 2988 .addImm(AMDGPU::sub0) 2989 .addReg(VOffset.getReg()) 2990 .addImm(AMDGPU::sub1); 2991 2992 I.addReg(IdxReg); 2993 } else if (HasVIndex) { 2994 I.add(VIndex); 2995 } else if (HasVOffset) { 2996 I.add(VOffset); 2997 } 2998 2999 I.add(MI.getOperand(2)); // rsrc 3000 I.add(SOffset); 3001 I.addImm(Offset); 3002 I.addImm(MI.getOperand(7).getImm()); // cpol 3003 I.cloneMemRefs(MI); 3004 3005 MI.eraseFromParent(); 3006 3007 return true; 3008 } 3009 3010 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd( 3011 MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const { 3012 3013 if (STI.hasGFX90AInsts()) { 3014 // gfx90a adds return versions of the global atomic fadd instructions so no 3015 // special handling is required. 3016 return selectImpl(MI, *CoverageInfo); 3017 } 3018 3019 MachineBasicBlock *MBB = MI.getParent(); 3020 const DebugLoc &DL = MI.getDebugLoc(); 3021 3022 if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) { 3023 Function &F = MBB->getParent()->getFunction(); 3024 DiagnosticInfoUnsupported 3025 NoFpRet(F, "return versions of fp atomics not supported", 3026 MI.getDebugLoc(), DS_Error); 3027 F.getContext().diagnose(NoFpRet); 3028 return false; 3029 } 3030 3031 // FIXME: This is only needed because tablegen requires number of dst operands 3032 // in match and replace pattern to be the same. Otherwise patterns can be 3033 // exported from SDag path. 3034 auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal); 3035 3036 Register Data = DataOp.getReg(); 3037 const unsigned Opc = MRI->getType(Data).isVector() ? 
3038 AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32; 3039 auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc)) 3040 .addReg(Addr.first) 3041 .addReg(Data) 3042 .addImm(Addr.second) 3043 .addImm(0) // cpol 3044 .cloneMemRefs(MI); 3045 3046 MI.eraseFromParent(); 3047 return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI); 3048 } 3049 3050 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{ 3051 MI.setDesc(TII.get(MI.getOperand(1).getImm())); 3052 MI.removeOperand(1); 3053 MI.addImplicitDefUseOperands(*MI.getParent()->getParent()); 3054 return true; 3055 } 3056 3057 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const { 3058 Register DstReg = MI.getOperand(0).getReg(); 3059 Register SrcReg = MI.getOperand(1).getReg(); 3060 const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI); 3061 const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID; 3062 MachineBasicBlock *MBB = MI.getParent(); 3063 const DebugLoc &DL = MI.getDebugLoc(); 3064 3065 if (IsVALU) { 3066 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg) 3067 .addImm(Subtarget->getWavefrontSizeLog2()) 3068 .addReg(SrcReg); 3069 } else { 3070 BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg) 3071 .addReg(SrcReg) 3072 .addImm(Subtarget->getWavefrontSizeLog2()); 3073 } 3074 3075 const TargetRegisterClass &RC = 3076 IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass; 3077 if (!RBI.constrainGenericRegister(DstReg, RC, *MRI)) 3078 return false; 3079 3080 MI.eraseFromParent(); 3081 return true; 3082 } 3083 3084 bool AMDGPUInstructionSelector::select(MachineInstr &I) { 3085 if (I.isPHI()) 3086 return selectPHI(I); 3087 3088 if (!I.isPreISelOpcode()) { 3089 if (I.isCopy()) 3090 return selectCOPY(I); 3091 return true; 3092 } 3093 3094 switch (I.getOpcode()) { 3095 case TargetOpcode::G_AND: 3096 case TargetOpcode::G_OR: 3097 case TargetOpcode::G_XOR: 3098 if (selectImpl(I, *CoverageInfo)) 3099 return true; 3100 return selectG_AND_OR_XOR(I); 3101 case TargetOpcode::G_ADD: 3102 case TargetOpcode::G_SUB: 3103 if (selectImpl(I, *CoverageInfo)) 3104 return true; 3105 return selectG_ADD_SUB(I); 3106 case TargetOpcode::G_UADDO: 3107 case TargetOpcode::G_USUBO: 3108 case TargetOpcode::G_UADDE: 3109 case TargetOpcode::G_USUBE: 3110 return selectG_UADDO_USUBO_UADDE_USUBE(I); 3111 case TargetOpcode::G_INTTOPTR: 3112 case TargetOpcode::G_BITCAST: 3113 case TargetOpcode::G_PTRTOINT: 3114 return selectCOPY(I); 3115 case TargetOpcode::G_CONSTANT: 3116 case TargetOpcode::G_FCONSTANT: 3117 return selectG_CONSTANT(I); 3118 case TargetOpcode::G_FNEG: 3119 if (selectImpl(I, *CoverageInfo)) 3120 return true; 3121 return selectG_FNEG(I); 3122 case TargetOpcode::G_FABS: 3123 if (selectImpl(I, *CoverageInfo)) 3124 return true; 3125 return selectG_FABS(I); 3126 case TargetOpcode::G_EXTRACT: 3127 return selectG_EXTRACT(I); 3128 case TargetOpcode::G_MERGE_VALUES: 3129 case TargetOpcode::G_BUILD_VECTOR: 3130 case TargetOpcode::G_CONCAT_VECTORS: 3131 return selectG_MERGE_VALUES(I); 3132 case TargetOpcode::G_UNMERGE_VALUES: 3133 return selectG_UNMERGE_VALUES(I); 3134 case TargetOpcode::G_BUILD_VECTOR_TRUNC: 3135 return selectG_BUILD_VECTOR_TRUNC(I); 3136 case TargetOpcode::G_PTR_ADD: 3137 return selectG_PTR_ADD(I); 3138 case TargetOpcode::G_IMPLICIT_DEF: 3139 return selectG_IMPLICIT_DEF(I); 3140 case TargetOpcode::G_FREEZE: 3141 return selectCOPY(I); 3142 case TargetOpcode::G_INSERT: 3143 return selectG_INSERT(I); 3144 case TargetOpcode::G_INTRINSIC: 3145 return 
selectG_INTRINSIC(I); 3146 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: 3147 return selectG_INTRINSIC_W_SIDE_EFFECTS(I); 3148 case TargetOpcode::G_ICMP: 3149 if (selectG_ICMP(I)) 3150 return true; 3151 return selectImpl(I, *CoverageInfo); 3152 case TargetOpcode::G_LOAD: 3153 case TargetOpcode::G_STORE: 3154 case TargetOpcode::G_ATOMIC_CMPXCHG: 3155 case TargetOpcode::G_ATOMICRMW_XCHG: 3156 case TargetOpcode::G_ATOMICRMW_ADD: 3157 case TargetOpcode::G_ATOMICRMW_SUB: 3158 case TargetOpcode::G_ATOMICRMW_AND: 3159 case TargetOpcode::G_ATOMICRMW_OR: 3160 case TargetOpcode::G_ATOMICRMW_XOR: 3161 case TargetOpcode::G_ATOMICRMW_MIN: 3162 case TargetOpcode::G_ATOMICRMW_MAX: 3163 case TargetOpcode::G_ATOMICRMW_UMIN: 3164 case TargetOpcode::G_ATOMICRMW_UMAX: 3165 case TargetOpcode::G_ATOMICRMW_FADD: 3166 case AMDGPU::G_AMDGPU_ATOMIC_INC: 3167 case AMDGPU::G_AMDGPU_ATOMIC_DEC: 3168 case AMDGPU::G_AMDGPU_ATOMIC_FMIN: 3169 case AMDGPU::G_AMDGPU_ATOMIC_FMAX: 3170 return selectG_LOAD_STORE_ATOMICRMW(I); 3171 case TargetOpcode::G_SELECT: 3172 return selectG_SELECT(I); 3173 case TargetOpcode::G_TRUNC: 3174 return selectG_TRUNC(I); 3175 case TargetOpcode::G_SEXT: 3176 case TargetOpcode::G_ZEXT: 3177 case TargetOpcode::G_ANYEXT: 3178 case TargetOpcode::G_SEXT_INREG: 3179 if (selectImpl(I, *CoverageInfo)) 3180 return true; 3181 return selectG_SZA_EXT(I); 3182 case TargetOpcode::G_BRCOND: 3183 return selectG_BRCOND(I); 3184 case TargetOpcode::G_GLOBAL_VALUE: 3185 return selectG_GLOBAL_VALUE(I); 3186 case TargetOpcode::G_PTRMASK: 3187 return selectG_PTRMASK(I); 3188 case TargetOpcode::G_EXTRACT_VECTOR_ELT: 3189 return selectG_EXTRACT_VECTOR_ELT(I); 3190 case TargetOpcode::G_INSERT_VECTOR_ELT: 3191 return selectG_INSERT_VECTOR_ELT(I); 3192 case TargetOpcode::G_SHUFFLE_VECTOR: 3193 return selectG_SHUFFLE_VECTOR(I); 3194 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD: 3195 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16: 3196 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: 3197 case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: { 3198 const AMDGPU::ImageDimIntrinsicInfo *Intr 3199 = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID()); 3200 assert(Intr && "not an image intrinsic with image pseudo"); 3201 return selectImageIntrinsic(I, Intr); 3202 } 3203 case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY: 3204 return selectBVHIntrinsic(I); 3205 case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD: 3206 return selectAMDGPU_BUFFER_ATOMIC_FADD(I); 3207 case AMDGPU::G_SBFX: 3208 case AMDGPU::G_UBFX: 3209 return selectG_SBFX_UBFX(I); 3210 case AMDGPU::G_SI_CALL: 3211 I.setDesc(TII.get(AMDGPU::SI_CALL)); 3212 return true; 3213 case AMDGPU::G_AMDGPU_WAVE_ADDRESS: 3214 return selectWaveAddress(I); 3215 default: 3216 return selectImpl(I, *CoverageInfo); 3217 } 3218 return false; 3219 } 3220 3221 InstructionSelector::ComplexRendererFns 3222 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const { 3223 return {{ 3224 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3225 }}; 3226 3227 } 3228 3229 std::pair<Register, unsigned> 3230 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root, 3231 bool AllowAbs) const { 3232 Register Src = Root.getReg(); 3233 Register OrigSrc = Src; 3234 unsigned Mods = 0; 3235 MachineInstr *MI = getDefIgnoringCopies(Src, *MRI); 3236 3237 if (MI && MI->getOpcode() == AMDGPU::G_FNEG) { 3238 Src = MI->getOperand(1).getReg(); 3239 Mods |= SISrcMods::NEG; 3240 MI = getDefIgnoringCopies(Src, *MRI); 3241 } 3242 3243 if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) { 3244 Src = MI->getOperand(1).getReg(); 3245 
Mods |= SISrcMods::ABS; 3246 } 3247 3248 if (Mods != 0 && 3249 RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) { 3250 MachineInstr *UseMI = Root.getParent(); 3251 3252 // If we looked through copies to find source modifiers on an SGPR operand, 3253 // we now have an SGPR register source. To avoid potentially violating the 3254 // constant bus restriction, we need to insert a copy to a VGPR. 3255 Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc); 3256 BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(), 3257 TII.get(AMDGPU::COPY), VGPRSrc) 3258 .addReg(Src); 3259 Src = VGPRSrc; 3260 } 3261 3262 return std::make_pair(Src, Mods); 3263 } 3264 3265 /// 3266 /// This will select either an SGPR or VGPR operand and will save us from 3267 /// having to write an extra tablegen pattern. 3268 InstructionSelector::ComplexRendererFns 3269 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const { 3270 return {{ 3271 [=](MachineInstrBuilder &MIB) { MIB.add(Root); } 3272 }}; 3273 } 3274 3275 InstructionSelector::ComplexRendererFns 3276 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const { 3277 Register Src; 3278 unsigned Mods; 3279 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3280 3281 return {{ 3282 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3283 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3284 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3285 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3286 }}; 3287 } 3288 3289 InstructionSelector::ComplexRendererFns 3290 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const { 3291 Register Src; 3292 unsigned Mods; 3293 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3294 3295 return {{ 3296 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3297 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods 3298 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3299 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3300 }}; 3301 } 3302 3303 InstructionSelector::ComplexRendererFns 3304 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const { 3305 return {{ 3306 [=](MachineInstrBuilder &MIB) { MIB.add(Root); }, 3307 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp 3308 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // omod 3309 }}; 3310 } 3311 3312 InstructionSelector::ComplexRendererFns 3313 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const { 3314 Register Src; 3315 unsigned Mods; 3316 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3317 3318 return {{ 3319 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3320 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3321 }}; 3322 } 3323 3324 InstructionSelector::ComplexRendererFns 3325 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const { 3326 Register Src; 3327 unsigned Mods; 3328 std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false); 3329 3330 return {{ 3331 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3332 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3333 }}; 3334 } 3335 3336 InstructionSelector::ComplexRendererFns 3337 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const { 3338 Register Reg = Root.getReg(); 3339 const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI); 3340 if (Def && (Def->getOpcode() == AMDGPU::G_FNEG || 3341 Def->getOpcode() == AMDGPU::G_FABS)) 3342 return {}; 3343 return {{ 3344 
[=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 3345 }}; 3346 } 3347 3348 std::pair<Register, unsigned> 3349 AMDGPUInstructionSelector::selectVOP3PModsImpl( 3350 Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const { 3351 unsigned Mods = 0; 3352 MachineInstr *MI = MRI.getVRegDef(Src); 3353 3354 if (MI && MI->getOpcode() == AMDGPU::G_FNEG && 3355 // It's possible to see an f32 fneg here, but unlikely. 3356 // TODO: Treat f32 fneg as only high bit. 3357 MRI.getType(Src) == LLT::fixed_vector(2, 16)) { 3358 Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI); 3359 Src = MI->getOperand(1).getReg(); 3360 MI = MRI.getVRegDef(Src); 3361 } 3362 3363 // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector. 3364 (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard() 3365 3366 // Packed instructions do not have abs modifiers. 3367 Mods |= SISrcMods::OP_SEL_1; 3368 3369 return std::make_pair(Src, Mods); 3370 } 3371 3372 InstructionSelector::ComplexRendererFns 3373 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const { 3374 MachineRegisterInfo &MRI 3375 = Root.getParent()->getParent()->getParent()->getRegInfo(); 3376 3377 Register Src; 3378 unsigned Mods; 3379 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI); 3380 3381 return {{ 3382 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3383 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3384 }}; 3385 } 3386 3387 InstructionSelector::ComplexRendererFns 3388 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const { 3389 MachineRegisterInfo &MRI 3390 = Root.getParent()->getParent()->getParent()->getRegInfo(); 3391 3392 Register Src; 3393 unsigned Mods; 3394 std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true); 3395 3396 return {{ 3397 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3398 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3399 }}; 3400 } 3401 3402 InstructionSelector::ComplexRendererFns 3403 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const { 3404 Register Src; 3405 unsigned Mods; 3406 std::tie(Src, Mods) = selectVOP3ModsImpl(Root); 3407 if (!isKnownNeverNaN(Src, *MRI)) 3408 return None; 3409 3410 return {{ 3411 [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); }, 3412 [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods 3413 }}; 3414 } 3415 3416 InstructionSelector::ComplexRendererFns 3417 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const { 3418 // FIXME: Handle op_sel 3419 return {{ 3420 [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }, 3421 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods 3422 }}; 3423 } 3424 3425 InstructionSelector::ComplexRendererFns 3426 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const { 3427 SmallVector<GEPInfo, 4> AddrInfo; 3428 getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo); 3429 3430 if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1) 3431 return None; 3432 3433 const GEPInfo &GEPInfo = AddrInfo[0]; 3434 Optional<int64_t> EncodedImm = 3435 AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false); 3436 if (!EncodedImm) 3437 return None; 3438 3439 unsigned PtrReg = GEPInfo.SgprParts[0]; 3440 return {{ 3441 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); }, 3442 [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } 3443 }}; 3444 } 3445 3446 InstructionSelector::ComplexRendererFns 3447 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const { 
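  // A rough sketch of the shape this matcher is looking for (illustrative
  // gMIR only, not taken from a real test):
  //   %off:sgpr(s64) = G_CONSTANT i64 0x12345
  //   %addr:sgpr(p4) = G_PTR_ADD %base, %off
  // If the subtarget can encode the offset as a 32-bit literal, this folds to
  // sbase = %base with the literal offset rendered below.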
3448   SmallVector<GEPInfo, 4> AddrInfo;
3449   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3450
3451   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3452     return None;
3453
3454   const GEPInfo &GEPInfo = AddrInfo[0];
3455   Register PtrReg = GEPInfo.SgprParts[0];
3456   Optional<int64_t> EncodedImm =
3457       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3458   if (!EncodedImm)
3459     return None;
3460
3461   return {{
3462     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3463     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3464   }};
3465 }
3466
3467 InstructionSelector::ComplexRendererFns
3468 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3469   MachineInstr *MI = Root.getParent();
3470   MachineBasicBlock *MBB = MI->getParent();
3471
3472   SmallVector<GEPInfo, 4> AddrInfo;
3473   getAddrModeInfo(*MI, *MRI, AddrInfo);
3474
3475   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3476   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3477   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3478     return None;
3479
3480   const GEPInfo &GEPInfo = AddrInfo[0];
3481   // SGPR offset is unsigned.
3482   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3483     return None;
3484
3485   // If we make it this far we have a load with a 32-bit immediate offset.
3486   // It is OK to select this using an SGPR offset, because we have already
3487   // failed trying to select this load into one of the _IMM variants since
3488   // the _IMM Patterns are considered before the _SGPR patterns.
3489   Register PtrReg = GEPInfo.SgprParts[0];
3490   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3491   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3492       .addImm(GEPInfo.Imm);
3493   return {{
3494     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3495     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3496   }};
3497 }
3498
3499 std::pair<Register, int>
3500 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3501                                                 uint64_t FlatVariant) const {
3502   MachineInstr *MI = Root.getParent();
3503
3504   auto Default = std::make_pair(Root.getReg(), 0);
3505
3506   if (!STI.hasFlatInstOffsets())
3507     return Default;
3508
3509   Register PtrBase;
3510   int64_t ConstOffset;
3511   std::tie(PtrBase, ConstOffset) =
3512       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3513   if (ConstOffset == 0)
3514     return Default;
3515
3516   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3517   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3518     return Default;
3519
3520   return std::make_pair(PtrBase, ConstOffset);
3521 }
3522
3523 InstructionSelector::ComplexRendererFns
3524 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3525   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3526
3527   return {{
3528     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3529     [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3530   }};
3531 }
3532
3533 InstructionSelector::ComplexRendererFns
3534 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3535   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3536
3537   return {{
3538     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3539     [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3540   }};
3541 }
3542
3543 InstructionSelector::ComplexRendererFns
3544 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3545   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3546
3547   return {{
3548     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3549     [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3550   }};
3551 }
3552
3553 /// Match a zero extend from a 32-bit value to 64-bits.
3554 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3555   Register ZExtSrc;
3556   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3557     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3558
3559   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3560   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3561   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3562     return Register();
3563
3564   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3565     return Def->getOperand(1).getReg();
3566   }
3567
3568   return Register();
3569 }
3570
3571 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3572 InstructionSelector::ComplexRendererFns
3573 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3574   Register Addr = Root.getReg();
3575   Register PtrBase;
3576   int64_t ConstOffset;
3577   int64_t ImmOffset = 0;
3578
3579   // Match the immediate offset first, which canonically is moved as low as
3580   // possible.
3581   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3582
3583   if (ConstOffset != 0) {
3584     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3585                               SIInstrFlags::FlatGlobal)) {
3586       Addr = PtrBase;
3587       ImmOffset = ConstOffset;
3588     } else {
3589       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3590       if (!PtrBaseDef)
3591         return None;
3592
3593       if (isSGPR(PtrBaseDef->Reg)) {
3594         if (ConstOffset > 0) {
3595           // Offset is too large.
3596           //
3597           // saddr + large_offset -> saddr +
3598           //                         (voffset = large_offset & ~MaxOffset) +
3599           //                         (large_offset & MaxOffset);
3600           int64_t SplitImmOffset, RemainderOffset;
3601           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3602               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3603
3604           if (isUInt<32>(RemainderOffset)) {
3605             MachineInstr *MI = Root.getParent();
3606             MachineBasicBlock *MBB = MI->getParent();
3607             Register HighBits =
3608                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3609
3610             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3611                     HighBits)
3612                 .addImm(RemainderOffset);
3613
3614             return {{
3615               [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3616               [=](MachineInstrBuilder &MIB) {
3617                 MIB.addReg(HighBits);
3618               }, // voffset
3619               [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3620             }};
3621           }
3622         }
3623
3624         // We are adding a 64-bit SGPR and a constant. If the constant bus limit
3625         // is 1 we would need to perform 1 or 2 extra moves for each half of
3626         // the constant and it is better to do a scalar add and then issue a
3627         // single VALU instruction to materialize zero. Otherwise it takes fewer
3628         // instructions to perform VALU adds with immediates or inline literals.
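        // A hypothetical example of the heuristic below: for a ConstOffset of
        // -8192 the low half is 0xffffe000 (not an inline constant, so one
        // literal) while the high half is -1 (inline), giving NumLiterals = 1.
        // With a constant bus limit of 2 or more we give up on the SAddr form
        // here and leave the offset to be added with VALU instructions.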
3629 unsigned NumLiterals = 3630 !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) + 3631 !TII.isInlineConstant(APInt(32, ConstOffset >> 32)); 3632 if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals) 3633 return None; 3634 } 3635 } 3636 } 3637 3638 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3639 if (!AddrDef) 3640 return None; 3641 3642 // Match the variable offset. 3643 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 3644 // Look through the SGPR->VGPR copy. 3645 Register SAddr = 3646 getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI); 3647 3648 if (SAddr && isSGPR(SAddr)) { 3649 Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg(); 3650 3651 // It's possible voffset is an SGPR here, but the copy to VGPR will be 3652 // inserted later. 3653 if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) { 3654 return {{[=](MachineInstrBuilder &MIB) { // saddr 3655 MIB.addReg(SAddr); 3656 }, 3657 [=](MachineInstrBuilder &MIB) { // voffset 3658 MIB.addReg(VOffset); 3659 }, 3660 [=](MachineInstrBuilder &MIB) { // offset 3661 MIB.addImm(ImmOffset); 3662 }}}; 3663 } 3664 } 3665 } 3666 3667 // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and 3668 // drop this. 3669 if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF || 3670 AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg)) 3671 return None; 3672 3673 // It's cheaper to materialize a single 32-bit zero for vaddr than the two 3674 // moves required to copy a 64-bit SGPR to VGPR. 3675 MachineInstr *MI = Root.getParent(); 3676 MachineBasicBlock *MBB = MI->getParent(); 3677 Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3678 3679 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset) 3680 .addImm(0); 3681 3682 return {{ 3683 [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr 3684 [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); }, // voffset 3685 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3686 }}; 3687 } 3688 3689 InstructionSelector::ComplexRendererFns 3690 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const { 3691 Register Addr = Root.getReg(); 3692 Register PtrBase; 3693 int64_t ConstOffset; 3694 int64_t ImmOffset = 0; 3695 3696 // Match the immediate offset first, which canonically is moved as low as 3697 // possible. 
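  // Illustrative gMIR for the common case folded below (hypothetical, for
  // exposition only):
  //   %fi:sgpr(p5) = G_FRAME_INDEX %stack.0
  //   %c:sgpr(s32) = G_CONSTANT i32 16
  //   %addr:sgpr(p5) = G_PTR_ADD %fi, %c
  // becomes saddr = frame index %stack.0 with offset = 16 when the offset is
  // legal for scratch instructions.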
3698 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 3699 3700 if (ConstOffset != 0 && 3701 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, 3702 SIInstrFlags::FlatScratch)) { 3703 Addr = PtrBase; 3704 ImmOffset = ConstOffset; 3705 } 3706 3707 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3708 if (!AddrDef) 3709 return None; 3710 3711 if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { 3712 int FI = AddrDef->MI->getOperand(1).getIndex(); 3713 return {{ 3714 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr 3715 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3716 }}; 3717 } 3718 3719 Register SAddr = AddrDef->Reg; 3720 3721 if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) { 3722 Register LHS = AddrDef->MI->getOperand(1).getReg(); 3723 Register RHS = AddrDef->MI->getOperand(2).getReg(); 3724 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); 3725 auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI); 3726 3727 if (LHSDef && RHSDef && 3728 LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX && 3729 isSGPR(RHSDef->Reg)) { 3730 int FI = LHSDef->MI->getOperand(1).getIndex(); 3731 MachineInstr &I = *Root.getParent(); 3732 MachineBasicBlock *BB = I.getParent(); 3733 const DebugLoc &DL = I.getDebugLoc(); 3734 SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 3735 3736 BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr) 3737 .addFrameIndex(FI) 3738 .addReg(RHSDef->Reg); 3739 } 3740 } 3741 3742 if (!isSGPR(SAddr)) 3743 return None; 3744 3745 return {{ 3746 [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr 3747 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3748 }}; 3749 } 3750 3751 InstructionSelector::ComplexRendererFns 3752 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const { 3753 Register Addr = Root.getReg(); 3754 Register PtrBase; 3755 int64_t ConstOffset; 3756 int64_t ImmOffset = 0; 3757 3758 // Match the immediate offset first, which canonically is moved as low as 3759 // possible. 
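  // Beyond the immediate, the shape handled here is (G_PTR_ADD lhs, rhs)
  // where rhs lives in a VGPR and lhs is either a frame index or an SGPR;
  // rhs becomes vaddr and lhs becomes saddr.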
3760 std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI); 3761 3762 if (ConstOffset != 0 && 3763 TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) { 3764 Addr = PtrBase; 3765 ImmOffset = ConstOffset; 3766 } 3767 3768 auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI); 3769 if (!AddrDef) 3770 return None; 3771 3772 if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD) 3773 return None; 3774 3775 Register RHS = AddrDef->MI->getOperand(2).getReg(); 3776 if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) 3777 return None; 3778 3779 Register LHS = AddrDef->MI->getOperand(1).getReg(); 3780 auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI); 3781 3782 if (LHSDef && LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) { 3783 int FI = LHSDef->MI->getOperand(1).getIndex(); 3784 return {{ 3785 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr 3786 [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr 3787 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3788 }}; 3789 } 3790 3791 if (!isSGPR(LHS)) 3792 return None; 3793 3794 return {{ 3795 [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr 3796 [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr 3797 [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset 3798 }}; 3799 } 3800 3801 InstructionSelector::ComplexRendererFns 3802 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const { 3803 MachineInstr *MI = Root.getParent(); 3804 MachineBasicBlock *MBB = MI->getParent(); 3805 MachineFunction *MF = MBB->getParent(); 3806 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3807 3808 int64_t Offset = 0; 3809 if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) && 3810 Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) { 3811 Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass); 3812 3813 // TODO: Should this be inside the render function? The iterator seems to 3814 // move. 3815 BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), 3816 HighBits) 3817 .addImm(Offset & ~4095); 3818 3819 return {{[=](MachineInstrBuilder &MIB) { // rsrc 3820 MIB.addReg(Info->getScratchRSrcReg()); 3821 }, 3822 [=](MachineInstrBuilder &MIB) { // vaddr 3823 MIB.addReg(HighBits); 3824 }, 3825 [=](MachineInstrBuilder &MIB) { // soffset 3826 // Use constant zero for soffset and rely on eliminateFrameIndex 3827 // to choose the appropriate frame register if need be. 3828 MIB.addImm(0); 3829 }, 3830 [=](MachineInstrBuilder &MIB) { // offset 3831 MIB.addImm(Offset & 4095); 3832 }}}; 3833 } 3834 3835 assert(Offset == 0 || Offset == -1); 3836 3837 // Try to fold a frame index directly into the MUBUF vaddr field, and any 3838 // offsets. 
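  // For example (hypothetical), a root of
  //   (G_PTR_ADD (G_FRAME_INDEX %stack.0), G_CONSTANT 40)
  // can be rendered as vaddr = frame index %stack.0 with offset = 40, provided
  // the offset is a legal MUBUF immediate and any range-check requirement on
  // the base is satisfied.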
3839   Optional<int> FI;
3840   Register VAddr = Root.getReg();
3841   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
3842     Register PtrBase;
3843     int64_t ConstOffset;
3844     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
3845     if (ConstOffset != 0) {
3846       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
3847           (!STI.privateMemoryResourceIsRangeChecked() ||
3848            KnownBits->signBitIsZero(PtrBase))) {
3849         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
3850         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3851           FI = PtrBaseDef->getOperand(1).getIndex();
3852         else
3853           VAddr = PtrBase;
3854         Offset = ConstOffset;
3855       }
3856     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3857       FI = RootDef->getOperand(1).getIndex();
3858     }
3859   }
3860
3861   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3862              MIB.addReg(Info->getScratchRSrcReg());
3863            },
3864            [=](MachineInstrBuilder &MIB) { // vaddr
3865              if (FI.hasValue())
3866                MIB.addFrameIndex(FI.getValue());
3867              else
3868                MIB.addReg(VAddr);
3869            },
3870            [=](MachineInstrBuilder &MIB) { // soffset
3871              // Use constant zero for soffset and rely on eliminateFrameIndex
3872              // to choose the appropriate frame register if need be.
3873              MIB.addImm(0);
3874            },
3875            [=](MachineInstrBuilder &MIB) { // offset
3876              MIB.addImm(Offset);
3877            }}};
3878 }
3879
3880 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3881                                                 int64_t Offset) const {
3882   if (!isUInt<16>(Offset))
3883     return false;
3884
3885   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3886     return true;
3887
3888   // On Southern Islands, instructions with a negative base value and an
3889   // offset don't seem to work.
3890   return KnownBits->signBitIsZero(Base);
3891 }
3892
3893 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
3894                                                  int64_t Offset1,
3895                                                  unsigned Size) const {
3896   if (Offset0 % Size != 0 || Offset1 % Size != 0)
3897     return false;
3898   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
3899     return false;
3900
3901   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3902     return true;
3903
3904   // On Southern Islands, instructions with a negative base value and an
3905   // offset don't seem to work.
3906   return KnownBits->signBitIsZero(Base);
3907 }
3908
3909 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
3910                                                     unsigned ShAmtBits) const {
3911   assert(MI.getOpcode() == TargetOpcode::G_AND);
3912
3913   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
3914   if (!RHS)
3915     return false;
3916
3917   if (RHS->countTrailingOnes() >= ShAmtBits)
3918     return true;
3919
3920   const APInt &LHSKnownZeros =
3921       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
3922   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
3923 }
3924
3925 // Return the wave level SGPR base address if this is a wave address.
3926 static Register getWaveAddress(const MachineInstr *Def) {
3927   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
3928              ?
Def->getOperand(1).getReg() 3929 : Register(); 3930 } 3931 3932 InstructionSelector::ComplexRendererFns 3933 AMDGPUInstructionSelector::selectMUBUFScratchOffset( 3934 MachineOperand &Root) const { 3935 Register Reg = Root.getReg(); 3936 const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>(); 3937 3938 const MachineInstr *Def = MRI->getVRegDef(Reg); 3939 if (Register WaveBase = getWaveAddress(Def)) { 3940 return {{ 3941 [=](MachineInstrBuilder &MIB) { // rsrc 3942 MIB.addReg(Info->getScratchRSrcReg()); 3943 }, 3944 [=](MachineInstrBuilder &MIB) { // soffset 3945 MIB.addReg(WaveBase); 3946 }, 3947 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset 3948 }}; 3949 } 3950 3951 int64_t Offset = 0; 3952 3953 // FIXME: Copy check is a hack 3954 Register BasePtr; 3955 if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) { 3956 if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset)) 3957 return {}; 3958 const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr); 3959 Register WaveBase = getWaveAddress(BasePtrDef); 3960 if (!WaveBase) 3961 return {}; 3962 3963 return {{ 3964 [=](MachineInstrBuilder &MIB) { // rsrc 3965 MIB.addReg(Info->getScratchRSrcReg()); 3966 }, 3967 [=](MachineInstrBuilder &MIB) { // soffset 3968 MIB.addReg(WaveBase); 3969 }, 3970 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset 3971 }}; 3972 } 3973 3974 if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) || 3975 !SIInstrInfo::isLegalMUBUFImmOffset(Offset)) 3976 return {}; 3977 3978 return {{ 3979 [=](MachineInstrBuilder &MIB) { // rsrc 3980 MIB.addReg(Info->getScratchRSrcReg()); 3981 }, 3982 [=](MachineInstrBuilder &MIB) { // soffset 3983 MIB.addImm(0); 3984 }, 3985 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset 3986 }}; 3987 } 3988 3989 std::pair<Register, unsigned> 3990 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const { 3991 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 3992 if (!RootDef) 3993 return std::make_pair(Root.getReg(), 0); 3994 3995 int64_t ConstAddr = 0; 3996 3997 Register PtrBase; 3998 int64_t Offset; 3999 std::tie(PtrBase, Offset) = 4000 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 4001 4002 if (Offset) { 4003 if (isDSOffsetLegal(PtrBase, Offset)) { 4004 // (add n0, c0) 4005 return std::make_pair(PtrBase, Offset); 4006 } 4007 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 4008 // TODO 4009 4010 4011 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 4012 // TODO 4013 4014 } 4015 4016 return std::make_pair(Root.getReg(), 0); 4017 } 4018 4019 InstructionSelector::ComplexRendererFns 4020 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const { 4021 Register Reg; 4022 unsigned Offset; 4023 std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root); 4024 return {{ 4025 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 4026 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } 4027 }}; 4028 } 4029 4030 InstructionSelector::ComplexRendererFns 4031 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const { 4032 return selectDSReadWrite2(Root, 4); 4033 } 4034 4035 InstructionSelector::ComplexRendererFns 4036 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const { 4037 return selectDSReadWrite2(Root, 8); 4038 } 4039 4040 InstructionSelector::ComplexRendererFns 4041 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root, 4042 unsigned Size) const { 4043 Register Reg; 4044 unsigned Offset; 4045 
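  // A worked example (hypothetical): with a 4-byte element size, a base plus
  // a constant offset of 40 gives Offset = 10, so the renderers below emit
  // offset0 = 10 and offset1 = 11, i.e. the two adjacent dwords at +40 and
  // +44 bytes.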
std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size); 4046 return {{ 4047 [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); }, 4048 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, 4049 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); } 4050 }}; 4051 } 4052 4053 std::pair<Register, unsigned> 4054 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root, 4055 unsigned Size) const { 4056 const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg()); 4057 if (!RootDef) 4058 return std::make_pair(Root.getReg(), 0); 4059 4060 int64_t ConstAddr = 0; 4061 4062 Register PtrBase; 4063 int64_t Offset; 4064 std::tie(PtrBase, Offset) = 4065 getPtrBaseWithConstantOffset(Root.getReg(), *MRI); 4066 4067 if (Offset) { 4068 int64_t OffsetValue0 = Offset; 4069 int64_t OffsetValue1 = Offset + Size; 4070 if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) { 4071 // (add n0, c0) 4072 return std::make_pair(PtrBase, OffsetValue0 / Size); 4073 } 4074 } else if (RootDef->getOpcode() == AMDGPU::G_SUB) { 4075 // TODO 4076 4077 } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) { 4078 // TODO 4079 4080 } 4081 4082 return std::make_pair(Root.getReg(), 0); 4083 } 4084 4085 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return 4086 /// the base value with the constant offset. There may be intervening copies 4087 /// between \p Root and the identified constant. Returns \p Root, 0 if this does 4088 /// not match the pattern. 4089 std::pair<Register, int64_t> 4090 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset( 4091 Register Root, const MachineRegisterInfo &MRI) const { 4092 MachineInstr *RootI = getDefIgnoringCopies(Root, MRI); 4093 if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD) 4094 return {Root, 0}; 4095 4096 MachineOperand &RHS = RootI->getOperand(2); 4097 Optional<ValueAndVReg> MaybeOffset = 4098 getIConstantVRegValWithLookThrough(RHS.getReg(), MRI); 4099 if (!MaybeOffset) 4100 return {Root, 0}; 4101 return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()}; 4102 } 4103 4104 static void addZeroImm(MachineInstrBuilder &MIB) { 4105 MIB.addImm(0); 4106 } 4107 4108 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p 4109 /// BasePtr is not valid, a null base pointer will be used. 4110 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI, 4111 uint32_t FormatLo, uint32_t FormatHi, 4112 Register BasePtr) { 4113 Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4114 Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass); 4115 Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass); 4116 Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass); 4117 4118 B.buildInstr(AMDGPU::S_MOV_B32) 4119 .addDef(RSrc2) 4120 .addImm(FormatLo); 4121 B.buildInstr(AMDGPU::S_MOV_B32) 4122 .addDef(RSrc3) 4123 .addImm(FormatHi); 4124 4125 // Build the half of the subregister with the constants before building the 4126 // full 128-bit register. If we are building multiple resource descriptors, 4127 // this will allow CSEing of the 2-component register. 
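  // The completed descriptor below is laid out as:
  //   sub0_sub1 = BasePtr (or a materialized 64-bit zero if none was given)
  //   sub2      = FormatLo
  //   sub3      = FormatHi (the default resource data format bits)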
4128   B.buildInstr(AMDGPU::REG_SEQUENCE)
4129       .addDef(RSrcHi)
4130       .addReg(RSrc2)
4131       .addImm(AMDGPU::sub0)
4132       .addReg(RSrc3)
4133       .addImm(AMDGPU::sub1);
4134
4135   Register RSrcLo = BasePtr;
4136   if (!BasePtr) {
4137     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4138     B.buildInstr(AMDGPU::S_MOV_B64)
4139         .addDef(RSrcLo)
4140         .addImm(0);
4141   }
4142
4143   B.buildInstr(AMDGPU::REG_SEQUENCE)
4144       .addDef(RSrc)
4145       .addReg(RSrcLo)
4146       .addImm(AMDGPU::sub0_sub1)
4147       .addReg(RSrcHi)
4148       .addImm(AMDGPU::sub2_sub3);
4149
4150   return RSrc;
4151 }
4152
4153 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4154                                 const SIInstrInfo &TII, Register BasePtr) {
4155   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4156
4157   // FIXME: Why are half the "default" bits ignored based on the addressing
4158   // mode?
4159   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4160 }
4161
4162 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4163                                const SIInstrInfo &TII, Register BasePtr) {
4164   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4165
4166   // FIXME: Why are half the "default" bits ignored based on the addressing
4167   // mode?
4168   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4169 }
4170
4171 AMDGPUInstructionSelector::MUBUFAddressData
4172 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4173   MUBUFAddressData Data;
4174   Data.N0 = Src;
4175
4176   Register PtrBase;
4177   int64_t Offset;
4178
4179   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4180   if (isUInt<32>(Offset)) {
4181     Data.N0 = PtrBase;
4182     Data.Offset = Offset;
4183   }
4184
4185   if (MachineInstr *InputAdd
4186       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4187     Data.N2 = InputAdd->getOperand(1).getReg();
4188     Data.N3 = InputAdd->getOperand(2).getReg();
4189
4190     // FIXME: Need to fix extra SGPR->VGPR copies inserted
4191     // FIXME: Don't know that this was defined by operand 0
4192     //
4193     // TODO: Remove this when we have copy folding optimizations after
4194     // RegBankSelect.
4195     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4196     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4197   }
4198
4199   return Data;
4200 }
4201
4202 /// Return true if the addr64 MUBUF mode should be used for the given address.
4203 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4204   // (ptr_add N2, N3) -> addr64, or
4205   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4206   if (Addr.N2)
4207     return true;
4208
4209   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4210   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4211 }
4212
4213 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4214 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4215 /// component.
4216 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4217     MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4218   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4219     return;
4220
4221   // Illegal offset, store it in soffset.
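  // For example (hypothetical value): an offset of 8192 does not fit the
  // 12-bit MUBUF immediate field (maximum 4095), so the whole value is moved
  // into an SGPR with S_MOV_B32 and the instruction's immediate becomes 0.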
4222 SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass); 4223 B.buildInstr(AMDGPU::S_MOV_B32) 4224 .addDef(SOffset) 4225 .addImm(ImmOffset); 4226 ImmOffset = 0; 4227 } 4228 4229 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl( 4230 MachineOperand &Root, Register &VAddr, Register &RSrcReg, 4231 Register &SOffset, int64_t &Offset) const { 4232 // FIXME: Predicates should stop this from reaching here. 4233 // addr64 bit was removed for volcanic islands. 4234 if (!STI.hasAddr64() || STI.useFlatForGlobal()) 4235 return false; 4236 4237 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4238 if (!shouldUseAddr64(AddrData)) 4239 return false; 4240 4241 Register N0 = AddrData.N0; 4242 Register N2 = AddrData.N2; 4243 Register N3 = AddrData.N3; 4244 Offset = AddrData.Offset; 4245 4246 // Base pointer for the SRD. 4247 Register SRDPtr; 4248 4249 if (N2) { 4250 if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4251 assert(N3); 4252 if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4253 // Both N2 and N3 are divergent. Use N0 (the result of the add) as the 4254 // addr64, and construct the default resource from a 0 address. 4255 VAddr = N0; 4256 } else { 4257 SRDPtr = N3; 4258 VAddr = N2; 4259 } 4260 } else { 4261 // N2 is not divergent. 4262 SRDPtr = N2; 4263 VAddr = N3; 4264 } 4265 } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) { 4266 // Use the default null pointer in the resource 4267 VAddr = N0; 4268 } else { 4269 // N0 -> offset, or 4270 // (N0 + C1) -> offset 4271 SRDPtr = N0; 4272 } 4273 4274 MachineIRBuilder B(*Root.getParent()); 4275 RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr); 4276 splitIllegalMUBUFOffset(B, SOffset, Offset); 4277 return true; 4278 } 4279 4280 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl( 4281 MachineOperand &Root, Register &RSrcReg, Register &SOffset, 4282 int64_t &Offset) const { 4283 4284 // FIXME: Pattern should not reach here. 4285 if (STI.useFlatForGlobal()) 4286 return false; 4287 4288 MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg()); 4289 if (shouldUseAddr64(AddrData)) 4290 return false; 4291 4292 // N0 -> offset, or 4293 // (N0 + C1) -> offset 4294 Register SRDPtr = AddrData.N0; 4295 Offset = AddrData.Offset; 4296 4297 // TODO: Look through extensions for 32-bit soffset. 4298 MachineIRBuilder B(*Root.getParent()); 4299 4300 RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr); 4301 splitIllegalMUBUFOffset(B, SOffset, Offset); 4302 return true; 4303 } 4304 4305 InstructionSelector::ComplexRendererFns 4306 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const { 4307 Register VAddr; 4308 Register RSrcReg; 4309 Register SOffset; 4310 int64_t Offset = 0; 4311 4312 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4313 return {}; 4314 4315 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4316 // pattern. 
4317 return {{ 4318 [=](MachineInstrBuilder &MIB) { // rsrc 4319 MIB.addReg(RSrcReg); 4320 }, 4321 [=](MachineInstrBuilder &MIB) { // vaddr 4322 MIB.addReg(VAddr); 4323 }, 4324 [=](MachineInstrBuilder &MIB) { // soffset 4325 if (SOffset) 4326 MIB.addReg(SOffset); 4327 else 4328 MIB.addImm(0); 4329 }, 4330 [=](MachineInstrBuilder &MIB) { // offset 4331 MIB.addImm(Offset); 4332 }, 4333 addZeroImm, // cpol 4334 addZeroImm, // tfe 4335 addZeroImm // swz 4336 }}; 4337 } 4338 4339 InstructionSelector::ComplexRendererFns 4340 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const { 4341 Register RSrcReg; 4342 Register SOffset; 4343 int64_t Offset = 0; 4344 4345 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4346 return {}; 4347 4348 return {{ 4349 [=](MachineInstrBuilder &MIB) { // rsrc 4350 MIB.addReg(RSrcReg); 4351 }, 4352 [=](MachineInstrBuilder &MIB) { // soffset 4353 if (SOffset) 4354 MIB.addReg(SOffset); 4355 else 4356 MIB.addImm(0); 4357 }, 4358 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4359 addZeroImm, // cpol 4360 addZeroImm, // tfe 4361 addZeroImm, // swz 4362 }}; 4363 } 4364 4365 InstructionSelector::ComplexRendererFns 4366 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const { 4367 Register VAddr; 4368 Register RSrcReg; 4369 Register SOffset; 4370 int64_t Offset = 0; 4371 4372 if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset)) 4373 return {}; 4374 4375 // FIXME: Use defaulted operands for trailing 0s and remove from the complex 4376 // pattern. 4377 return {{ 4378 [=](MachineInstrBuilder &MIB) { // rsrc 4379 MIB.addReg(RSrcReg); 4380 }, 4381 [=](MachineInstrBuilder &MIB) { // vaddr 4382 MIB.addReg(VAddr); 4383 }, 4384 [=](MachineInstrBuilder &MIB) { // soffset 4385 if (SOffset) 4386 MIB.addReg(SOffset); 4387 else 4388 MIB.addImm(0); 4389 }, 4390 [=](MachineInstrBuilder &MIB) { // offset 4391 MIB.addImm(Offset); 4392 }, 4393 [=](MachineInstrBuilder &MIB) { 4394 MIB.addImm(AMDGPU::CPol::GLC); // cpol 4395 } 4396 }}; 4397 } 4398 4399 InstructionSelector::ComplexRendererFns 4400 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const { 4401 Register RSrcReg; 4402 Register SOffset; 4403 int64_t Offset = 0; 4404 4405 if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset)) 4406 return {}; 4407 4408 return {{ 4409 [=](MachineInstrBuilder &MIB) { // rsrc 4410 MIB.addReg(RSrcReg); 4411 }, 4412 [=](MachineInstrBuilder &MIB) { // soffset 4413 if (SOffset) 4414 MIB.addReg(SOffset); 4415 else 4416 MIB.addImm(0); 4417 }, 4418 [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset 4419 [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol 4420 }}; 4421 } 4422 4423 /// Get an immediate that must be 32-bits, and treated as zero extended. 4424 static Optional<uint64_t> getConstantZext32Val(Register Reg, 4425 const MachineRegisterInfo &MRI) { 4426 // getIConstantVRegVal sexts any values, so see if that matters. 
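  // For example, a G_CONSTANT i32 -1 comes back from the helper sign-extended
  // to the 64-bit value -1; it still satisfies isInt<32>, and Lo_32 then
  // recovers the zero-extended 32-bit value 0xffffffff that callers expect.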
4427 Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI); 4428 if (!OffsetVal || !isInt<32>(*OffsetVal)) 4429 return None; 4430 return Lo_32(*OffsetVal); 4431 } 4432 4433 InstructionSelector::ComplexRendererFns 4434 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const { 4435 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4436 if (!OffsetVal) 4437 return {}; 4438 4439 Optional<int64_t> EncodedImm = 4440 AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true); 4441 if (!EncodedImm) 4442 return {}; 4443 4444 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4445 } 4446 4447 InstructionSelector::ComplexRendererFns 4448 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const { 4449 assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS); 4450 4451 Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI); 4452 if (!OffsetVal) 4453 return {}; 4454 4455 Optional<int64_t> EncodedImm 4456 = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal); 4457 if (!EncodedImm) 4458 return {}; 4459 4460 return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); } }}; 4461 } 4462 4463 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB, 4464 const MachineInstr &MI, 4465 int OpIdx) const { 4466 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4467 "Expected G_CONSTANT"); 4468 MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue()); 4469 } 4470 4471 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB, 4472 const MachineInstr &MI, 4473 int OpIdx) const { 4474 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4475 "Expected G_CONSTANT"); 4476 MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue()); 4477 } 4478 4479 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB, 4480 const MachineInstr &MI, 4481 int OpIdx) const { 4482 assert(OpIdx == -1); 4483 4484 const MachineOperand &Op = MI.getOperand(1); 4485 if (MI.getOpcode() == TargetOpcode::G_FCONSTANT) 4486 MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue()); 4487 else { 4488 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT"); 4489 MIB.addImm(Op.getCImm()->getSExtValue()); 4490 } 4491 } 4492 4493 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB, 4494 const MachineInstr &MI, 4495 int OpIdx) const { 4496 assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 && 4497 "Expected G_CONSTANT"); 4498 MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation()); 4499 } 4500 4501 /// This only really exists to satisfy DAG type checking machinery, so is a 4502 /// no-op here. 
4503 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB, 4504 const MachineInstr &MI, 4505 int OpIdx) const { 4506 MIB.addImm(MI.getOperand(OpIdx).getImm()); 4507 } 4508 4509 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB, 4510 const MachineInstr &MI, 4511 int OpIdx) const { 4512 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4513 MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL); 4514 } 4515 4516 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB, 4517 const MachineInstr &MI, 4518 int OpIdx) const { 4519 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4520 MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1); 4521 } 4522 4523 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB, 4524 const MachineInstr &MI, 4525 int OpIdx) const { 4526 assert(OpIdx >= 0 && "expected to match an immediate operand"); 4527 MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC); 4528 } 4529 4530 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB, 4531 const MachineInstr &MI, 4532 int OpIdx) const { 4533 MIB.addFrameIndex((MI.getOperand(1).getIndex())); 4534 } 4535 4536 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const { 4537 return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm()); 4538 } 4539 4540 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const { 4541 return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm()); 4542 } 4543 4544 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const { 4545 return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm()); 4546 } 4547 4548 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const { 4549 return TII.isInlineConstant(Imm); 4550 } 4551
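// For reference, the inline-immediate checks above accept the small integers
// -16..64 and a fixed set of floating-point values (0.0, +/-0.5, +/-1.0,
// +/-2.0, +/-4.0, plus 1/(2*pi) when the subtarget reports
// hasInv2PiInlineImm()); anything else must be encoded as a literal operand.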