//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

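// Extract the low or high 32-bit half (selected by SubIdx) of a 64-bit
// register or immediate operand, so that 64-bit scalar arithmetic can be
// lowered to pairs of 32-bit instructions. For register operands this emits
// a COPY of the requested sub-register into a fresh SGPR; for immediates it
// splits the value into its low or high 32 bits.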
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

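// There is no single scalar instruction for a 64-bit add, so it is expanded
// into an add/add-with-carry pair over the 32-bit halves. For example:
//   %dst:sgpr(s64) = G_ADD %a, %b
// becomes
//   %lo = S_ADD_U32  %a.sub0, %b.sub0        ; defines the carry in SCC
//   %hi = S_ADDC_U32 %a.sub1, %b.sub1        ; consumes the carry from SCC
//   %dst = REG_SEQUENCE %lo, sub0, %hi, sub1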
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);

  // Only the 64-bit expansion is handled here; everything else falls back.
  if (Size != 64)
    return false;

  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
        .add(I.getOperand(0))
        .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
      .addImm(Tgt)
      .addReg(Reg0)
      .addReg(Reg1)
      .addReg(Reg2)
      .addReg(Reg3)
      .addImm(VM)
      .addImm(Compr)
      .addImm(Enabled);
}

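// Operand layout of the G_INTRINSIC_W_SIDE_EFFECTS forms handled below,
// after the intrinsic ID in operand 0:
//   llvm.amdgcn.exp:       tgt, en, src0..src3, done, vm
//   llvm.amdgcn.exp.compr: tgt, en, src0, src1, done, vm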
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select the store instruction based on the address space.
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(1)) // vaddr
      .add(I.getOperand(0)) // vdata
      .addImm(0)            // offset
      .addImm(0)            // glc
      .addImm(0);           // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

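// Materialize constants: a 32-bit value needs only a single S_MOV_B32 or
// V_MOV_B32, while a 64-bit value is split into two 32-bit moves whose
// results are glued back together with a REG_SEQUENCE.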
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt Imm(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

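// Like G_STORE above, G_LOAD is always lowered to a FLAT access here; the
// opcode should eventually be chosen based on the address space. Note that
// select() tries the TableGen'd patterns (selectImpl) first, so SMRD-style
// loads are normally matched before this fallback runs.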
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
      .add(I.getOperand(0))
      .addReg(PtrReg)
      .addImm(0)  // offset
      .addImm(0)  // glc
      .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    // Try the TableGen-generated patterns first, then fall back to the
    // manual FLAT selection above.
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

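// The functions below are ComplexPattern renderers used by the
// TableGen-generated selector: each returns a list of callbacks that append
// the matched operand (plus any implied modifier or immediate operands) to
// the instruction being built.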
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

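// Same matching as selectSmrdImm, except the encoded offset only needs to
// fit in 32 bits rather than pass the subtarget's legality check;
// presumably this feeds the SMRD patterns that accept a 32-bit literal
// offset.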
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits,
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}