//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

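// Return a MachineOperand that refers to the 32-bit half (\p SubIdx) of a
// 64-bit operand \p MO. Register operands get the subregister copied into a
// fresh 32-bit SGPR; immediate operands are split into their low/high halves
// in place.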
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
        .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  if (Size != 64)
    return false;

  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
      .add(Lo1)
      .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
      .add(Hi1)
      .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
      .addReg(DstLo)
      .addImm(AMDGPU::sub0)
      .addReg(DstHi)
      .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  assert(I.getOperand(2).getImm() % 32 == 0);
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(2).getImm() / 32);
  const DebugLoc &DL = I.getDebugLoc();
  MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY),
                               I.getOperand(0).getReg())
                           .addReg(I.getOperand(1).getReg(), 0, SubReg);

  for (const MachineOperand &MO : Copy->operands()) {
    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

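// A G_GEP on a 64-bit pointer is just a 64-bit integer add of the base and
// the offset, so reuse the G_ADD selection logic.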
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned SubReg = TRI.getSubRegFromChannel(I.getOperand(3).getImm() / 32);
  DebugLoc DL = I.getDebugLoc();
  MachineInstr *Ins = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG))
                          .addDef(I.getOperand(0).getReg())
                          .addReg(I.getOperand(1).getReg())
                          .addReg(I.getOperand(2).getReg())
                          .addImm(SubReg);

  for (const MachineOperand &MO : Ins->operands()) {
    if (!MO.isReg())
      continue;
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  I.eraseFromParent();
  return true;
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

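// Emit an export (EXP or EXP_DONE, depending on \p Done) with the given
// target, source registers, and control bits.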
static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
      .addImm(Tgt)
      .addReg(Reg0)
      .addReg(Reg1)
      .addReg(Reg2)
      .addReg(Reg3)
      .addImm(VM)
      .addImm(Compr)
      .addImm(Enabled);
}

bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

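// Select a generic store into a FLAT store of the matching width; the width
// is taken from the size of the value operand's register.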
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
                           .add(I.getOperand(1))
                           .add(I.getOperand(0))
                           .addImm(0)  // offset
                           .addImm(0)  // glc
                           .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
      .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
      .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
          .addReg(LoReg)
          .addImm(AMDGPU::sub0)
          .addReg(HiReg)
          .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

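// Walk the chain of G_GEPs feeding \p Load and record, for each one, the
// accumulated constant offset and the SGPR/VGPR register parts of the
// address. The results are consumed by the SMRD addressing-mode selectors
// below.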
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
                           .add(I.getOperand(0))
                           .addReg(PtrReg)
                           .addImm(0)  // offset
                           .addImm(0)  // glc
                           .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

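// Entry point for instruction selection: handle the generic opcodes that
// need manual selection here and defer everything else to the TableGen'erated
// selector (selectImpl).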
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_EXTRACT:
    return selectG_EXTRACT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INSERT:
    return selectG_INSERT(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    if (selectImpl(I, CoverageInfo))
      return true;
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];

  if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
    return None;

  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
  MachineRegisterInfo &MRI =
      Root.getParent()->getParent()->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*Root.getParent(), MRI, AddrInfo);

  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  unsigned PtrReg = GEPInfo.SgprParts[0];
  int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
  if (!isUInt<32>(EncodedImm))
    return None;

  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
  }};
}

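// Fallback for SMRD loads whose constant offset does not fit the immediate
// encodings: materialize the 32-bit offset into an SGPR and use the _SGPR
// addressing variant.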
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
  MachineInstr *MI = Root.getParent();
  MachineBasicBlock *MBB = MI->getParent();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  SmallVector<GEPInfo, 4> AddrInfo;
  getAddrModeInfo(*MI, MRI, AddrInfo);

  // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
  // then we can select all ptr + 32-bit offsets not just immediate offsets.
  if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
    return None;

  const GEPInfo &GEPInfo = AddrInfo[0];
  if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
    return None;

  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM Patterns are considered before the _SGPR patterns.
  unsigned PtrReg = GEPInfo.SgprParts[0];
  unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32_XM0RegClass);
  BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
      .addImm(GEPInfo.Imm);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
  }};
}