//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
#undef AMDGPUSubtarget

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

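// Lower a generic COPY to a target COPY, constraining any virtual register
// operands to the register classes implied by their register banks.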
bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
            TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

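// Extract a 32-bit operand covering the sub0/sub1 half of a 64-bit operand:
// for register operands this emits a COPY from the composed subregister; for
// immediates it splits out the low or high 32 bits.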
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {
  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

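// Return the value of a G_CONSTANT's ConstantInt operand as a signed 64-bit
// integer.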
static int64_t getConstant(const MachineInstr *MI) {
  return MI->getOperand(1).getCImm()->getSExtValue();
}

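// Select a 64-bit scalar G_ADD as an S_ADD_U32/S_ADDC_U32 pair over the low
// and high halves, recombined with a REG_SEQUENCE. Other widths are rejected
// here.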
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);

  if (Size != 64)
    return false;

  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

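// G_GEP is just 64-bit pointer arithmetic at this point, so reuse the G_ADD
// selection.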
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

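// Lower G_IMPLICIT_DEF to the target IMPLICIT_DEF, constraining the def
// register to a class when one can be derived for the operand.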
bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

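// Select side-effect-free intrinsics. Most are handled by the generated
// selector; amdgcn.kernarg.segment.ptr becomes a COPY from the preloaded
// kernarg segment pointer register.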
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);

  case Intrinsic::amdgcn_kernarg_segment_ptr: {
    MachineFunction *MF = I.getParent()->getParent();
    MachineRegisterInfo &MRI = MF->getRegInfo();
    const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
    const ArgDescriptor *InputPtrReg;
    const TargetRegisterClass *RC;
    const DebugLoc &DL = I.getDebugLoc();

    std::tie(InputPtrReg, RC)
      = MFI->getPreloadedValue(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
    if (!InputPtrReg)
      report_fatal_error("missing kernarg segment ptr");

    BuildMI(*I.getParent(), &I, DL, TII.get(AMDGPU::COPY))
      .add(I.getOperand(0))
      .addReg(MRI.getLiveInVirtReg(InputPtrReg->getRegister()));
    I.eraseFromParent();
    return true;
  }
  }
  return false;
}

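// Build an EXP or EXP_DONE export of the four source registers to the given
// target, with the vm/compr/en control bits passed through as immediates.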
static MachineInstr *
buildEXP(const TargetInstrInfo &TII, MachineInstr *Insert, unsigned Tgt,
         unsigned Reg0, unsigned Reg1, unsigned Reg2, unsigned Reg3,
         unsigned VM, bool Compr, unsigned Enabled, bool Done) {
  const DebugLoc &DL = Insert->getDebugLoc();
  MachineBasicBlock &BB = *Insert->getParent();
  unsigned Opcode = Done ? AMDGPU::EXP_DONE : AMDGPU::EXP;
  return BuildMI(BB, Insert, DL, TII.get(Opcode))
          .addImm(Tgt)
          .addReg(Reg0)
          .addReg(Reg1)
          .addReg(Reg2)
          .addReg(Reg3)
          .addImm(VM)
          .addImm(Compr)
          .addImm(Enabled);
}

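// Select intrinsics with side effects: amdgcn.exp and amdgcn.exp.compr.
// The compressed form takes two registers of packed 16-bit values and fills
// the remaining export sources with an undef VGPR.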
bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
    MachineInstr &I, CodeGenCoverage &CoverageInfo) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned IntrinsicID = I.getOperand(0).getIntrinsicID();
  switch (IntrinsicID) {
  case Intrinsic::amdgcn_exp: {
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(7).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(8).getReg()));

    MachineInstr *Exp = buildEXP(TII, &I, Tgt, I.getOperand(3).getReg(),
                                 I.getOperand(4).getReg(),
                                 I.getOperand(5).getReg(),
                                 I.getOperand(6).getReg(),
                                 VM, false, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  case Intrinsic::amdgcn_exp_compr: {
    const DebugLoc &DL = I.getDebugLoc();
    int64_t Tgt = getConstant(MRI.getVRegDef(I.getOperand(1).getReg()));
    int64_t Enabled = getConstant(MRI.getVRegDef(I.getOperand(2).getReg()));
    unsigned Reg0 = I.getOperand(3).getReg();
    unsigned Reg1 = I.getOperand(4).getReg();
    unsigned Undef = MRI.createVirtualRegister(&AMDGPU::VGPR_32RegClass);
    int64_t Done = getConstant(MRI.getVRegDef(I.getOperand(5).getReg()));
    int64_t VM = getConstant(MRI.getVRegDef(I.getOperand(6).getReg()));

    BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
    MachineInstr *Exp = buildEXP(TII, &I, Tgt, Reg0, Reg1, Undef, Undef, VM,
                                 true, Enabled, Done);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*Exp, TII, TRI, RBI);
  }
  }
  return false;
}

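// Select G_STORE as a FLAT store sized to the stored value (32-128 bits).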
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

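// Materialize a G_CONSTANT/G_FCONSTANT with S_MOV_B32 or V_MOV_B32 depending
// on the destination register bank; 64-bit values are built from two 32-bit
// moves joined by a REG_SEQUENCE.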
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt Imm(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
              .addReg(LoReg)
              .addImm(AMDGPU::sub0)
              .addReg(HiReg)
              .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes.
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

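// Collect addressing info for a load: walk the chain of G_GEPs feeding the
// pointer operand and record each one's constant offset and which address
// parts live in SGPRs vs. VGPRs.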
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts?  Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }
    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

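// Return true if this memory access is known to be uniform across the wave,
// and therefore eligible for scalar (SMRD) selection.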
static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input.  These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

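// Widen a base 32-bit SMRD opcode to the variant matching the load size
// (up to 512 bits).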
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {
  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }
  return false;
}

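// Try to select a uniform constant-address load as a scalar SMRD load,
// preferring a legal immediate offset, then the CI 32-bit immediate form,
// then an SGPR offset, and finally a plain zero-offset load.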
bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {
  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
      (*I.memoperands_begin())->getAddrSpace() != AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const GCNSubtarget &Subtarget = MF->getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {
    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                 .addReg(PtrReg)
                                 .addImm(EncodedImm)
                                 .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addImm(EncodedImm)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
              .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                                   .addReg(PtrReg)
                                   .addReg(OffsetReg)
                                   .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
                               .addReg(PtrReg)
                               .addImm(0)
                               .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

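// Select G_LOAD: try a scalar SMRD load first, otherwise fall back to a
// FLAT load of the matching width.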
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
                               .add(I.getOperand(0))
                               .addReg(PtrReg)
                               .addImm(0)  // offset
                               .addImm(0)  // glc
                               .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

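// Main entry point: route each generic instruction either to one of the
// manual selection routines above or to the tablegen-generated selector.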
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {
  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectG_INTRINSIC_W_SIDE_EFFECTS(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

///
/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

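// The VOP3 renderers below pass the root operand through unchanged and
// append default (zero) source-modifier, clamp, and omod immediates where
// the instruction encoding expects them.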
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}