1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPURegisterInfo.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIMachineFunctionInfo.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
25 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
28 #include "llvm/CodeGen/GlobalISel/Utils.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstr.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 #define DEBUG_TYPE "amdgpu-isel"
39 
40 using namespace llvm;
41 using namespace MIPatternMatch;
42 
43 #define GET_GLOBALISEL_IMPL
44 #define AMDGPUSubtarget GCNSubtarget
45 #include "AMDGPUGenGlobalISel.inc"
46 #undef GET_GLOBALISEL_IMPL
47 #undef AMDGPUSubtarget
48 
49 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
50     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
51     const AMDGPUTargetMachine &TM)
52     : InstructionSelector(), TII(*STI.getInstrInfo()),
53       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
54       STI(STI),
55       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
56 #define GET_GLOBALISEL_PREDICATES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_PREDICATES_INIT
59 #define GET_GLOBALISEL_TEMPORARIES_INIT
60 #include "AMDGPUGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_TEMPORARIES_INIT
62 {
63 }
64 
65 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
66 
67 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
68                                         CodeGenCoverage &CoverageInfo) {
69   MRI = &MF.getRegInfo();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo);
71 }
72 
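// Return whether \p Reg is a wave-wide boolean: the physical VCC register, a
// virtual register of the boolean register class with an s1 type, or a
// register assigned to the VCC register bank.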
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   if (Register::isPhysicalRegister(Reg))
76     return Reg == TRI.getVCC();
77 
78   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
79   const TargetRegisterClass *RC =
80       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
81   if (RC) {
82     const LLT Ty = MRI.getType(Reg);
83     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
84            Ty.isValid() && Ty.getSizeInBits() == 1;
85   }
86 
87   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
88   return RB->getID() == AMDGPU::VCCRegBankID;
89 }
90 
91 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
92   const DebugLoc &DL = I.getDebugLoc();
93   MachineBasicBlock *BB = I.getParent();
94   I.setDesc(TII.get(TargetOpcode::COPY));
95 
96   const MachineOperand &Src = I.getOperand(1);
97   MachineOperand &Dst = I.getOperand(0);
98   Register DstReg = Dst.getReg();
99   Register SrcReg = Src.getReg();
100 
101   if (isVCC(DstReg, *MRI)) {
102     if (SrcReg == AMDGPU::SCC) {
103       const TargetRegisterClass *RC
104         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105       if (!RC)
106         return true;
107       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
108     }
109 
110     if (!isVCC(SrcReg, *MRI)) {
111       // TODO: Should probably leave the copy and let copyPhysReg expand it.
112       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
113         return false;
114 
115       const TargetRegisterClass *SrcRC
116         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
117 
118       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
119 
120       // We can't trust the high bits at this point, so clear them.
121 
122       // TODO: Skip masking high bits if def is known boolean.
123 
124       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
125         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
126       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
127         .addImm(1)
128         .addReg(SrcReg);
129       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
130         .addImm(0)
131         .addReg(MaskedReg);
132 
133       if (!MRI->getRegClassOrNull(SrcReg))
134         MRI->setRegClass(SrcReg, SrcRC);
135       I.eraseFromParent();
136       return true;
137     }
138 
139     const TargetRegisterClass *RC =
140       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
141     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
142       return false;
143 
144     // Don't constrain the source register to a class so the def instruction
145     // handles it (unless it's undef).
146     //
147     // FIXME: This is a hack. When selecting the def, we need to know
148     // specifically that the result is VCCRegBank, and not just an SGPR
149     // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
150     if (Src.isUndef()) {
151       const TargetRegisterClass *SrcRC =
152         TRI.getConstrainedRegClassForOperand(Src, *MRI);
153       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
154         return false;
155     }
156 
157     return true;
158   }
159 
160   for (const MachineOperand &MO : I.operands()) {
161     if (Register::isPhysicalRegister(MO.getReg()))
162       continue;
163 
164     const TargetRegisterClass *RC =
165             TRI.getConstrainedRegClassForOperand(MO, *MRI);
166     if (!RC)
167       continue;
168     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
169   }
170   return true;
171 }
172 
173 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
174   const Register DefReg = I.getOperand(0).getReg();
175   const LLT DefTy = MRI->getType(DefReg);
176 
177   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
178 
179   const RegClassOrRegBank &RegClassOrBank =
180     MRI->getRegClassOrRegBank(DefReg);
181 
182   const TargetRegisterClass *DefRC
183     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
184   if (!DefRC) {
185     if (!DefTy.isValid()) {
186       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
187       return false;
188     }
189 
190     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
191     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
192     if (!DefRC) {
193       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
194       return false;
195     }
196   }
197 
198   // TODO: Verify that all registers have the same bank
199   I.setDesc(TII.get(TargetOpcode::PHI));
200   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
201 }
202 
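// Return a 32-bit operand for the half of a 64-bit operand selected by
// SubIdx. Register operands are copied through a new register of SubRC;
// immediate operands are split into their low or high 32 bits.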
203 MachineOperand
204 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
205                                            const TargetRegisterClass &SubRC,
206                                            unsigned SubIdx) const {
207 
208   MachineInstr *MI = MO.getParent();
209   MachineBasicBlock *BB = MO.getParent()->getParent();
210   Register DstReg = MRI->createVirtualRegister(&SubRC);
211 
212   if (MO.isReg()) {
213     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
214     Register Reg = MO.getReg();
215     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
216             .addReg(Reg, 0, ComposedSubIdx);
217 
218     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
219                                      MO.isKill(), MO.isDead(), MO.isUndef(),
220                                      MO.isEarlyClobber(), 0, MO.isDebug(),
221                                      MO.isInternalRead());
222   }
223 
224   assert(MO.isImm());
225 
226   APInt Imm(64, MO.getImm());
227 
228   switch (SubIdx) {
229   default:
230     llvm_unreachable("do not know how to split immediate with this sub index.");
231   case AMDGPU::sub0:
232     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
233   case AMDGPU::sub1:
234     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
235   }
236 }
237 
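// Map a generic bitwise opcode (G_AND/G_OR/G_XOR) to the corresponding 32-bit
// or 64-bit SALU instruction.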
238 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
239   switch (Opc) {
240   case AMDGPU::G_AND:
241     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
242   case AMDGPU::G_OR:
243     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
244   case AMDGPU::G_XOR:
245     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
246   default:
247     llvm_unreachable("not a bit op");
248   }
249 }
250 
251 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
252   MachineOperand &Dst = I.getOperand(0);
253   MachineOperand &Src0 = I.getOperand(1);
254   MachineOperand &Src1 = I.getOperand(2);
255   Register DstReg = Dst.getReg();
256   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
257 
258   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
259   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
260     const TargetRegisterClass *RC = TRI.getBoolRC();
261     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
262                                            RC == &AMDGPU::SReg_64RegClass);
263     I.setDesc(TII.get(InstOpc));
264 
265     // FIXME: Hack to avoid turning the register bank into a register class.
266     // The selector for G_ICMP relies on seeing that the register bank for
267     // the result is VCC. In wave32, if we constrain the registers to SReg_32
268     // here, it will be ambiguous whether it's a scalar or vector bool.
269     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
270       MRI->setRegClass(Src0.getReg(), RC);
271     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
272       MRI->setRegClass(Src1.getReg(), RC);
273 
274     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
275   }
276 
277   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
278   // the result?
279   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
280     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
281     I.setDesc(TII.get(InstOpc));
282     // Dead implicit-def of scc
283     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
284                                            true, // isImp
285                                            false, // isKill
286                                            true)); // isDead
287     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
288   }
289 
290   return false;
291 }
292 
293 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
294   MachineBasicBlock *BB = I.getParent();
295   MachineFunction *MF = BB->getParent();
296   Register DstReg = I.getOperand(0).getReg();
297   const DebugLoc &DL = I.getDebugLoc();
298   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
299   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
300   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
301   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
302 
303   if (Size == 32) {
304     if (IsSALU) {
305       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
306       MachineInstr *Add =
307         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
308         .add(I.getOperand(1))
309         .add(I.getOperand(2));
310       I.eraseFromParent();
311       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
312     }
313 
314     if (STI.hasAddNoCarry()) {
315       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
316       I.setDesc(TII.get(Opc));
317       I.addOperand(*MF, MachineOperand::CreateImm(0));
318       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
319       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
320     }
321 
322     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
323 
324     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
325     MachineInstr *Add
326       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
327       .addDef(UnusedCarry, RegState::Dead)
328       .add(I.getOperand(1))
329       .add(I.getOperand(2))
330       .addImm(0);
331     I.eraseFromParent();
332     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
333   }
334 
335   assert(!Sub && "illegal sub should not reach here");
336 
337   const TargetRegisterClass &RC
338     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
339   const TargetRegisterClass &HalfRC
340     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
341 
342   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
343   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
344   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
345   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
346 
347   Register DstLo = MRI->createVirtualRegister(&HalfRC);
348   Register DstHi = MRI->createVirtualRegister(&HalfRC);
349 
350   if (IsSALU) {
351     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
352       .add(Lo1)
353       .add(Lo2);
354     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
355       .add(Hi1)
356       .add(Hi2);
357   } else {
358     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
359     Register CarryReg = MRI->createVirtualRegister(CarryRC);
360     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
361       .addDef(CarryReg)
362       .add(Lo1)
363       .add(Lo2)
364       .addImm(0);
365     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
366       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
367       .add(Hi1)
368       .add(Hi2)
369       .addReg(CarryReg, RegState::Kill)
370       .addImm(0);
371 
372     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
373       return false;
374   }
375 
376   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
377     .addReg(DstLo)
378     .addImm(AMDGPU::sub0)
379     .addReg(DstHi)
380     .addImm(AMDGPU::sub1);
381 
382 
383   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
384     return false;
385 
386   I.eraseFromParent();
387   return true;
388 }
389 
390 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
391   MachineInstr &I) const {
392   MachineBasicBlock *BB = I.getParent();
393   MachineFunction *MF = BB->getParent();
394   const DebugLoc &DL = I.getDebugLoc();
395   Register Dst0Reg = I.getOperand(0).getReg();
396   Register Dst1Reg = I.getOperand(1).getReg();
397   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
398                      I.getOpcode() == AMDGPU::G_UADDE;
399   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
400                           I.getOpcode() == AMDGPU::G_USUBE;
401 
402   if (isVCC(Dst1Reg, *MRI)) {
403     // The names of the opcodes are misleading. v_add_i32/v_sub_i32 have
404     // unsigned carry out despite the _i32 name. These were renamed in VI
405     // to _U32. FIXME: We should probably rename the opcodes here.
406     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
407     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
408     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
409     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
410     I.addOperand(*MF, MachineOperand::CreateImm(0));
411     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
412   }
413 
414   Register Src0Reg = I.getOperand(2).getReg();
415   Register Src1Reg = I.getOperand(3).getReg();
416 
417   if (HasCarryIn) {
418     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
419       .addReg(I.getOperand(4).getReg());
420   }
421 
422   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
423   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
424 
425   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
426     .add(I.getOperand(2))
427     .add(I.getOperand(3));
428   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
429     .addReg(AMDGPU::SCC);
430 
431   if (!MRI->getRegClassOrNull(Dst1Reg))
432     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
433 
434   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
435       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
436       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
437     return false;
438 
439   if (HasCarryIn &&
440       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
441                                     AMDGPU::SReg_32RegClass, *MRI))
442     return false;
443 
444   I.eraseFromParent();
445   return true;
446 }
447 
448 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
449   MachineBasicBlock *BB = I.getParent();
450   Register DstReg = I.getOperand(0).getReg();
451   Register SrcReg = I.getOperand(1).getReg();
452   LLT DstTy = MRI->getType(DstReg);
453   LLT SrcTy = MRI->getType(SrcReg);
454   const unsigned SrcSize = SrcTy.getSizeInBits();
455   const unsigned DstSize = DstTy.getSizeInBits();
456 
457   // TODO: Should handle any multiple of 32 offset.
458   unsigned Offset = I.getOperand(2).getImm();
459   if (Offset % DstSize != 0)
460     return false;
461 
462   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
463   const TargetRegisterClass *SrcRC =
464     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
465   if (!SrcRC)
466     return false;
467 
468   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
469 
470   const DebugLoc &DL = I.getDebugLoc();
471   MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
472                                .addReg(SrcReg, 0, SubRegs[Offset / DstSize]);
473 
474   for (const MachineOperand &MO : Copy->operands()) {
475     const TargetRegisterClass *RC =
476             TRI.getConstrainedRegClassForOperand(MO, *MRI);
477     if (!RC)
478       continue;
479     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
480   }
481   I.eraseFromParent();
482   return true;
483 }
484 
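// Select G_MERGE_VALUES with 32-bit or wider sources as a REG_SEQUENCE;
// narrower sources are left to the imported TableGen patterns.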
485 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
486   MachineBasicBlock *BB = MI.getParent();
487   Register DstReg = MI.getOperand(0).getReg();
488   LLT DstTy = MRI->getType(DstReg);
489   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
490 
491   const unsigned SrcSize = SrcTy.getSizeInBits();
492   if (SrcSize < 32)
493     return selectImpl(MI, *CoverageInfo);
494 
495   const DebugLoc &DL = MI.getDebugLoc();
496   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
497   const unsigned DstSize = DstTy.getSizeInBits();
498   const TargetRegisterClass *DstRC =
499     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
500   if (!DstRC)
501     return false;
502 
503   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
504   MachineInstrBuilder MIB =
505     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
506   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
507     MachineOperand &Src = MI.getOperand(I + 1);
508     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
509     MIB.addImm(SubRegs[I]);
510 
511     const TargetRegisterClass *SrcRC
512       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
513     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
514       return false;
515   }
516 
517   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
518     return false;
519 
520   MI.eraseFromParent();
521   return true;
522 }
523 
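// Select G_UNMERGE_VALUES as a series of subregister copies from the source.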
524 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
525   MachineBasicBlock *BB = MI.getParent();
526   const int NumDst = MI.getNumOperands() - 1;
527 
528   MachineOperand &Src = MI.getOperand(NumDst);
529 
530   Register SrcReg = Src.getReg();
531   Register DstReg0 = MI.getOperand(0).getReg();
532   LLT DstTy = MRI->getType(DstReg0);
533   LLT SrcTy = MRI->getType(SrcReg);
534 
535   const unsigned DstSize = DstTy.getSizeInBits();
536   const unsigned SrcSize = SrcTy.getSizeInBits();
537   const DebugLoc &DL = MI.getDebugLoc();
538   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
539 
540   const TargetRegisterClass *SrcRC =
541     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
542   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
543     return false;
544 
545   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
546 
547   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
548   // source, and this relies on the fact that the same subregister indices are
549   // used for both.
550   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
551   for (int I = 0, E = NumDst; I != E; ++I) {
552     MachineOperand &Dst = MI.getOperand(I);
553     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
554       .addReg(SrcReg, SrcFlags, SubRegs[I]);
555 
556     const TargetRegisterClass *DstRC =
557       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
558     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
559       return false;
560   }
561 
562   MI.eraseFromParent();
563   return true;
564 }
565 
566 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
567   return selectG_ADD_SUB(I);
568 }
569 
570 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
571   const MachineOperand &MO = I.getOperand(0);
572 
573   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
574   // regbank check here is to know why getConstrainedRegClassForOperand failed.
575   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
576   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
577       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
578     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
579     return true;
580   }
581 
582   return false;
583 }
584 
585 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
586   MachineBasicBlock *BB = I.getParent();
587 
588   Register DstReg = I.getOperand(0).getReg();
589   Register Src0Reg = I.getOperand(1).getReg();
590   Register Src1Reg = I.getOperand(2).getReg();
591   LLT Src1Ty = MRI->getType(Src1Reg);
592 
593   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
594   unsigned InsSize = Src1Ty.getSizeInBits();
595 
596   int64_t Offset = I.getOperand(3).getImm();
597   if (Offset % 32 != 0)
598     return false;
599 
600   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
601   if (SubReg == AMDGPU::NoSubRegister)
602     return false;
603 
604   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
605   const TargetRegisterClass *DstRC =
606     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
607   if (!DstRC)
608     return false;
609 
610   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
611   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
612   const TargetRegisterClass *Src0RC =
613     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
614   const TargetRegisterClass *Src1RC =
615     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
616 
617   // Deal with weird cases where the class only partially supports the subreg
618   // index.
619   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
620   if (!Src0RC)
621     return false;
622 
623   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
624       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
625       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
626     return false;
627 
628   const DebugLoc &DL = I.getDebugLoc();
629   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
630     .addReg(Src0Reg)
631     .addReg(Src1Reg)
632     .addImm(SubReg);
633 
634   I.eraseFromParent();
635   return true;
636 }
637 
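// Manually select llvm.amdgcn.interp.p1.f16 on subtargets with 16 LDS banks,
// which need the two-instruction v_interp_mov + v_interp_p1lv_f16 expansion.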
638 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
639   if (STI.getLDSBankCount() != 16)
640     return selectImpl(MI, *CoverageInfo);
641 
642   Register Dst = MI.getOperand(0).getReg();
643   Register Src0 = MI.getOperand(2).getReg();
644   Register M0Val = MI.getOperand(6).getReg();
645   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
646       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
647       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
648     return false;
649 
650   // This requires 2 instructions. It is possible to write a pattern to support
651   // this, but the generated isel emitter doesn't correctly deal with multiple
652   // output instructions using the same physical register input. The copy to m0
653   // is incorrectly placed before the second instruction.
654   //
655   // TODO: Match source modifiers.
656 
657   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
658   const DebugLoc &DL = MI.getDebugLoc();
659   MachineBasicBlock *MBB = MI.getParent();
660 
661   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
662     .addReg(M0Val);
663   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
664     .addImm(2)
665     .addImm(MI.getOperand(4).getImm())  // $attr
666     .addImm(MI.getOperand(3).getImm()); // $attrchan
667 
668   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
669     .addImm(0)                          // $src0_modifiers
670     .addReg(Src0)                       // $src0
671     .addImm(MI.getOperand(4).getImm())  // $attr
672     .addImm(MI.getOperand(3).getImm())  // $attrchan
673     .addImm(0)                          // $src2_modifiers
674     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
675     .addImm(MI.getOperand(5).getImm())  // $high
676     .addImm(0)                          // $clamp
677     .addImm(0);                         // $omod
678 
679   MI.eraseFromParent();
680   return true;
681 }
682 
683 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
684   unsigned IntrinsicID = I.getIntrinsicID();
685   switch (IntrinsicID) {
686   case Intrinsic::amdgcn_if_break: {
687     MachineBasicBlock *BB = I.getParent();
688 
689     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
690     // SelectionDAG uses for wave32 vs wave64.
691     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
692       .add(I.getOperand(0))
693       .add(I.getOperand(2))
694       .add(I.getOperand(3));
695 
696     Register DstReg = I.getOperand(0).getReg();
697     Register Src0Reg = I.getOperand(2).getReg();
698     Register Src1Reg = I.getOperand(3).getReg();
699 
700     I.eraseFromParent();
701 
702     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
703       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
704 
705     return true;
706   }
707   case Intrinsic::amdgcn_interp_p1_f16:
708     return selectInterpP1F16(I);
709   default:
710     return selectImpl(I, *CoverageInfo);
711   }
712 }
713 
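// Return the VALU compare opcode for integer predicate P on 32-bit or 64-bit
// operands, or -1 for unsupported sizes.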
714 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
715   if (Size != 32 && Size != 64)
716     return -1;
717   switch (P) {
718   default:
719     llvm_unreachable("Unknown condition code!");
720   case CmpInst::ICMP_NE:
721     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
722   case CmpInst::ICMP_EQ:
723     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
724   case CmpInst::ICMP_SGT:
725     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
726   case CmpInst::ICMP_SGE:
727     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
728   case CmpInst::ICMP_SLT:
729     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
730   case CmpInst::ICMP_SLE:
731     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
732   case CmpInst::ICMP_UGT:
733     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
734   case CmpInst::ICMP_UGE:
735     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
736   case CmpInst::ICMP_ULT:
737     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
738   case CmpInst::ICMP_ULE:
739     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
740   }
741 }
742 
743 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
744                                               unsigned Size) const {
745   if (Size == 64) {
746     if (!STI.hasScalarCompareEq64())
747       return -1;
748 
749     switch (P) {
750     case CmpInst::ICMP_NE:
751       return AMDGPU::S_CMP_LG_U64;
752     case CmpInst::ICMP_EQ:
753       return AMDGPU::S_CMP_EQ_U64;
754     default:
755       return -1;
756     }
757   }
758 
759   if (Size != 32)
760     return -1;
761 
762   switch (P) {
763   case CmpInst::ICMP_NE:
764     return AMDGPU::S_CMP_LG_U32;
765   case CmpInst::ICMP_EQ:
766     return AMDGPU::S_CMP_EQ_U32;
767   case CmpInst::ICMP_SGT:
768     return AMDGPU::S_CMP_GT_I32;
769   case CmpInst::ICMP_SGE:
770     return AMDGPU::S_CMP_GE_I32;
771   case CmpInst::ICMP_SLT:
772     return AMDGPU::S_CMP_LT_I32;
773   case CmpInst::ICMP_SLE:
774     return AMDGPU::S_CMP_LE_I32;
775   case CmpInst::ICMP_UGT:
776     return AMDGPU::S_CMP_GT_U32;
777   case CmpInst::ICMP_UGE:
778     return AMDGPU::S_CMP_GE_U32;
779   case CmpInst::ICMP_ULT:
780     return AMDGPU::S_CMP_LT_U32;
781   case CmpInst::ICMP_ULE:
782     return AMDGPU::S_CMP_LE_U32;
783   default:
784     llvm_unreachable("Unknown condition code!");
785   }
786 }
787 
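// Select G_ICMP. Scalar results use S_CMP* plus a copy from SCC; VCC results
// use the VALU V_CMP* instructions.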
788 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
789   MachineBasicBlock *BB = I.getParent();
790   const DebugLoc &DL = I.getDebugLoc();
791 
792   Register SrcReg = I.getOperand(2).getReg();
793   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
794 
795   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
796 
797   Register CCReg = I.getOperand(0).getReg();
798   if (!isVCC(CCReg, *MRI)) {
799     int Opcode = getS_CMPOpcode(Pred, Size);
800     if (Opcode == -1)
801       return false;
802     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
803             .add(I.getOperand(2))
804             .add(I.getOperand(3));
805     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
806       .addReg(AMDGPU::SCC);
807     bool Ret =
808         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
809         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
810     I.eraseFromParent();
811     return Ret;
812   }
813 
814   int Opcode = getV_CMPOpcode(Pred, Size);
815   if (Opcode == -1)
816     return false;
817 
818   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
819             I.getOperand(0).getReg())
820             .add(I.getOperand(2))
821             .add(I.getOperand(3));
822   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
823                                *TRI.getBoolRC(), *MRI);
824   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
825   I.eraseFromParent();
826   return Ret;
827 }
828 
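// Return true if Reg is known to be the integer constant 0, looking through a
// single copy.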
829 static bool isZero(Register Reg, MachineRegisterInfo &MRI) {
830   int64_t C;
831   if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)
832     return true;
833 
834   // FIXME: matcher should ignore copies
835   return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;
836 }
837 
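// The auxiliary/cachepolicy operand of the buffer intrinsics packs glc in
// bit 0, slc in bit 1, dlc in bit 2 and swz in bit 3.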
838 static unsigned extractGLC(unsigned AuxiliaryData) {
839   return AuxiliaryData & 1;
840 }
841 
842 static unsigned extractSLC(unsigned AuxiliaryData) {
843   return (AuxiliaryData >> 1) & 1;
844 }
845 
846 static unsigned extractDLC(unsigned AuxiliaryData) {
847   return (AuxiliaryData >> 2) & 1;
848 }
849 
850 static unsigned extractSWZ(unsigned AuxiliaryData) {
851   return (AuxiliaryData >> 3) & 1;
852 }
853 
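// Pick the MUBUF opcode for a plain (non-format) buffer store based on the
// memory access size and whether a VGPR offset (offen) is used.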
854 static unsigned getBufferStoreOpcode(LLT Ty,
855                                      const unsigned MemSize,
856                                      const bool Offen) {
857   const int Size = Ty.getSizeInBits();
858   switch (8 * MemSize) {
859   case 8:
860     return Offen ? AMDGPU::BUFFER_STORE_BYTE_OFFEN_exact :
861                    AMDGPU::BUFFER_STORE_BYTE_OFFSET_exact;
862   case 16:
863     return Offen ? AMDGPU::BUFFER_STORE_SHORT_OFFEN_exact :
864                    AMDGPU::BUFFER_STORE_SHORT_OFFSET_exact;
865   default:
866     unsigned Opc = Offen ? AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact :
867                            AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact;
868     if (Size > 32)
869       Opc = AMDGPU::getMUBUFOpcode(Opc, Size / 32);
870     return Opc;
871   }
872 }
873 
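// Pick the MUBUF opcode for a format buffer store, accounting for packed and
// unpacked d16 data and the element count. Returns -1 for unsupported counts.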
874 static unsigned getBufferStoreFormatOpcode(LLT Ty,
875                                            const unsigned MemSize,
876                                            const bool Offen) {
877   bool IsD16Packed = Ty.getScalarSizeInBits() == 16;
878   bool IsD16Unpacked = 8 * MemSize < Ty.getSizeInBits();
879   int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
880 
881   if (IsD16Packed) {
882     switch (NumElts) {
883     case 1:
884       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
885                      AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
886     case 2:
887       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact :
888                      AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFSET_exact;
889     case 3:
890       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFEN_exact :
891                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFSET_exact;
892     case 4:
893       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFEN_exact :
894                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFSET_exact;
895     default:
896       return -1;
897     }
898   }
899 
900   if (IsD16Unpacked) {
901     switch (NumElts) {
902     case 1:
903       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
904                      AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
905     case 2:
906       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFEN_exact :
907                      AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFSET_exact;
908     case 3:
909       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFEN_exact :
910                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFSET_exact;
911     case 4:
912       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFEN_exact :
913                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFSET_exact;
914     default:
915       return -1;
916     }
917   }
918 
919   switch (NumElts) {
920   case 1:
921     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_X_OFFEN_exact :
922                    AMDGPU::BUFFER_STORE_FORMAT_X_OFFSET_exact;
923   case 2:
924     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XY_OFFEN_exact :
925                   AMDGPU::BUFFER_STORE_FORMAT_XY_OFFSET_exact;
926   case 3:
927     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFEN_exact :
928                    AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFSET_exact;
929   case 4:
930     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFEN_exact :
931                    AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFSET_exact;
932   default:
933     return -1;
934   }
935 
936   llvm_unreachable("unhandled buffer store");
937 }
938 
939 // TODO: Move this to combiner
940 // Returns base register, imm offset, total constant offset.
941 std::tuple<Register, unsigned, unsigned>
942 AMDGPUInstructionSelector::splitBufferOffsets(MachineIRBuilder &B,
943                                               Register OrigOffset) const {
944   const unsigned MaxImm = 4095;
945   Register BaseReg;
946   unsigned TotalConstOffset;
947   MachineInstr *OffsetDef;
948 
949   std::tie(BaseReg, TotalConstOffset, OffsetDef)
950     = AMDGPU::getBaseWithConstantOffset(*MRI, OrigOffset);
951 
952   unsigned ImmOffset = TotalConstOffset;
953 
954   // If the immediate value is too big for the immoffset field, keep only its
955   // low 12 bits there and move the rest (a multiple of 4096) into the value
956   // that is copied/added for the voffset field, so it stands more chance of
957   // being CSEd with the copy/add for another similar load/store. However, do
958   // not do that rounding down to a multiple of 4096 if that is a negative
959   // number, as it appears to be illegal to have a negative offset in the vgpr,
960   // even if adding the immediate offset makes it positive.
961   unsigned Overflow = ImmOffset & ~MaxImm;
962   ImmOffset -= Overflow;
963   if ((int32_t)Overflow < 0) {
964     Overflow += ImmOffset;
965     ImmOffset = 0;
966   }
967 
968   if (Overflow != 0) {
969     // In case this is in a waterfall loop, insert offset code at the def point
970     // of the offset, not inside the loop.
971     MachineBasicBlock::iterator OldInsPt = B.getInsertPt();
972     MachineBasicBlock &OldMBB = B.getMBB();
973     B.setInstr(*OffsetDef);
974 
975     if (!BaseReg) {
976       BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
977       B.buildInstr(AMDGPU::V_MOV_B32_e32)
978         .addDef(BaseReg)
979         .addImm(Overflow);
980     } else {
981       Register OverflowVal = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
982       B.buildInstr(AMDGPU::V_MOV_B32_e32)
983         .addDef(OverflowVal)
984         .addImm(Overflow);
985 
986       Register NewBaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
987       TII.getAddNoCarry(B.getMBB(), B.getInsertPt(), B.getDebugLoc(), NewBaseReg)
988         .addReg(BaseReg)
989         .addReg(OverflowVal, RegState::Kill)
990         .addImm(0);
991       BaseReg = NewBaseReg;
992     }
993 
994     B.setInsertPt(OldMBB, OldInsPt);
995   }
996 
997   return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
998 }
999 
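// Select the raw buffer store intrinsics (plain or format) into MUBUF stores,
// splitting the offset into voffset and 12-bit immediate offset parts.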
1000 bool AMDGPUInstructionSelector::selectStoreIntrinsic(MachineInstr &MI,
1001                                                      bool IsFormat) const {
1002   MachineIRBuilder B(MI);
1003   MachineFunction &MF = B.getMF();
1004   Register VData = MI.getOperand(1).getReg();
1005   LLT Ty = MRI->getType(VData);
1006 
1007   int Size = Ty.getSizeInBits();
1008   if (Size % 32 != 0)
1009     return false;
1010 
1011   // FIXME: Verifier should enforce 1 MMO for these intrinsics.
1012   MachineMemOperand *MMO = *MI.memoperands_begin();
1013   const int MemSize = MMO->getSize();
1014 
1015   Register RSrc = MI.getOperand(2).getReg();
1016   Register VOffset = MI.getOperand(3).getReg();
1017   Register SOffset = MI.getOperand(4).getReg();
1018   unsigned AuxiliaryData = MI.getOperand(5).getImm();
1019   unsigned ImmOffset;
1020   unsigned TotalOffset;
1021 
1022   std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
1023   if (TotalOffset != 0)
1024     MMO = MF.getMachineMemOperand(MMO, TotalOffset, MemSize);
1025 
1026   const bool Offen = !isZero(VOffset, *MRI);
1027 
1028   int Opc = IsFormat ? getBufferStoreFormatOpcode(Ty, MemSize, Offen) :
1029     getBufferStoreOpcode(Ty, MemSize, Offen);
1030   if (Opc == -1)
1031     return false;
1032 
1033   MachineInstrBuilder MIB = B.buildInstr(Opc)
1034     .addUse(VData);
1035 
1036   if (Offen)
1037     MIB.addUse(VOffset);
1038 
1039   MIB.addUse(RSrc)
1040      .addUse(SOffset)
1041      .addImm(ImmOffset)
1042      .addImm(extractGLC(AuxiliaryData))
1043      .addImm(extractSLC(AuxiliaryData))
1044      .addImm(0) // tfe: FIXME: Remove from inst
1045      .addImm(extractDLC(AuxiliaryData))
1046      .addImm(extractSWZ(AuxiliaryData))
1047      .addMemOperand(MMO);
1048 
1049   MI.eraseFromParent();
1050 
1051   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1052 }
1053 
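// Return the shader-type value ds_ordered_count encodes for the current
// calling convention (compute is 0).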
1054 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
1055   switch (MF.getFunction().getCallingConv()) {
1056   case CallingConv::AMDGPU_PS:
1057     return 1;
1058   case CallingConv::AMDGPU_VS:
1059     return 2;
1060   case CallingConv::AMDGPU_GS:
1061     return 3;
1062   case CallingConv::AMDGPU_HS:
1063   case CallingConv::AMDGPU_LS:
1064   case CallingConv::AMDGPU_ES:
1065     report_fatal_error("ds_ordered_count unsupported for this calling conv");
1066   case CallingConv::AMDGPU_CS:
1067   case CallingConv::AMDGPU_KERNEL:
1068   case CallingConv::C:
1069   case CallingConv::Fast:
1070   default:
1071     // Assume other calling conventions are various compute callable functions
1072     return 0;
1073   }
1074 }
1075 
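// Select llvm.amdgcn.ds.ordered.add/swap into DS_ORDERED_COUNT. The index,
// wave_release/wave_done flags, shader type and operation are packed into the
// offset field, and m0 is initialized from the intrinsic's first argument.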
1076 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1077   MachineInstr &MI, Intrinsic::ID IntrID) const {
1078   MachineBasicBlock *MBB = MI.getParent();
1079   MachineFunction *MF = MBB->getParent();
1080   const DebugLoc &DL = MI.getDebugLoc();
1081 
1082   unsigned IndexOperand = MI.getOperand(7).getImm();
1083   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1084   bool WaveDone = MI.getOperand(9).getImm() != 0;
1085 
1086   if (WaveDone && !WaveRelease)
1087     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1088 
1089   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1090   IndexOperand &= ~0x3f;
1091   unsigned CountDw = 0;
1092 
1093   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1094     CountDw = (IndexOperand >> 24) & 0xf;
1095     IndexOperand &= ~(0xf << 24);
1096 
1097     if (CountDw < 1 || CountDw > 4) {
1098       report_fatal_error(
1099         "ds_ordered_count: dword count must be between 1 and 4");
1100     }
1101   }
1102 
1103   if (IndexOperand)
1104     report_fatal_error("ds_ordered_count: bad index operand");
1105 
1106   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1107   unsigned ShaderType = getDSShaderTypeValue(*MF);
1108 
1109   unsigned Offset0 = OrderedCountIndex << 2;
1110   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1111                      (Instruction << 4);
1112 
1113   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1114     Offset1 |= (CountDw - 1) << 6;
1115 
1116   unsigned Offset = Offset0 | (Offset1 << 8);
1117 
1118   Register M0Val = MI.getOperand(2).getReg();
1119   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1120     .addReg(M0Val);
1121 
1122   Register DstReg = MI.getOperand(0).getReg();
1123   Register ValReg = MI.getOperand(3).getReg();
1124   MachineInstrBuilder DS =
1125     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1126       .addReg(ValReg)
1127       .addImm(Offset)
1128       .cloneMemRefs(MI);
1129 
1130   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1131     return false;
1132 
1133   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1134   MI.eraseFromParent();
1135   return Ret;
1136 }
1137 
1138 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1139   switch (IntrID) {
1140   case Intrinsic::amdgcn_ds_gws_init:
1141     return AMDGPU::DS_GWS_INIT;
1142   case Intrinsic::amdgcn_ds_gws_barrier:
1143     return AMDGPU::DS_GWS_BARRIER;
1144   case Intrinsic::amdgcn_ds_gws_sema_v:
1145     return AMDGPU::DS_GWS_SEMA_V;
1146   case Intrinsic::amdgcn_ds_gws_sema_br:
1147     return AMDGPU::DS_GWS_SEMA_BR;
1148   case Intrinsic::amdgcn_ds_gws_sema_p:
1149     return AMDGPU::DS_GWS_SEMA_P;
1150   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1151     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1152   default:
1153     llvm_unreachable("not a gws intrinsic");
1154   }
1155 }
1156 
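// Select the ds_gws_* intrinsics. A known constant offset is folded into the
// instruction's offset field with m0 set to 0; otherwise the variable part of
// the offset is shifted into bits [21:16] of m0.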
1157 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1158                                                      Intrinsic::ID IID) const {
1159   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1160       !STI.hasGWSSemaReleaseAll())
1161     return false;
1162 
1163   // intrinsic ID, vsrc, offset
1164   const bool HasVSrc = MI.getNumOperands() == 3;
1165   assert(HasVSrc || MI.getNumOperands() == 2);
1166 
1167   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1168   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1169   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1170     return false;
1171 
1172   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1173   assert(OffsetDef);
1174 
1175   unsigned ImmOffset;
1176 
1177   MachineBasicBlock *MBB = MI.getParent();
1178   const DebugLoc &DL = MI.getDebugLoc();
1179 
1180   MachineInstr *Readfirstlane = nullptr;
1181 
1182   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1183   // incoming offset, in case there's an add of a constant. We'll have to put it
1184   // back later.
1185   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1186     Readfirstlane = OffsetDef;
1187     BaseOffset = OffsetDef->getOperand(1).getReg();
1188     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1189   }
1190 
1191   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1192     // If we have a constant offset, try to use the 0 in m0 as the base.
1193     // TODO: Look into changing the default m0 initialization value. If the
1194     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1195     // the immediate offset.
1196 
1197     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1198     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1199       .addImm(0);
1200   } else {
1201     std::tie(BaseOffset, ImmOffset, OffsetDef)
1202       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1203 
1204     if (Readfirstlane) {
1205       // We have the constant offset now, so put the readfirstlane back on the
1206       // variable component.
1207       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1208         return false;
1209 
1210       Readfirstlane->getOperand(1).setReg(BaseOffset);
1211       BaseOffset = Readfirstlane->getOperand(0).getReg();
1212     } else {
1213       if (!RBI.constrainGenericRegister(BaseOffset,
1214                                         AMDGPU::SReg_32RegClass, *MRI))
1215         return false;
1216     }
1217 
1218     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1219     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1220       .addReg(BaseOffset)
1221       .addImm(16);
1222 
1223     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1224       .addReg(M0Base);
1225   }
1226 
1227   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1228   // offset field) % 64. Some versions of the programming guide omit the m0
1229   // part, or claim it's from offset 0.
1230   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1231 
1232   if (HasVSrc) {
1233     Register VSrc = MI.getOperand(1).getReg();
1234     MIB.addReg(VSrc);
1235     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1236       return false;
1237   }
1238 
1239   MIB.addImm(ImmOffset)
1240      .addImm(-1) // $gds
1241      .cloneMemRefs(MI);
1242 
1243   MI.eraseFromParent();
1244   return true;
1245 }
1246 
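// Select llvm.amdgcn.ds.append/consume. The pointer base goes in m0, and any
// legal constant offset is folded into the instruction's offset field.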
1247 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1248                                                       bool IsAppend) const {
1249   Register PtrBase = MI.getOperand(2).getReg();
1250   LLT PtrTy = MRI->getType(PtrBase);
1251   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1252 
1253   unsigned Offset;
1254   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1255 
1256   // TODO: Should this try to look through readfirstlane like GWS?
1257   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1258     PtrBase = MI.getOperand(2).getReg();
1259     Offset = 0;
1260   }
1261 
1262   MachineBasicBlock *MBB = MI.getParent();
1263   const DebugLoc &DL = MI.getDebugLoc();
1264   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1265 
1266   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1267     .addReg(PtrBase);
1268   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1269     .addImm(Offset)
1270     .addImm(IsGDS ? -1 : 0)
1271     .cloneMemRefs(MI);
1272 
1273   MI.eraseFromParent();
1274   return true;
1275 }
1276 
1277 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1278     MachineInstr &I) const {
1279   MachineBasicBlock *BB = I.getParent();
1280   unsigned IntrinsicID = I.getIntrinsicID();
1281   switch (IntrinsicID) {
1282   case Intrinsic::amdgcn_end_cf: {
1283     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1284     // SelectionDAG uses for wave32 vs wave64.
1285     BuildMI(*BB, &I, I.getDebugLoc(),
1286             TII.get(AMDGPU::SI_END_CF))
1287       .add(I.getOperand(1));
1288 
1289     Register Reg = I.getOperand(1).getReg();
1290     I.eraseFromParent();
1291 
1292     if (!MRI->getRegClassOrNull(Reg))
1293       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1294     return true;
1295   }
1296   case Intrinsic::amdgcn_raw_buffer_store:
1297     return selectStoreIntrinsic(I, false);
1298   case Intrinsic::amdgcn_raw_buffer_store_format:
1299     return selectStoreIntrinsic(I, true);
1300   case Intrinsic::amdgcn_ds_ordered_add:
1301   case Intrinsic::amdgcn_ds_ordered_swap:
1302     return selectDSOrderedIntrinsic(I, IntrinsicID);
1303   case Intrinsic::amdgcn_ds_gws_init:
1304   case Intrinsic::amdgcn_ds_gws_barrier:
1305   case Intrinsic::amdgcn_ds_gws_sema_v:
1306   case Intrinsic::amdgcn_ds_gws_sema_br:
1307   case Intrinsic::amdgcn_ds_gws_sema_p:
1308   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1309     return selectDSGWSIntrinsic(I, IntrinsicID);
1310   case Intrinsic::amdgcn_ds_append:
1311     return selectDSAppendConsume(I, true);
1312   case Intrinsic::amdgcn_ds_consume:
1313     return selectDSAppendConsume(I, false);
1314   default:
1315     return selectImpl(I, *CoverageInfo);
1316   }
1317 }
1318 
1319 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1320   MachineBasicBlock *BB = I.getParent();
1321   const DebugLoc &DL = I.getDebugLoc();
1322 
1323   Register DstReg = I.getOperand(0).getReg();
1324   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1325   assert(Size <= 32 || Size == 64);
1326   const MachineOperand &CCOp = I.getOperand(1);
1327   Register CCReg = CCOp.getReg();
1328   if (!isVCC(CCReg, *MRI)) {
1329     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1330                                          AMDGPU::S_CSELECT_B32;
1331     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1332             .addReg(CCReg);
1333 
1334     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1335     // register bank, because it does not cover the register class that we use
1336     // to represent it. So we need to manually set the register class here.
1337     if (!MRI->getRegClassOrNull(CCReg))
1338       MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1339     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1340             .add(I.getOperand(2))
1341             .add(I.getOperand(3));
1342 
1343     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1344                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1345     I.eraseFromParent();
1346     return Ret;
1347   }
1348 
1349   // Wide VGPR select should have been split in RegBankSelect.
1350   if (Size > 32)
1351     return false;
1352 
1353   MachineInstr *Select =
1354       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1355               .addImm(0)
1356               .add(I.getOperand(3))
1357               .addImm(0)
1358               .add(I.getOperand(2))
1359               .add(I.getOperand(1));
1360 
1361   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1362   I.eraseFromParent();
1363   return Ret;
1364 }
1365 
1366 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1367   initM0(I);
1368   return selectImpl(I, *CoverageInfo);
1369 }
1370 
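// Return the subregister index covering Size bits. Sizes below 32 use sub0,
// non-power-of-two sizes round up, and sizes above 256 bits return -1.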
1371 static int sizeToSubRegIndex(unsigned Size) {
1372   switch (Size) {
1373   case 32:
1374     return AMDGPU::sub0;
1375   case 64:
1376     return AMDGPU::sub0_sub1;
1377   case 96:
1378     return AMDGPU::sub0_sub1_sub2;
1379   case 128:
1380     return AMDGPU::sub0_sub1_sub2_sub3;
1381   case 256:
1382     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1383   default:
1384     if (Size < 32)
1385       return AMDGPU::sub0;
1386     if (Size > 256)
1387       return -1;
1388     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1389   }
1390 }
1391 
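// Select G_TRUNC as a copy, taking a subregister of the source when it is
// wider than 32 bits.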
1392 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1393   Register DstReg = I.getOperand(0).getReg();
1394   Register SrcReg = I.getOperand(1).getReg();
1395   const LLT DstTy = MRI->getType(DstReg);
1396   const LLT SrcTy = MRI->getType(SrcReg);
1397   if (!DstTy.isScalar())
1398     return false;
1399 
1400   const LLT S1 = LLT::scalar(1);
1401 
1402   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1403   const RegisterBank *DstRB;
1404   if (DstTy == S1) {
1405     // This is a special case. We don't treat s1 for legalization artifacts as
1406     // vcc booleans.
1407     DstRB = SrcRB;
1408   } else {
1409     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1410     if (SrcRB != DstRB)
1411       return false;
1412   }
1413 
1414   unsigned DstSize = DstTy.getSizeInBits();
1415   unsigned SrcSize = SrcTy.getSizeInBits();
1416 
1417   const TargetRegisterClass *SrcRC
1418     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1419   const TargetRegisterClass *DstRC
1420     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1421 
1422   if (SrcSize > 32) {
1423     int SubRegIdx = sizeToSubRegIndex(DstSize);
1424     if (SubRegIdx == -1)
1425       return false;
1426 
1427     // Deal with weird cases where the class only partially supports the subreg
1428     // index.
1429     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1430     if (!SrcRC)
1431       return false;
1432 
1433     I.getOperand(1).setSubReg(SubRegIdx);
1434   }
1435 
1436   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1437       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1438     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1439     return false;
1440   }
1441 
1442   I.setDesc(TII.get(TargetOpcode::COPY));
1443   return true;
1444 }
1445 
1446 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1447 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1448   Mask = maskTrailingOnes<unsigned>(Size);
1449   int SignedMask = static_cast<int>(Mask);
1450   return SignedMask >= -16 && SignedMask <= 64;
1451 }
1452 
1453 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1454 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1455   Register Reg, const MachineRegisterInfo &MRI,
1456   const TargetRegisterInfo &TRI) const {
1457   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1458   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1459     return RB;
1460 
1461   // Ignore the type, since we don't use vcc in artifacts.
1462   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1463     return &RBI.getRegBankFromRegClass(*RC, LLT());
1464   return nullptr;
1465 }
1466 
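// Select G_SEXT, G_ZEXT and G_ANYEXT. Any-extends become plain copies; the
// others are lowered to AND, BFE or sign-extend instructions depending on the
// register bank and the sizes involved.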
1467 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1468   bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
1469   const DebugLoc &DL = I.getDebugLoc();
1470   MachineBasicBlock &MBB = *I.getParent();
1471   const Register DstReg = I.getOperand(0).getReg();
1472   const Register SrcReg = I.getOperand(1).getReg();
1473 
1474   const LLT DstTy = MRI->getType(DstReg);
1475   const LLT SrcTy = MRI->getType(SrcReg);
1476   const unsigned SrcSize = SrcTy.getSizeInBits();
1477   const unsigned DstSize = DstTy.getSizeInBits();
1478   if (!DstTy.isScalar())
1479     return false;
1480 
1481   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1482     return selectCOPY(I);
1483 
1484   // Artifact casts should never use vcc.
1485   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1486 
1487   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1488     // 64-bit extensions should have been split up in RegBankSelect.
1489 
1490     // Try to use an and with a mask if it will save code size.
1491     unsigned Mask;
1492     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1493       MachineInstr *ExtI =
1494       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1495         .addImm(Mask)
1496         .addReg(SrcReg);
1497       I.eraseFromParent();
1498       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1499     }
1500 
1501     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1502     MachineInstr *ExtI =
1503       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1504       .addReg(SrcReg)
1505       .addImm(0) // Offset
1506       .addImm(SrcSize); // Width
1507     I.eraseFromParent();
1508     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1509   }
1510 
1511   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1512     if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
1513       return false;
1514 
1515     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1516       const unsigned SextOpc = SrcSize == 8 ?
1517         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1518       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1519         .addReg(SrcReg);
1520       I.eraseFromParent();
1521       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1522     }
1523 
1524     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1525     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1526 
1527     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
1528     if (DstSize > 32 && SrcSize <= 32) {
1529       // We need a 64-bit register source, but the high bits don't matter.
1530       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1531       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1532       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1533       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1534         .addReg(SrcReg)
1535         .addImm(AMDGPU::sub0)
1536         .addReg(UndefReg)
1537         .addImm(AMDGPU::sub1);
1538 
1539       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1540         .addReg(ExtReg)
1541         .addImm(SrcSize << 16);
1542 
1543       I.eraseFromParent();
1544       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1545     }
1546 
1547     unsigned Mask;
1548     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1549       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1550         .addReg(SrcReg)
1551         .addImm(Mask);
1552     } else {
1553       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1554         .addReg(SrcReg)
1555         .addImm(SrcSize << 16);
1556     }
1557 
1558     I.eraseFromParent();
1559     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1560   }
1561 
1562   return false;
1563 }
1564 
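// Materialize G_CONSTANT/G_FCONSTANT with S_MOV/V_MOV. 64-bit values that are
// not inline SGPR constants are split into two 32-bit moves joined by a
// REG_SEQUENCE.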
1565 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1566   MachineBasicBlock *BB = I.getParent();
1567   MachineOperand &ImmOp = I.getOperand(1);
1568 
1569   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1570   if (ImmOp.isFPImm()) {
1571     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1572     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1573   } else if (ImmOp.isCImm()) {
1574     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1575   }
1576 
1577   Register DstReg = I.getOperand(0).getReg();
1578   unsigned Size;
1579   bool IsSgpr;
1580   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1581   if (RB) {
1582     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1583     Size = MRI->getType(DstReg).getSizeInBits();
1584   } else {
1585     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1586     IsSgpr = TRI.isSGPRClass(RC);
1587     Size = TRI.getRegSizeInBits(*RC);
1588   }
1589 
1590   if (Size != 32 && Size != 64)
1591     return false;
1592 
1593   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1594   if (Size == 32) {
1595     I.setDesc(TII.get(Opcode));
1596     I.addImplicitDefUseOperands(*MF);
1597     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1598   }
1599 
1600   const DebugLoc &DL = I.getDebugLoc();
1601 
1602   APInt Imm(Size, I.getOperand(1).getImm());
1603 
1604   MachineInstr *ResInst;
1605   if (IsSgpr && TII.isInlineConstant(Imm)) {
1606     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1607       .addImm(I.getOperand(1).getImm());
1608   } else {
1609     const TargetRegisterClass *RC = IsSgpr ?
1610       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1611     Register LoReg = MRI->createVirtualRegister(RC);
1612     Register HiReg = MRI->createVirtualRegister(RC);
1613 
1614     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1615       .addImm(Imm.trunc(32).getZExtValue());
1616 
1617     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1618       .addImm(Imm.ashr(32).getZExtValue());
1619 
1620     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1621       .addReg(LoReg)
1622       .addImm(AMDGPU::sub0)
1623       .addReg(HiReg)
1624       .addImm(AMDGPU::sub1);
1625   }
1626 
1627   // We can't call constrainSelectedInstRegOperands here, because it doesn't
1628   // work for target-independent opcodes.
1629   I.eraseFromParent();
1630   const TargetRegisterClass *DstRC =
1631     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1632   if (!DstRC)
1633     return true;
1634   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1635 }
1636 
1637 static bool isConstant(const MachineInstr &MI) {
1638   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1639 }
1640 
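// Walk the chain of G_PTR_ADDs feeding a load's address and record, for each
// step, the constant offset and which operands live in SGPRs vs. VGPRs. The
// result drives the SMRD addressing-mode selection below.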
1641 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1642     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1643 
1644   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1645 
1646   assert(PtrMI);
1647 
1648   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1649     return;
1650 
1651   GEPInfo GEPInfo(*PtrMI);
1652 
1653   for (unsigned i = 1; i != 3; ++i) {
1654     const MachineOperand &GEPOp = PtrMI->getOperand(i);
1655     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1656     assert(OpDef);
1657     if (i == 2 && isConstant(*OpDef)) {
1658       // TODO: Could handle constant base + variable offset, but a combine
1659       // probably should have commuted it.
1660       assert(GEPInfo.Imm == 0);
1661       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1662       continue;
1663     }
1664     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1665     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1666       GEPInfo.SgprParts.push_back(GEPOp.getReg());
1667     else
1668       GEPInfo.VgprParts.push_back(GEPOp.getReg());
1669   }
1670 
1671   AddrInfo.push_back(GEPInfo);
1672   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1673 }
1674 
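// Return true if the (single) memory operand is known to have a uniform
// address: kernel arguments, constants, globals, the 32-bit constant address
// space, or pointers tagged with !amdgpu.uniform metadata.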
1675 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1676   if (!MI.hasOneMemOperand())
1677     return false;
1678 
1679   const MachineMemOperand *MMO = *MI.memoperands_begin();
1680   const Value *Ptr = MMO->getValue();
1681 
1682   // UndefValue means this is a load of a kernel input.  These are uniform.
1683   // Sometimes LDS instructions have constant pointers.
1684   // If Ptr is null, then that means this mem operand contains a
1685   // PseudoSourceValue like GOT.
1686   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1687       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1688     return true;
1689 
1690   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1691     return true;
1692 
1693   const Instruction *I = dyn_cast<Instruction>(Ptr);
1694   return I && I->getMetadata("amdgpu.uniform");
1695 }
1696 
1697 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1698   for (const GEPInfo &GEPInfo : AddrInfo) {
1699     if (!GEPInfo.VgprParts.empty())
1700       return true;
1701   }
1702   return false;
1703 }
1704 
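// On subtargets where DS instructions read M0, initialize M0 to -1 before
// selecting accesses to the LDS or region address spaces.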
1705 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1706   MachineBasicBlock *BB = I.getParent();
1707 
1708   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1709   unsigned AS = PtrTy.getAddressSpace();
1710   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1711       STI.ldsRequiresM0Init()) {
1712     // If DS instructions require M0 initialization, insert it before selecting.
1713     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1714       .addImm(-1);
1715   }
1716 }
1717 
1718 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1719   initM0(I);
1720   return selectImpl(I, *CoverageInfo);
1721 }
1722 
1723 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1724   MachineBasicBlock *BB = I.getParent();
1725   MachineOperand &CondOp = I.getOperand(0);
1726   Register CondReg = CondOp.getReg();
1727   const DebugLoc &DL = I.getDebugLoc();
1728 
1729   unsigned BrOpcode;
1730   Register CondPhysReg;
1731   const TargetRegisterClass *ConstrainRC;
1732 
1733   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1734   // whether the branch is uniform when selecting the instruction. In
1735   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
1736   // that RegBankSelect knows what it's doing if the branch condition is scc,
1737   // even though it currently does not.
1738   if (!isVCC(CondReg, *MRI)) {
1739     if (MRI->getType(CondReg) != LLT::scalar(32))
1740       return false;
1741 
1742     CondPhysReg = AMDGPU::SCC;
1743     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1744     // FIXME: Hack for isSCC tests
1745     ConstrainRC = &AMDGPU::SGPR_32RegClass;
1746   } else {
1747     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
1748     // We sort of know, based on the register bank, that a VCC producer ands
1749     // inactive lanes with 0. What if there were a logical operation with vcc
1750     // producers in different blocks/with different exec masks?
1751     // FIXME: Should scc->vcc copies be ANDed with exec?
1752     CondPhysReg = TRI.getVCC();
1753     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1754     ConstrainRC = TRI.getBoolRC();
1755   }
1756 
1757   if (!MRI->getRegClassOrNull(CondReg))
1758     MRI->setRegClass(CondReg, ConstrainRC);
1759 
1760   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1761     .addReg(CondReg);
1762   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1763     .addMBB(I.getOperand(1).getMBB());
1764 
1765   I.eraseFromParent();
1766   return true;
1767 }
1768 
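// A frame index is materialized with a plain 32-bit move on whichever
// register bank the destination was assigned.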
1769 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
1770   Register DstReg = I.getOperand(0).getReg();
1771   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1772   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1773   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1774   if (IsVGPR)
1775     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1776 
1777   return RBI.constrainGenericRegister(
1778     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1779 }
1780 
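// Clear the low bits of a pointer with an AND against ~((1 << NumBits) - 1).
// Since the mask only clears low bits, a 64-bit pointer only needs the AND on
// its low half; the high half is copied through unchanged.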
1781 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1782   uint64_t Align = I.getOperand(2).getImm();
1783   const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1784 
1785   MachineBasicBlock *BB = I.getParent();
1786 
1787   Register DstReg = I.getOperand(0).getReg();
1788   Register SrcReg = I.getOperand(1).getReg();
1789 
1790   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1791   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1792   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1793   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1794   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1795   const TargetRegisterClass &RegRC
1796     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1797 
1798   LLT Ty = MRI->getType(DstReg);
1799 
1800   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1801                                                                   *MRI);
1802   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1803                                                                   *MRI);
1804   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1805       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1806     return false;
1807 
1808   const DebugLoc &DL = I.getDebugLoc();
1809   Register ImmReg = MRI->createVirtualRegister(&RegRC);
1810   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1811     .addImm(Mask);
1812 
1813   if (Ty.getSizeInBits() == 32) {
1814     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1815       .addReg(SrcReg)
1816       .addReg(ImmReg);
1817     I.eraseFromParent();
1818     return true;
1819   }
1820 
1821   Register HiReg = MRI->createVirtualRegister(&RegRC);
1822   Register LoReg = MRI->createVirtualRegister(&RegRC);
1823   Register MaskLo = MRI->createVirtualRegister(&RegRC);
1824 
1825   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1826     .addReg(SrcReg, 0, AMDGPU::sub0);
1827   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1828     .addReg(SrcReg, 0, AMDGPU::sub1);
1829 
1830   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1831     .addReg(LoReg)
1832     .addReg(ImmReg);
1833   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1834     .addReg(MaskLo)
1835     .addImm(AMDGPU::sub0)
1836     .addReg(HiReg)
1837     .addImm(AMDGPU::sub1);
1838   I.eraseFromParent();
1839   return true;
1840 }
1841 
1842 /// Return the register to use for the index value, and the subregister to use
1843 /// for the indirectly accessed register.
1844 static std::pair<Register, unsigned>
1845 computeIndirectRegIndex(MachineRegisterInfo &MRI,
1846                         const SIRegisterInfo &TRI,
1847                         const TargetRegisterClass *SuperRC,
1848                         Register IdxReg,
1849                         unsigned EltSize) {
1850   Register IdxBaseReg;
1851   int Offset;
1852   MachineInstr *Unused;
1853 
1854   std::tie(IdxBaseReg, Offset, Unused)
1855     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
1856 
1857   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
1858 
1859   // Skip out of bounds offsets, or else we would end up using an undefined
1860   // register.
1861   if (static_cast<unsigned>(Offset) >= SubRegs.size())
1862     return std::make_pair(IdxReg, SubRegs[0]);
1863   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
1864 }
1865 
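// Dynamic extract: SGPR vectors use S_MOVRELS with the index in M0; VGPR
// vectors use M0 + V_MOVRELS, or GPR-index mode on subtargets that support
// it.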
1866 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
1867   MachineInstr &MI) const {
1868   Register DstReg = MI.getOperand(0).getReg();
1869   Register SrcReg = MI.getOperand(1).getReg();
1870   Register IdxReg = MI.getOperand(2).getReg();
1871 
1872   LLT DstTy = MRI->getType(DstReg);
1873   LLT SrcTy = MRI->getType(SrcReg);
1874 
1875   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1876   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1877   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1878 
1879   // The index must be scalar. If it wasn't, RegBankSelect should have moved
1880   // this into a waterfall loop.
1881   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1882     return false;
1883 
1884   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
1885                                                                   *MRI);
1886   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
1887                                                                   *MRI);
1888   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1889       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1890       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1891     return false;
1892 
1893   MachineBasicBlock *BB = MI.getParent();
1894   const DebugLoc &DL = MI.getDebugLoc();
1895   const bool Is64 = DstTy.getSizeInBits() == 64;
1896 
1897   unsigned SubReg;
1898   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
1899                                                      DstTy.getSizeInBits() / 8);
1900 
1901   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
1902     if (DstTy.getSizeInBits() != 32 && !Is64)
1903       return false;
1904 
1905     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1906       .addReg(IdxReg);
1907 
1908     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
1909     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
1910       .addReg(SrcReg, 0, SubReg)
1911       .addReg(SrcReg, RegState::Implicit);
1912     MI.eraseFromParent();
1913     return true;
1914   }
1915 
1916   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
1917     return false;
1918 
1919   if (!STI.useVGPRIndexMode()) {
1920     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1921       .addReg(IdxReg);
1922     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
1923       .addReg(SrcReg, RegState::Undef, SubReg)
1924       .addReg(SrcReg, RegState::Implicit);
1925     MI.eraseFromParent();
1926     return true;
1927   }
1928 
1929   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1930     .addReg(IdxReg)
1931     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1932   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
1933     .addReg(SrcReg, RegState::Undef, SubReg)
1934     .addReg(SrcReg, RegState::Implicit)
1935     .addReg(AMDGPU::M0, RegState::Implicit);
1936   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1937 
1938   MI.eraseFromParent();
1939   return true;
1940 }
1941 
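// Dynamic insert: write the element through the indirect register write
// pseudo, with the index placed in M0 or applied via GPR-index mode for VGPR
// vectors.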
1942 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
1943 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
1944   MachineInstr &MI) const {
1945   Register DstReg = MI.getOperand(0).getReg();
1946   Register VecReg = MI.getOperand(1).getReg();
1947   Register ValReg = MI.getOperand(2).getReg();
1948   Register IdxReg = MI.getOperand(3).getReg();
1949 
1950   LLT VecTy = MRI->getType(DstReg);
1951   LLT ValTy = MRI->getType(ValReg);
1952   unsigned VecSize = VecTy.getSizeInBits();
1953   unsigned ValSize = ValTy.getSizeInBits();
1954 
1955   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
1956   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
1957   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1958 
1959   assert(VecTy.getElementType() == ValTy);
1960 
1961   // The index must be scalar. If it wasn't, RegBankSelect should have moved
1962   // this into a waterfall loop.
1963   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1964     return false;
1965 
1966   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
1967                                                                   *MRI);
1968   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
1969                                                                   *MRI);
1970 
1971   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
1972       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
1973       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
1974       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1975     return false;
1976 
1977   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
1978     return false;
1979 
1980   unsigned SubReg;
1981   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
1982                                                      ValSize / 8);
1983 
1984   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
1985                          STI.useVGPRIndexMode();
1986 
1987   MachineBasicBlock *BB = MI.getParent();
1988   const DebugLoc &DL = MI.getDebugLoc();
1989 
1990   if (IndexMode) {
1991     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1992       .addReg(IdxReg)
1993       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
1994   } else {
1995     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1996       .addReg(IdxReg);
1997   }
1998 
1999   const MCInstrDesc &RegWriteOp
2000     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2001                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2002   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2003     .addReg(VecReg)
2004     .addReg(ValReg)
2005     .addImm(SubReg);
2006 
2007   if (IndexMode)
2008     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2009 
2010   MI.eraseFromParent();
2011   return true;
2012 }
2013 
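// Top-level entry point: opcodes that need custom handling are dispatched to
// the selectG_* helpers above; everything else goes to the TableGen-generated
// selectImpl().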
2014 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
2015   if (I.isPHI())
2016     return selectPHI(I);
2017 
2018   if (!I.isPreISelOpcode()) {
2019     if (I.isCopy())
2020       return selectCOPY(I);
2021     return true;
2022   }
2023 
2024   switch (I.getOpcode()) {
2025   case TargetOpcode::G_AND:
2026   case TargetOpcode::G_OR:
2027   case TargetOpcode::G_XOR:
2028     if (selectG_AND_OR_XOR(I))
2029       return true;
2030     return selectImpl(I, *CoverageInfo);
2031   case TargetOpcode::G_ADD:
2032   case TargetOpcode::G_SUB:
2033     if (selectImpl(I, *CoverageInfo))
2034       return true;
2035     return selectG_ADD_SUB(I);
2036   case TargetOpcode::G_UADDO:
2037   case TargetOpcode::G_USUBO:
2038   case TargetOpcode::G_UADDE:
2039   case TargetOpcode::G_USUBE:
2040     return selectG_UADDO_USUBO_UADDE_USUBE(I);
2041   case TargetOpcode::G_INTTOPTR:
2042   case TargetOpcode::G_BITCAST:
2043   case TargetOpcode::G_PTRTOINT:
2044     return selectCOPY(I);
2045   case TargetOpcode::G_CONSTANT:
2046   case TargetOpcode::G_FCONSTANT:
2047     return selectG_CONSTANT(I);
2048   case TargetOpcode::G_EXTRACT:
2049     return selectG_EXTRACT(I);
2050   case TargetOpcode::G_MERGE_VALUES:
2051   case TargetOpcode::G_BUILD_VECTOR:
2052   case TargetOpcode::G_CONCAT_VECTORS:
2053     return selectG_MERGE_VALUES(I);
2054   case TargetOpcode::G_UNMERGE_VALUES:
2055     return selectG_UNMERGE_VALUES(I);
2056   case TargetOpcode::G_PTR_ADD:
2057     return selectG_PTR_ADD(I);
2058   case TargetOpcode::G_IMPLICIT_DEF:
2059     return selectG_IMPLICIT_DEF(I);
2060   case TargetOpcode::G_INSERT:
2061     return selectG_INSERT(I);
2062   case TargetOpcode::G_INTRINSIC:
2063     return selectG_INTRINSIC(I);
2064   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2065     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
2066   case TargetOpcode::G_ICMP:
2067     if (selectG_ICMP(I))
2068       return true;
2069     return selectImpl(I, *CoverageInfo);
2070   case TargetOpcode::G_LOAD:
2071   case TargetOpcode::G_ATOMIC_CMPXCHG:
2072   case TargetOpcode::G_ATOMICRMW_XCHG:
2073   case TargetOpcode::G_ATOMICRMW_ADD:
2074   case TargetOpcode::G_ATOMICRMW_SUB:
2075   case TargetOpcode::G_ATOMICRMW_AND:
2076   case TargetOpcode::G_ATOMICRMW_OR:
2077   case TargetOpcode::G_ATOMICRMW_XOR:
2078   case TargetOpcode::G_ATOMICRMW_MIN:
2079   case TargetOpcode::G_ATOMICRMW_MAX:
2080   case TargetOpcode::G_ATOMICRMW_UMIN:
2081   case TargetOpcode::G_ATOMICRMW_UMAX:
2082   case TargetOpcode::G_ATOMICRMW_FADD:
2083     return selectG_LOAD_ATOMICRMW(I);
2084   case TargetOpcode::G_SELECT:
2085     return selectG_SELECT(I);
2086   case TargetOpcode::G_STORE:
2087     return selectG_STORE(I);
2088   case TargetOpcode::G_TRUNC:
2089     return selectG_TRUNC(I);
2090   case TargetOpcode::G_SEXT:
2091   case TargetOpcode::G_ZEXT:
2092   case TargetOpcode::G_ANYEXT:
2093     if (selectImpl(I, *CoverageInfo))
2094       return true;
2095     return selectG_SZA_EXT(I);
2096   case TargetOpcode::G_BRCOND:
2097     return selectG_BRCOND(I);
2098   case TargetOpcode::G_FRAME_INDEX:
2099     return selectG_FRAME_INDEX(I);
2100   case TargetOpcode::G_PTR_MASK:
2101     return selectG_PTR_MASK(I);
2102   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
2103     return selectG_EXTRACT_VECTOR_ELT(I);
2104   case TargetOpcode::G_INSERT_VECTOR_ELT:
2105     return selectG_INSERT_VECTOR_ELT(I);
2106   case AMDGPU::G_AMDGPU_ATOMIC_INC:
2107   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
2108     initM0(I);
2109     return selectImpl(I, *CoverageInfo);
2110   default:
2111     return selectImpl(I, *CoverageInfo);
2112   }
2113   return false;
2114 }
2115 
2116 InstructionSelector::ComplexRendererFns
2117 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
2118   return {{
2119       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2120   }};
2122 }
2123 
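// Strip G_FNEG/G_FABS from a VOP3 source and fold them into the NEG/ABS
// source-modifier bits, returning the underlying source register and the
// modifiers.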
2124 std::pair<Register, unsigned>
2125 AMDGPUInstructionSelector::selectVOP3ModsImpl(
2126   Register Src) const {
2127   unsigned Mods = 0;
2128   MachineInstr *MI = MRI->getVRegDef(Src);
2129 
2130   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
2131     Src = MI->getOperand(1).getReg();
2132     Mods |= SISrcMods::NEG;
2133     MI = MRI->getVRegDef(Src);
2134   }
2135 
2136   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
2137     Src = MI->getOperand(1).getReg();
2138     Mods |= SISrcMods::ABS;
2139   }
2140 
2141   return std::make_pair(Src, Mods);
2142 }
2143 
2144 ///
2145 /// This will select either an SGPR or VGPR operand and will save us from
2146 /// having to write an extra tablegen pattern.
2147 InstructionSelector::ComplexRendererFns
2148 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
2149   return {{
2150       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2151   }};
2152 }
2153 
2154 InstructionSelector::ComplexRendererFns
2155 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2156   Register Src;
2157   unsigned Mods;
2158   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2159 
2160   return {{
2161       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2162       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2163       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2164       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2165   }};
2166 }
2167 
2168 InstructionSelector::ComplexRendererFns
2169 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2170   return {{
2171       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2172       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2173       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2174   }};
2175 }
2176 
2177 InstructionSelector::ComplexRendererFns
2178 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2179   Register Src;
2180   unsigned Mods;
2181   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2182 
2183   return {{
2184       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2185       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2186   }};
2187 }
2188 
2189 InstructionSelector::ComplexRendererFns
2190 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2191   Register Src;
2192   unsigned Mods;
2193   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2194   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2195     return None;
2196 
2197   return {{
2198       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2199       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2200   }};
2201 }
2202 
2203 InstructionSelector::ComplexRendererFns
2204 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
2205   // FIXME: Handle clamp and op_sel
2206   return {{
2207       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2208       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
2209       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // clamp
2210   }};
2211 }
2212 
2213 InstructionSelector::ComplexRendererFns
2214 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2215   // FIXME: Handle op_sel
2216   return {{
2217       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2218       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2219   }};
2220 }
2221 
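// Match an SMRD address that is an SGPR base plus a legal immediate offset,
// returning the base register and the subtarget-encoded offset.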
2222 InstructionSelector::ComplexRendererFns
2223 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2224   SmallVector<GEPInfo, 4> AddrInfo;
2225   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2226 
2227   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2228     return None;
2229 
2230   const GEPInfo &GEPInfo = AddrInfo[0];
2231 
2232   if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
2233     return None;
2234 
2235   unsigned PtrReg = GEPInfo.SgprParts[0];
2236   int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2237   return {{
2238     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2239     [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
2240   }};
2241 }
2242 
2243 InstructionSelector::ComplexRendererFns
2244 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2245   SmallVector<GEPInfo, 4> AddrInfo;
2246   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2247 
2248   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2249     return None;
2250 
2251   const GEPInfo &GEPInfo = AddrInfo[0];
2252   unsigned PtrReg = GEPInfo.SgprParts[0];
2253   int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2254   if (!isUInt<32>(EncodedImm))
2255     return None;
2256 
2257   return {{
2258     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2259     [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
2260   }};
2261 }
2262 
2263 InstructionSelector::ComplexRendererFns
2264 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2265   MachineInstr *MI = Root.getParent();
2266   MachineBasicBlock *MBB = MI->getParent();
2267 
2268   SmallVector<GEPInfo, 4> AddrInfo;
2269   getAddrModeInfo(*MI, *MRI, AddrInfo);
2270 
2271   // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
2272   // then we could select all ptr + 32-bit offsets, not just immediate offsets.
2273   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2274     return None;
2275 
2276   const GEPInfo &GEPInfo = AddrInfo[0];
2277   if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
2278     return None;
2279 
2280   // If we make it this far, we have a load with a 32-bit immediate offset.
2281   // It is OK to select this using an sgpr offset, because we have already
2282   // failed trying to select this load into one of the _IMM variants, since
2283   // the _IMM patterns are considered before the _SGPR patterns.
2284   unsigned PtrReg = GEPInfo.SgprParts[0];
2285   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2286   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2287           .addImm(GEPInfo.Imm);
2288   return {{
2289     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2290     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2291   }};
2292 }
2293 
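// Try to fold a constant G_PTR_ADD offset into the immediate offset field of
// a FLAT access; otherwise fall back to the raw address with a zero offset.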
2294 template <bool Signed>
2295 InstructionSelector::ComplexRendererFns
2296 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2297   MachineInstr *MI = Root.getParent();
2298 
2299   InstructionSelector::ComplexRendererFns Default = {{
2300       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2301       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2302       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2303     }};
2304 
2305   if (!STI.hasFlatInstOffsets())
2306     return Default;
2307 
2308   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2309   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2310     return Default;
2311 
2312   Optional<int64_t> Offset =
2313     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2314   if (!Offset.hasValue())
2315     return Default;
2316 
2317   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2318   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2319     return Default;
2320 
2321   Register BasePtr = OpDef->getOperand(1).getReg();
2322 
2323   return {{
2324       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2325       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2326       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2327     }};
2328 }
2329 
2330 InstructionSelector::ComplexRendererFns
2331 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2332   return selectFlatOffsetImpl<false>(Root);
2333 }
2334 
2335 InstructionSelector::ComplexRendererFns
2336 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2337   return selectFlatOffsetImpl<true>(Root);
2338 }
2339 
2340 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2341   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2342   return PSV && PSV->isStack();
2343 }
2344 
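// Build the rsrc/vaddr/soffset/offset operands for an "offen" MUBUF scratch
// access. Constant addresses are split into an immediate low part (offset &
// 4095) plus a materialized high part, and frame indexes are folded directly
// into vaddr when possible.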
2345 InstructionSelector::ComplexRendererFns
2346 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2347   MachineInstr *MI = Root.getParent();
2348   MachineBasicBlock *MBB = MI->getParent();
2349   MachineFunction *MF = MBB->getParent();
2350   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2351 
2352   int64_t Offset = 0;
2353   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2354     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2355 
2356     // TODO: Should this be inside the render function? The iterator seems to
2357     // move.
2358     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2359             HighBits)
2360       .addImm(Offset & ~4095);
2361 
2362     return {{[=](MachineInstrBuilder &MIB) { // rsrc
2363                MIB.addReg(Info->getScratchRSrcReg());
2364              },
2365              [=](MachineInstrBuilder &MIB) { // vaddr
2366                MIB.addReg(HighBits);
2367              },
2368              [=](MachineInstrBuilder &MIB) { // soffset
2369                const MachineMemOperand *MMO = *MI->memoperands_begin();
2370                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2371 
2372                Register SOffsetReg = isStackPtrRelative(PtrInfo)
2373                                          ? Info->getStackPtrOffsetReg()
2374                                          : Info->getScratchWaveOffsetReg();
2375                MIB.addReg(SOffsetReg);
2376              },
2377              [=](MachineInstrBuilder &MIB) { // offset
2378                MIB.addImm(Offset & 4095);
2379              }}};
2380   }
2381 
2382   assert(Offset == 0);
2383 
2384   // Try to fold a frame index directly into the MUBUF vaddr field, and any
2385   // offsets.
2386   Optional<int> FI;
2387   Register VAddr = Root.getReg();
2388   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2389     if (isBaseWithConstantOffset(Root, *MRI)) {
2390       const MachineOperand &LHS = RootDef->getOperand(1);
2391       const MachineOperand &RHS = RootDef->getOperand(2);
2392       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2393       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2394       if (LHSDef && RHSDef) {
2395         int64_t PossibleOffset =
2396             RHSDef->getOperand(1).getCImm()->getSExtValue();
2397         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2398             (!STI.privateMemoryResourceIsRangeChecked() ||
2399              KnownBits->signBitIsZero(LHS.getReg()))) {
2400           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2401             FI = LHSDef->getOperand(1).getIndex();
2402           else
2403             VAddr = LHS.getReg();
2404           Offset = PossibleOffset;
2405         }
2406       }
2407     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2408       FI = RootDef->getOperand(1).getIndex();
2409     }
2410   }
2411 
2412   // If we don't know this private access is a local stack object, it needs to
2413   // be relative to the entry point's scratch wave offset register.
2414   // TODO: Should split large offsets that don't fit, as above.
2415   // TODO: Don't use the scratch wave offset just because the offset didn't fit.
2416   Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2417                                    : Info->getScratchWaveOffsetReg();
2418 
2419   return {{[=](MachineInstrBuilder &MIB) { // rsrc
2420              MIB.addReg(Info->getScratchRSrcReg());
2421            },
2422            [=](MachineInstrBuilder &MIB) { // vaddr
2423              if (FI.hasValue())
2424                MIB.addFrameIndex(FI.getValue());
2425              else
2426                MIB.addReg(VAddr);
2427            },
2428            [=](MachineInstrBuilder &MIB) { // soffset
2429              MIB.addReg(SOffset);
2430            },
2431            [=](MachineInstrBuilder &MIB) { // offset
2432              MIB.addImm(Offset);
2433            }}};
2434 }
2435 
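// A DS offset must fit the instruction's unsigned 16-bit (or 8-bit) offset
// field. Unless unsafe folding is enabled, subtargets without a usable DS
// offset also require the sign bit of the base to be known zero (see the
// Southern Islands note below).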
2436 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
2437                                                 int64_t Offset,
2438                                                 unsigned OffsetBits) const {
2439   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2440       (OffsetBits == 8 && !isUInt<8>(Offset)))
2441     return false;
2442 
2443   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2444     return true;
2445 
2446   // On Southern Islands, instructions with a negative base value and an offset
2447   // don't seem to work.
2448   return KnownBits->signBitIsZero(Base);
2449 }
2450 
2451 InstructionSelector::ComplexRendererFns
2452 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2453     MachineOperand &Root) const {
2454   MachineInstr *MI = Root.getParent();
2455   MachineBasicBlock *MBB = MI->getParent();
2456 
2457   int64_t Offset = 0;
2458   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2459       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2460     return {};
2461 
2462   const MachineFunction *MF = MBB->getParent();
2463   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2464   const MachineMemOperand *MMO = *MI->memoperands_begin();
2465   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2466 
2467   Register SOffsetReg = isStackPtrRelative(PtrInfo)
2468                             ? Info->getStackPtrOffsetReg()
2469                             : Info->getScratchWaveOffsetReg();
2470   return {{
2471       [=](MachineInstrBuilder &MIB) {
2472         MIB.addReg(Info->getScratchRSrcReg());
2473       },                                                         // rsrc
2474       [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2475       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }      // offset
2476   }};
2477 }
2478 
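// Split a DS address into (base, offset) when it is a base plus a constant
// that fits the 16-bit offset field; otherwise use the address as-is with a
// zero offset.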
2479 std::pair<Register, unsigned>
2480 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
2481   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2482   if (!RootDef)
2483     return std::make_pair(Root.getReg(), 0);
2484 
2485   int64_t ConstAddr = 0;
2486   if (isBaseWithConstantOffset(Root, *MRI)) {
2487     const MachineOperand &LHS = RootDef->getOperand(1);
2488     const MachineOperand &RHS = RootDef->getOperand(2);
2489     const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2490     const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2491     if (LHSDef && RHSDef) {
2492       int64_t PossibleOffset =
2493         RHSDef->getOperand(1).getCImm()->getSExtValue();
2494       if (isDSOffsetLegal(LHS.getReg(), PossibleOffset, 16)) {
2495         // (add n0, c0)
2496         return std::make_pair(LHS.getReg(), PossibleOffset);
2497       }
2498     }
2499   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2500     // TODO
2503   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2504     // TODO
2506   }
2507 
2508   return std::make_pair(Root.getReg(), 0);
2509 }
2510 
2511 InstructionSelector::ComplexRendererFns
2512 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2514   Register Reg;
2515   unsigned Offset;
2516   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
2517   return {{
2518       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2519       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
2520     }};
2521 }
2522 
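// Custom operand renderers, used by the imported selection patterns to turn
// G_CONSTANT/G_FCONSTANT operands into the plain immediates the selected
// instructions expect.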
2523 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2524                                                  const MachineInstr &MI,
2525                                                  int OpIdx) const {
2526   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2527          "Expected G_CONSTANT");
2528   Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), *MRI);
2529   assert(CstVal && "Expected constant value");
2530   MIB.addImm(CstVal.getValue());
2531 }
2532 
2533 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
2534                                                 const MachineInstr &MI,
2535                                                 int OpIdx) const {
2536   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2537          "Expected G_CONSTANT");
2538   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
2539 }
2540 
2541 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
2542                                                  const MachineInstr &MI,
2543                                                  int OpIdx) const {
2544   assert(OpIdx == -1);
2545 
2546   const MachineOperand &Op = MI.getOperand(1);
2547   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
2548     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2549   else {
2550     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2551     MIB.addImm(Op.getCImm()->getSExtValue());
2552   }
2553 }
2554 
2555 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
2556                                                 const MachineInstr &MI,
2557                                                 int OpIdx) const {
2558   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2559          "Expected G_CONSTANT");
2560   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
2561 }
2562 
2563 /// This only really exists to satisfy DAG type checking machinery, so is a
2564 /// no-op here.
2565 void AMDGPUInstructionSelector::renderTruncTImm32(MachineInstrBuilder &MIB,
2566                                                   const MachineInstr &MI,
2567                                                   int OpIdx) const {
2568   MIB.addImm(MI.getOperand(OpIdx).getImm());
2569 }
2570 
2571 void AMDGPUInstructionSelector::renderTruncTImm16(MachineInstrBuilder &MIB,
2572                                                   const MachineInstr &MI,
2573                                                   int OpIdx) const {
2574   MIB.addImm(MI.getOperand(OpIdx).getImm());
2575 }
2576 
2577 void AMDGPUInstructionSelector::renderTruncTImm1(MachineInstrBuilder &MIB,
2578                                                  const MachineInstr &MI,
2579                                                  int OpIdx) const {
2580   MIB.addImm(MI.getOperand(OpIdx).getImm());
2581 }
2582 
2583 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
2584   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
2585 }
2586 
2587 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
2588   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
2589 }
2590 
2591 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
2592   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
2593 }
2594 
2595 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
2596   return TII.isInlineConstant(Imm);
2597 }
2598