1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
#include "AMDGPUGlobalISelUtils.h"
#include "AMDGPUInstrInfo.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPURegisterInfo.h"
19 #include "AMDGPUSubtarget.h"
20 #include "AMDGPUTargetMachine.h"
21 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
22 #include "SIMachineFunctionInfo.h"
23 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
25 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
26 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
27 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
28 #include "llvm/CodeGen/GlobalISel/Utils.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFunction.h"
31 #include "llvm/CodeGen/MachineInstr.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/IR/Type.h"
35 #include "llvm/Support/Debug.h"
36 #include "llvm/Support/raw_ostream.h"
37 
38 #define DEBUG_TYPE "amdgpu-isel"
39 
40 using namespace llvm;
41 using namespace MIPatternMatch;
42 
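// The TableGen-erated selector code below refers to the subtarget as
// AMDGPUSubtarget; alias it to GCNSubtarget while the generated file is
// included.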
43 #define GET_GLOBALISEL_IMPL
44 #define AMDGPUSubtarget GCNSubtarget
45 #include "AMDGPUGenGlobalISel.inc"
46 #undef GET_GLOBALISEL_IMPL
47 #undef AMDGPUSubtarget
48 
49 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
50     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
51     const AMDGPUTargetMachine &TM)
52     : InstructionSelector(), TII(*STI.getInstrInfo()),
53       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
54       STI(STI),
55       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
56 #define GET_GLOBALISEL_PREDICATES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_PREDICATES_INIT
59 #define GET_GLOBALISEL_TEMPORARIES_INIT
60 #include "AMDGPUGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_TEMPORARIES_INIT
62 {
63 }
64 
65 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
66 
67 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
68                                         CodeGenCoverage &CoverageInfo) {
69   MRI = &MF.getRegInfo();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo);
71 }
72 
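// Return whether \p Reg holds a wave-wide boolean (VCC-like) value: the
// physical VCC register, a virtual register of the boolean register class
// with an s1 type, or a register assigned to the VCC register bank.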
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   if (Register::isPhysicalRegister(Reg))
76     return Reg == TRI.getVCC();
77 
78   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
79   const TargetRegisterClass *RC =
80       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
81   if (RC) {
82     const LLT Ty = MRI.getType(Reg);
83     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
84            Ty.isValid() && Ty.getSizeInBits() == 1;
85   }
86 
87   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
88   return RB->getID() == AMDGPU::VCCRegBankID;
89 }
90 
91 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
92   const DebugLoc &DL = I.getDebugLoc();
93   MachineBasicBlock *BB = I.getParent();
94   I.setDesc(TII.get(TargetOpcode::COPY));
95 
96   const MachineOperand &Src = I.getOperand(1);
97   MachineOperand &Dst = I.getOperand(0);
98   Register DstReg = Dst.getReg();
99   Register SrcReg = Src.getReg();
100 
101   if (isVCC(DstReg, *MRI)) {
102     if (SrcReg == AMDGPU::SCC) {
103       const TargetRegisterClass *RC
104         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105       if (!RC)
106         return true;
107       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
108     }
109 
110     if (!isVCC(SrcReg, *MRI)) {
111       // TODO: Should probably leave the copy and let copyPhysReg expand it.
112       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
113         return false;
114 
115       const TargetRegisterClass *SrcRC
116         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
117 
118       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
119 
120       // We can't trust the high bits at this point, so clear them.
121 
122       // TODO: Skip masking high bits if def is known boolean.
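      // The AND below keeps only bit 0 of the source; comparing that against
      // zero with V_CMP_NE_U32 then rebuilds a full wave mask in DstReg.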
123 
124       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
125         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
126       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
127         .addImm(1)
128         .addReg(SrcReg);
129       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
130         .addImm(0)
131         .addReg(MaskedReg);
132 
133       if (!MRI->getRegClassOrNull(SrcReg))
134         MRI->setRegClass(SrcReg, SrcRC);
135       I.eraseFromParent();
136       return true;
137     }
138 
139     const TargetRegisterClass *RC =
140       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
141     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
142       return false;
143 
144     // Don't constrain the source register to a class so the def instruction
145     // handles it (unless it's undef).
146     //
    // FIXME: This is a hack. When selecting the def, we need to know
    // specifically that the result is VCCRegBank, and not just an SGPR
    // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
150     if (Src.isUndef()) {
151       const TargetRegisterClass *SrcRC =
152         TRI.getConstrainedRegClassForOperand(Src, *MRI);
153       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
154         return false;
155     }
156 
157     return true;
158   }
159 
160   for (const MachineOperand &MO : I.operands()) {
161     if (Register::isPhysicalRegister(MO.getReg()))
162       continue;
163 
164     const TargetRegisterClass *RC =
165             TRI.getConstrainedRegClassForOperand(MO, *MRI);
166     if (!RC)
167       continue;
168     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
169   }
170   return true;
171 }
172 
173 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
174   const Register DefReg = I.getOperand(0).getReg();
175   const LLT DefTy = MRI->getType(DefReg);
176 
177   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
178 
179   const RegClassOrRegBank &RegClassOrBank =
180     MRI->getRegClassOrRegBank(DefReg);
181 
182   const TargetRegisterClass *DefRC
183     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
184   if (!DefRC) {
185     if (!DefTy.isValid()) {
186       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
187       return false;
188     }
189 
190     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
191     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
192     if (!DefRC) {
193       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
194       return false;
195     }
196   }
197 
198   // TODO: Verify that all registers have the same bank
199   I.setDesc(TII.get(TargetOpcode::PHI));
200   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
201 }
202 
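// Extract the 32-bit half selected by \p SubIdx from the 64-bit register or
// immediate operand \p MO, materializing a copy into a new register of class
// \p SubRC when \p MO is a register.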
203 MachineOperand
204 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
205                                            const TargetRegisterClass &SubRC,
206                                            unsigned SubIdx) const {
207 
208   MachineInstr *MI = MO.getParent();
209   MachineBasicBlock *BB = MO.getParent()->getParent();
210   Register DstReg = MRI->createVirtualRegister(&SubRC);
211 
212   if (MO.isReg()) {
213     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
214     Register Reg = MO.getReg();
215     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
216             .addReg(Reg, 0, ComposedSubIdx);
217 
218     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
219                                      MO.isKill(), MO.isDead(), MO.isUndef(),
220                                      MO.isEarlyClobber(), 0, MO.isDebug(),
221                                      MO.isInternalRead());
222   }
223 
224   assert(MO.isImm());
225 
226   APInt Imm(64, MO.getImm());
227 
228   switch (SubIdx) {
229   default:
    llvm_unreachable("do not know how to split immediate with this sub index");
231   case AMDGPU::sub0:
232     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
233   case AMDGPU::sub1:
234     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
235   }
236 }
237 
238 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
239   switch (Opc) {
240   case AMDGPU::G_AND:
241     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
242   case AMDGPU::G_OR:
243     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
244   case AMDGPU::G_XOR:
245     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
246   default:
247     llvm_unreachable("not a bit op");
248   }
249 }
250 
251 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
252   MachineOperand &Dst = I.getOperand(0);
253   MachineOperand &Src0 = I.getOperand(1);
254   MachineOperand &Src1 = I.getOperand(2);
255   Register DstReg = Dst.getReg();
256   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
257 
258   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
259   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
260     const TargetRegisterClass *RC = TRI.getBoolRC();
261     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
262                                            RC == &AMDGPU::SReg_64RegClass);
263     I.setDesc(TII.get(InstOpc));
264 
265     // FIXME: Hack to avoid turning the register bank into a register class.
266     // The selector for G_ICMP relies on seeing the register bank for the result
267     // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
268     // be ambiguous whether it's a scalar or vector bool.
269     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
270       MRI->setRegClass(Src0.getReg(), RC);
271     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
272       MRI->setRegClass(Src1.getReg(), RC);
273 
274     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
275   }
276 
277   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
278   // the result?
279   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
280     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
281     I.setDesc(TII.get(InstOpc));
282     // Dead implicit-def of scc
283     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
284                                            true, // isImp
285                                            false, // isKill
286                                            true)); // isDead
287     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
288   }
289 
290   return false;
291 }
292 
293 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
294   MachineBasicBlock *BB = I.getParent();
295   MachineFunction *MF = BB->getParent();
296   Register DstReg = I.getOperand(0).getReg();
297   const DebugLoc &DL = I.getDebugLoc();
298   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
299   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
300   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
301   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
302 
303   if (Size == 32) {
304     if (IsSALU) {
305       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
306       MachineInstr *Add =
307         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
308         .add(I.getOperand(1))
309         .add(I.getOperand(2));
310       I.eraseFromParent();
311       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
312     }
313 
314     if (STI.hasAddNoCarry()) {
315       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
316       I.setDesc(TII.get(Opc));
317       I.addOperand(*MF, MachineOperand::CreateImm(0));
318       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
319       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
320     }
321 
322     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
323 
324     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
325     MachineInstr *Add
326       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
327       .addDef(UnusedCarry, RegState::Dead)
328       .add(I.getOperand(1))
329       .add(I.getOperand(2))
330       .addImm(0);
331     I.eraseFromParent();
332     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
333   }
334 
335   assert(!Sub && "illegal sub should not reach here");
336 
337   const TargetRegisterClass &RC
338     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
339   const TargetRegisterClass &HalfRC
340     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
341 
342   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
343   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
344   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
345   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
346 
347   Register DstLo = MRI->createVirtualRegister(&HalfRC);
348   Register DstHi = MRI->createVirtualRegister(&HalfRC);
349 
350   if (IsSALU) {
351     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
352       .add(Lo1)
353       .add(Lo2);
354     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
355       .add(Hi1)
356       .add(Hi2);
357   } else {
358     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
359     Register CarryReg = MRI->createVirtualRegister(CarryRC);
360     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
361       .addDef(CarryReg)
362       .add(Lo1)
363       .add(Lo2)
364       .addImm(0);
365     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
366       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
367       .add(Hi1)
368       .add(Hi2)
369       .addReg(CarryReg, RegState::Kill)
370       .addImm(0);
371 
372     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
373       return false;
374   }
375 
376   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
377     .addReg(DstLo)
378     .addImm(AMDGPU::sub0)
379     .addReg(DstHi)
380     .addImm(AMDGPU::sub1);
381 
383   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
384     return false;
385 
386   I.eraseFromParent();
387   return true;
388 }
389 
390 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
391   MachineInstr &I) const {
392   MachineBasicBlock *BB = I.getParent();
393   MachineFunction *MF = BB->getParent();
394   const DebugLoc &DL = I.getDebugLoc();
395   Register Dst0Reg = I.getOperand(0).getReg();
396   Register Dst1Reg = I.getOperand(1).getReg();
397   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
398                      I.getOpcode() == AMDGPU::G_UADDE;
399   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
400                           I.getOpcode() == AMDGPU::G_USUBE;
401 
402   if (isVCC(Dst1Reg, *MRI)) {
    // The names of these opcodes are misleading: v_add_i32/v_sub_i32 have an
    // unsigned carry out despite the _i32 suffix. They were renamed to _U32
    // in VI.
    // FIXME: We should probably rename the opcodes here.
406     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
407     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
408     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
409     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
410     I.addOperand(*MF, MachineOperand::CreateImm(0));
411     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
412   }
413 
414   Register Src0Reg = I.getOperand(2).getReg();
415   Register Src1Reg = I.getOperand(3).getReg();
416 
417   if (HasCarryIn) {
418     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
419       .addReg(I.getOperand(4).getReg());
420   }
421 
422   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
423   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
424 
425   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
426     .add(I.getOperand(2))
427     .add(I.getOperand(3));
428   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
429     .addReg(AMDGPU::SCC);
430 
431   if (!MRI->getRegClassOrNull(Dst1Reg))
432     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
433 
434   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
435       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
436       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
437     return false;
438 
439   if (HasCarryIn &&
440       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
441                                     AMDGPU::SReg_32RegClass, *MRI))
442     return false;
443 
444   I.eraseFromParent();
445   return true;
446 }
447 
448 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
449   MachineBasicBlock *BB = I.getParent();
450   Register DstReg = I.getOperand(0).getReg();
451   Register SrcReg = I.getOperand(1).getReg();
452   LLT DstTy = MRI->getType(DstReg);
453   LLT SrcTy = MRI->getType(SrcReg);
454   const unsigned SrcSize = SrcTy.getSizeInBits();
455   const unsigned DstSize = DstTy.getSizeInBits();
456 
457   // TODO: Should handle any multiple of 32 offset.
458   unsigned Offset = I.getOperand(2).getImm();
459   if (Offset % DstSize != 0)
460     return false;
461 
462   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
463   const TargetRegisterClass *SrcRC =
464     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
465   if (!SrcRC)
466     return false;
467 
468   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
469 
470   const DebugLoc &DL = I.getDebugLoc();
471   MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
472                                .addReg(SrcReg, 0, SubRegs[Offset / DstSize]);
473 
474   for (const MachineOperand &MO : Copy->operands()) {
475     const TargetRegisterClass *RC =
476             TRI.getConstrainedRegClassForOperand(MO, *MRI);
477     if (!RC)
478       continue;
479     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
480   }
481   I.eraseFromParent();
482   return true;
483 }
484 
485 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
486   MachineBasicBlock *BB = MI.getParent();
487   Register DstReg = MI.getOperand(0).getReg();
488   LLT DstTy = MRI->getType(DstReg);
489   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
490 
491   const unsigned SrcSize = SrcTy.getSizeInBits();
492   if (SrcSize < 32)
493     return selectImpl(MI, *CoverageInfo);
494 
495   const DebugLoc &DL = MI.getDebugLoc();
496   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
497   const unsigned DstSize = DstTy.getSizeInBits();
498   const TargetRegisterClass *DstRC =
499     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
500   if (!DstRC)
501     return false;
502 
503   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
504   MachineInstrBuilder MIB =
505     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
506   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
507     MachineOperand &Src = MI.getOperand(I + 1);
508     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
509     MIB.addImm(SubRegs[I]);
510 
511     const TargetRegisterClass *SrcRC
512       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
513     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
514       return false;
515   }
516 
517   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
518     return false;
519 
520   MI.eraseFromParent();
521   return true;
522 }
523 
524 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
525   MachineBasicBlock *BB = MI.getParent();
526   const int NumDst = MI.getNumOperands() - 1;
527 
528   MachineOperand &Src = MI.getOperand(NumDst);
529 
530   Register SrcReg = Src.getReg();
531   Register DstReg0 = MI.getOperand(0).getReg();
532   LLT DstTy = MRI->getType(DstReg0);
533   LLT SrcTy = MRI->getType(SrcReg);
534 
535   const unsigned DstSize = DstTy.getSizeInBits();
536   const unsigned SrcSize = SrcTy.getSizeInBits();
537   const DebugLoc &DL = MI.getDebugLoc();
538   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
539 
540   const TargetRegisterClass *SrcRC =
541     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
542   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
543     return false;
544 
545   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
546 
547   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
548   // source, and this relies on the fact that the same subregister indices are
549   // used for both.
550   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
551   for (int I = 0, E = NumDst; I != E; ++I) {
552     MachineOperand &Dst = MI.getOperand(I);
553     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
554       .addReg(SrcReg, SrcFlags, SubRegs[I]);
555 
556     const TargetRegisterClass *DstRC =
557       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
558     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
559       return false;
560   }
561 
562   MI.eraseFromParent();
563   return true;
564 }
565 
566 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
567   return selectG_ADD_SUB(I);
568 }
569 
570 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
571   const MachineOperand &MO = I.getOperand(0);
572 
573   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
574   // regbank check here is to know why getConstrainedRegClassForOperand failed.
575   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
576   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
577       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
578     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
579     return true;
580   }
581 
582   return false;
583 }
584 
585 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
586   MachineBasicBlock *BB = I.getParent();
587 
588   Register DstReg = I.getOperand(0).getReg();
589   Register Src0Reg = I.getOperand(1).getReg();
590   Register Src1Reg = I.getOperand(2).getReg();
591   LLT Src1Ty = MRI->getType(Src1Reg);
592 
593   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
594   unsigned InsSize = Src1Ty.getSizeInBits();
595 
596   int64_t Offset = I.getOperand(3).getImm();
597   if (Offset % 32 != 0)
598     return false;
599 
600   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
601   if (SubReg == AMDGPU::NoSubRegister)
602     return false;
603 
604   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
605   const TargetRegisterClass *DstRC =
606     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
607   if (!DstRC)
608     return false;
609 
610   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
611   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
612   const TargetRegisterClass *Src0RC =
613     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
614   const TargetRegisterClass *Src1RC =
615     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
616 
617   // Deal with weird cases where the class only partially supports the subreg
618   // index.
619   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
620   if (!Src0RC)
621     return false;
622 
623   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
624       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
625       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
626     return false;
627 
628   const DebugLoc &DL = I.getDebugLoc();
629   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
630     .addReg(Src0Reg)
631     .addReg(Src1Reg)
632     .addImm(SubReg);
633 
634   I.eraseFromParent();
635   return true;
636 }
637 
638 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
639   unsigned IntrinsicID = I.getIntrinsicID();
640   switch (IntrinsicID) {
641   case Intrinsic::amdgcn_if_break: {
642     MachineBasicBlock *BB = I.getParent();
643 
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
645     // SelectionDAG uses for wave32 vs wave64.
646     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
647       .add(I.getOperand(0))
648       .add(I.getOperand(2))
649       .add(I.getOperand(3));
650 
651     Register DstReg = I.getOperand(0).getReg();
652     Register Src0Reg = I.getOperand(2).getReg();
653     Register Src1Reg = I.getOperand(3).getReg();
654 
655     I.eraseFromParent();
656 
657     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
658       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
659 
660     return true;
661   }
662   default:
663     return selectImpl(I, *CoverageInfo);
664   }
665 }
666 
667 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
668   if (Size != 32 && Size != 64)
669     return -1;
670   switch (P) {
671   default:
672     llvm_unreachable("Unknown condition code!");
673   case CmpInst::ICMP_NE:
674     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
675   case CmpInst::ICMP_EQ:
676     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
677   case CmpInst::ICMP_SGT:
678     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
679   case CmpInst::ICMP_SGE:
680     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
681   case CmpInst::ICMP_SLT:
682     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
683   case CmpInst::ICMP_SLE:
684     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
685   case CmpInst::ICMP_UGT:
686     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
687   case CmpInst::ICMP_UGE:
688     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
689   case CmpInst::ICMP_ULT:
690     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
691   case CmpInst::ICMP_ULE:
692     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
693   }
694 }
695 
696 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
697                                               unsigned Size) const {
698   if (Size == 64) {
699     if (!STI.hasScalarCompareEq64())
700       return -1;
701 
702     switch (P) {
703     case CmpInst::ICMP_NE:
704       return AMDGPU::S_CMP_LG_U64;
705     case CmpInst::ICMP_EQ:
706       return AMDGPU::S_CMP_EQ_U64;
707     default:
708       return -1;
709     }
710   }
711 
712   if (Size != 32)
713     return -1;
714 
715   switch (P) {
716   case CmpInst::ICMP_NE:
717     return AMDGPU::S_CMP_LG_U32;
718   case CmpInst::ICMP_EQ:
719     return AMDGPU::S_CMP_EQ_U32;
720   case CmpInst::ICMP_SGT:
721     return AMDGPU::S_CMP_GT_I32;
722   case CmpInst::ICMP_SGE:
723     return AMDGPU::S_CMP_GE_I32;
724   case CmpInst::ICMP_SLT:
725     return AMDGPU::S_CMP_LT_I32;
726   case CmpInst::ICMP_SLE:
727     return AMDGPU::S_CMP_LE_I32;
728   case CmpInst::ICMP_UGT:
729     return AMDGPU::S_CMP_GT_U32;
730   case CmpInst::ICMP_UGE:
731     return AMDGPU::S_CMP_GE_U32;
732   case CmpInst::ICMP_ULT:
733     return AMDGPU::S_CMP_LT_U32;
734   case CmpInst::ICMP_ULE:
735     return AMDGPU::S_CMP_LE_U32;
736   default:
737     llvm_unreachable("Unknown condition code!");
738   }
739 }
740 
741 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
742   MachineBasicBlock *BB = I.getParent();
743   const DebugLoc &DL = I.getDebugLoc();
744 
745   Register SrcReg = I.getOperand(2).getReg();
746   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
747 
748   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
749 
750   Register CCReg = I.getOperand(0).getReg();
751   if (!isVCC(CCReg, *MRI)) {
752     int Opcode = getS_CMPOpcode(Pred, Size);
753     if (Opcode == -1)
754       return false;
755     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
756             .add(I.getOperand(2))
757             .add(I.getOperand(3));
758     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
759       .addReg(AMDGPU::SCC);
760     bool Ret =
761         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
762         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
763     I.eraseFromParent();
764     return Ret;
765   }
766 
767   int Opcode = getV_CMPOpcode(Pred, Size);
768   if (Opcode == -1)
769     return false;
770 
771   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
772             I.getOperand(0).getReg())
773             .add(I.getOperand(2))
774             .add(I.getOperand(3));
775   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
776                                *TRI.getBoolRC(), *MRI);
777   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
778   I.eraseFromParent();
779   return Ret;
780 }
781 
782 static bool isZero(Register Reg, MachineRegisterInfo &MRI) {
783   int64_t C;
784   if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)
785     return true;
786 
787   // FIXME: matcher should ignore copies
788   return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;
789 }
790 
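// The raw buffer intrinsics pack their cache-policy/auxiliary operand as
// bit 0 = glc, bit 1 = slc, bit 2 = dlc, bit 3 = swz; the helpers below
// extract the individual fields.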
791 static unsigned extractGLC(unsigned AuxiliaryData) {
792   return AuxiliaryData & 1;
793 }
794 
795 static unsigned extractSLC(unsigned AuxiliaryData) {
796   return (AuxiliaryData >> 1) & 1;
797 }
798 
799 static unsigned extractDLC(unsigned AuxiliaryData) {
800   return (AuxiliaryData >> 2) & 1;
801 }
802 
803 static unsigned extractSWZ(unsigned AuxiliaryData) {
804   return (AuxiliaryData >> 3) & 1;
805 }
806 
807 static unsigned getBufferStoreOpcode(LLT Ty,
808                                      const unsigned MemSize,
809                                      const bool Offen) {
810   const int Size = Ty.getSizeInBits();
811   switch (8 * MemSize) {
812   case 8:
813     return Offen ? AMDGPU::BUFFER_STORE_BYTE_OFFEN_exact :
814                    AMDGPU::BUFFER_STORE_BYTE_OFFSET_exact;
815   case 16:
816     return Offen ? AMDGPU::BUFFER_STORE_SHORT_OFFEN_exact :
817                    AMDGPU::BUFFER_STORE_SHORT_OFFSET_exact;
818   default:
819     unsigned Opc = Offen ? AMDGPU::BUFFER_STORE_DWORD_OFFEN_exact :
820                            AMDGPU::BUFFER_STORE_DWORD_OFFSET_exact;
821     if (Size > 32)
822       Opc = AMDGPU::getMUBUFOpcode(Opc, Size / 32);
823     return Opc;
824   }
825 }
826 
827 static unsigned getBufferStoreFormatOpcode(LLT Ty,
828                                            const unsigned MemSize,
829                                            const bool Offen) {
830   bool IsD16Packed = Ty.getScalarSizeInBits() == 16;
831   bool IsD16Unpacked = 8 * MemSize < Ty.getSizeInBits();
832   int NumElts = Ty.isVector() ? Ty.getNumElements() : 1;
833 
834   if (IsD16Packed) {
835     switch (NumElts) {
836     case 1:
837       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
838                      AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
839     case 2:
840       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFEN_exact :
841                      AMDGPU::BUFFER_STORE_FORMAT_D16_XY_OFFSET_exact;
842     case 3:
843       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFEN_exact :
844                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_OFFSET_exact;
845     case 4:
846       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFEN_exact :
847                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_OFFSET_exact;
848     default:
849       return -1;
850     }
851   }
852 
853   if (IsD16Unpacked) {
854     switch (NumElts) {
855     case 1:
856       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFEN_exact :
857                      AMDGPU::BUFFER_STORE_FORMAT_D16_X_OFFSET_exact;
858     case 2:
859       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFEN_exact :
860                      AMDGPU::BUFFER_STORE_FORMAT_D16_XY_gfx80_OFFSET_exact;
861     case 3:
862       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFEN_exact :
863                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZ_gfx80_OFFSET_exact;
864     case 4:
865       return Offen ? AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFEN_exact :
866                      AMDGPU::BUFFER_STORE_FORMAT_D16_XYZW_gfx80_OFFSET_exact;
867     default:
868       return -1;
869     }
870   }
871 
872   switch (NumElts) {
873   case 1:
874     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_X_OFFEN_exact :
875                    AMDGPU::BUFFER_STORE_FORMAT_X_OFFSET_exact;
876   case 2:
877     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XY_OFFEN_exact :
878                   AMDGPU::BUFFER_STORE_FORMAT_XY_OFFSET_exact;
879   case 3:
880     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFEN_exact :
881                    AMDGPU::BUFFER_STORE_FORMAT_XYZ_OFFSET_exact;
882   case 4:
883     return Offen ? AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFEN_exact :
884                    AMDGPU::BUFFER_STORE_FORMAT_XYZW_OFFSET_exact;
885   default:
886     return -1;
887   }
888 
889   llvm_unreachable("unhandled buffer store");
890 }
891 
892 // TODO: Move this to combiner
893 // Returns base register, imm offset, total constant offset.
894 std::tuple<Register, unsigned, unsigned>
895 AMDGPUInstructionSelector::splitBufferOffsets(MachineIRBuilder &B,
896                                               Register OrigOffset) const {
897   const unsigned MaxImm = 4095;
898   Register BaseReg;
899   unsigned TotalConstOffset;
900   MachineInstr *OffsetDef;
901 
902   std::tie(BaseReg, TotalConstOffset, OffsetDef)
903     = AMDGPU::getBaseWithConstantOffset(*MRI, OrigOffset);
904 
905   unsigned ImmOffset = TotalConstOffset;
906 
907   // If the immediate value is too big for the immoffset field, put the value
908   // and -4096 into the immoffset field so that the value that is copied/added
  // for the voffset field is a multiple of 4096, and it stands a better
  // chance of being CSEd with the copy/add for another similar load/store.
911   // However, do not do that rounding down to a multiple of 4096 if that is a
912   // negative number, as it appears to be illegal to have a negative offset
913   // in the vgpr, even if adding the immediate offset makes it positive.
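  //
  // For example, a total constant offset of 8200 splits into Overflow = 8192
  // (materialized into the voffset register below) and ImmOffset = 8, which
  // fits in the 12-bit immoffset field.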
914   unsigned Overflow = ImmOffset & ~MaxImm;
915   ImmOffset -= Overflow;
916   if ((int32_t)Overflow < 0) {
917     Overflow += ImmOffset;
918     ImmOffset = 0;
919   }
920 
921   if (Overflow != 0) {
922     // In case this is in a waterfall loop, insert offset code at the def point
923     // of the offset, not inside the loop.
924     MachineBasicBlock::iterator OldInsPt = B.getInsertPt();
925     MachineBasicBlock &OldMBB = B.getMBB();
926     B.setInstr(*OffsetDef);
927 
928     if (!BaseReg) {
929       BaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
930       B.buildInstr(AMDGPU::V_MOV_B32_e32)
931         .addDef(BaseReg)
932         .addImm(Overflow);
933     } else {
934       Register OverflowVal = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
935       B.buildInstr(AMDGPU::V_MOV_B32_e32)
936         .addDef(OverflowVal)
937         .addImm(Overflow);
938 
939       Register NewBaseReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
940       TII.getAddNoCarry(B.getMBB(), B.getInsertPt(), B.getDebugLoc(), NewBaseReg)
941         .addReg(BaseReg)
942         .addReg(OverflowVal, RegState::Kill)
943         .addImm(0);
944       BaseReg = NewBaseReg;
945     }
946 
947     B.setInsertPt(OldMBB, OldInsPt);
948   }
949 
950   return std::make_tuple(BaseReg, ImmOffset, TotalConstOffset);
951 }
952 
953 bool AMDGPUInstructionSelector::selectStoreIntrinsic(MachineInstr &MI,
954                                                      bool IsFormat) const {
955   MachineIRBuilder B(MI);
956   MachineFunction &MF = B.getMF();
957   Register VData = MI.getOperand(1).getReg();
958   LLT Ty = MRI->getType(VData);
959 
960   int Size = Ty.getSizeInBits();
961   if (Size % 32 != 0)
962     return false;
963 
964   // FIXME: Verifier should enforce 1 MMO for these intrinsics.
965   MachineMemOperand *MMO = *MI.memoperands_begin();
966   const int MemSize = MMO->getSize();
967 
968   Register RSrc = MI.getOperand(2).getReg();
969   Register VOffset = MI.getOperand(3).getReg();
970   Register SOffset = MI.getOperand(4).getReg();
971   unsigned AuxiliaryData = MI.getOperand(5).getImm();
972   unsigned ImmOffset;
973   unsigned TotalOffset;
974 
975   std::tie(VOffset, ImmOffset, TotalOffset) = splitBufferOffsets(B, VOffset);
976   if (TotalOffset != 0)
977     MMO = MF.getMachineMemOperand(MMO, TotalOffset, MemSize);
978 
979   const bool Offen = !isZero(VOffset, *MRI);
980 
981   int Opc = IsFormat ? getBufferStoreFormatOpcode(Ty, MemSize, Offen) :
982     getBufferStoreOpcode(Ty, MemSize, Offen);
983   if (Opc == -1)
984     return false;
985 
986   MachineInstrBuilder MIB = B.buildInstr(Opc)
987     .addUse(VData);
988 
989   if (Offen)
990     MIB.addUse(VOffset);
991 
992   MIB.addUse(RSrc)
993      .addUse(SOffset)
994      .addImm(ImmOffset)
995      .addImm(extractGLC(AuxiliaryData))
996      .addImm(extractSLC(AuxiliaryData))
997      .addImm(0) // tfe: FIXME: Remove from inst
998      .addImm(extractDLC(AuxiliaryData))
999      .addImm(extractSWZ(AuxiliaryData))
1000      .addMemOperand(MMO);
1001 
1002   MI.eraseFromParent();
1003 
1004   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1005 }
1006 
1007 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
1008   switch (MF.getFunction().getCallingConv()) {
1009   case CallingConv::AMDGPU_PS:
1010     return 1;
1011   case CallingConv::AMDGPU_VS:
1012     return 2;
1013   case CallingConv::AMDGPU_GS:
1014     return 3;
1015   case CallingConv::AMDGPU_HS:
1016   case CallingConv::AMDGPU_LS:
1017   case CallingConv::AMDGPU_ES:
1018     report_fatal_error("ds_ordered_count unsupported for this calling conv");
1019   case CallingConv::AMDGPU_CS:
1020   case CallingConv::AMDGPU_KERNEL:
1021   case CallingConv::C:
1022   case CallingConv::Fast:
1023   default:
1024     // Assume other calling conventions are various compute callable functions
1025     return 0;
1026   }
1027 }
1028 
1029 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1030   MachineInstr &MI, Intrinsic::ID IntrID) const {
1031   MachineBasicBlock *MBB = MI.getParent();
1032   MachineFunction *MF = MBB->getParent();
1033   const DebugLoc &DL = MI.getDebugLoc();
1034 
1035   unsigned IndexOperand = MI.getOperand(7).getImm();
1036   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1037   bool WaveDone = MI.getOperand(9).getImm() != 0;
1038 
1039   if (WaveDone && !WaveRelease)
1040     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1041 
1042   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1043   IndexOperand &= ~0x3f;
1044   unsigned CountDw = 0;
1045 
1046   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1047     CountDw = (IndexOperand >> 24) & 0xf;
1048     IndexOperand &= ~(0xf << 24);
1049 
1050     if (CountDw < 1 || CountDw > 4) {
1051       report_fatal_error(
1052         "ds_ordered_count: dword count must be between 1 and 4");
1053     }
1054   }
1055 
1056   if (IndexOperand)
1057     report_fatal_error("ds_ordered_count: bad index operand");
1058 
1059   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1060   unsigned ShaderType = getDSShaderTypeValue(*MF);
1061 
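  // Pack the DS_ORDERED_COUNT offset field. In the final value, bits [7:2]
  // hold the ordered-count index, bit 8 wave_release, bit 9 wave_done, bits
  // [11:10] the shader type, bit 12 the instruction (add vs. swap), and on
  // GFX10+ bits [15:14] the dword count minus one.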
1062   unsigned Offset0 = OrderedCountIndex << 2;
1063   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1064                      (Instruction << 4);
1065 
1066   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1067     Offset1 |= (CountDw - 1) << 6;
1068 
1069   unsigned Offset = Offset0 | (Offset1 << 8);
1070 
1071   Register M0Val = MI.getOperand(2).getReg();
1072   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1073     .addReg(M0Val);
1074 
1075   Register DstReg = MI.getOperand(0).getReg();
1076   Register ValReg = MI.getOperand(3).getReg();
1077   MachineInstrBuilder DS =
1078     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1079       .addReg(ValReg)
1080       .addImm(Offset)
1081       .cloneMemRefs(MI);
1082 
1083   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1084     return false;
1085 
1086   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1087   MI.eraseFromParent();
1088   return Ret;
1089 }
1090 
1091 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1092   switch (IntrID) {
1093   case Intrinsic::amdgcn_ds_gws_init:
1094     return AMDGPU::DS_GWS_INIT;
1095   case Intrinsic::amdgcn_ds_gws_barrier:
1096     return AMDGPU::DS_GWS_BARRIER;
1097   case Intrinsic::amdgcn_ds_gws_sema_v:
1098     return AMDGPU::DS_GWS_SEMA_V;
1099   case Intrinsic::amdgcn_ds_gws_sema_br:
1100     return AMDGPU::DS_GWS_SEMA_BR;
1101   case Intrinsic::amdgcn_ds_gws_sema_p:
1102     return AMDGPU::DS_GWS_SEMA_P;
1103   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1104     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1105   default:
1106     llvm_unreachable("not a gws intrinsic");
1107   }
1108 }
1109 
1110 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1111                                                      Intrinsic::ID IID) const {
1112   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1113       !STI.hasGWSSemaReleaseAll())
1114     return false;
1115 
1116   // intrinsic ID, vsrc, offset
1117   const bool HasVSrc = MI.getNumOperands() == 3;
1118   assert(HasVSrc || MI.getNumOperands() == 2);
1119 
1120   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1121   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1122   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1123     return false;
1124 
1125   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1126   assert(OffsetDef);
1127 
1128   unsigned ImmOffset;
1129 
1130   MachineBasicBlock *MBB = MI.getParent();
1131   const DebugLoc &DL = MI.getDebugLoc();
1132 
1133   MachineInstr *Readfirstlane = nullptr;
1134 
1135   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1136   // incoming offset, in case there's an add of a constant. We'll have to put it
1137   // back later.
1138   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1139     Readfirstlane = OffsetDef;
1140     BaseOffset = OffsetDef->getOperand(1).getReg();
1141     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1142   }
1143 
1144   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1145     // If we have a constant offset, try to use the 0 in m0 as the base.
    // TODO: Look into changing the default m0 initialization value. If the
    // default -1 only sets the low 16 bits, we could leave it as-is and add
    // 1 to the immediate offset.
1149 
1150     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1151     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1152       .addImm(0);
1153   } else {
1154     std::tie(BaseOffset, ImmOffset, OffsetDef)
1155       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1156 
1157     if (Readfirstlane) {
1158       // We have the constant offset now, so put the readfirstlane back on the
1159       // variable component.
1160       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1161         return false;
1162 
1163       Readfirstlane->getOperand(1).setReg(BaseOffset);
1164       BaseOffset = Readfirstlane->getOperand(0).getReg();
1165     } else {
1166       if (!RBI.constrainGenericRegister(BaseOffset,
1167                                         AMDGPU::SReg_32RegClass, *MRI))
1168         return false;
1169     }
1170 
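    // The hardware adds m0[21:16] into the GWS resource id (see the note
    // below), so shift the uniform base offset into that bit range before
    // copying it into m0.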
1171     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1172     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1173       .addReg(BaseOffset)
1174       .addImm(16);
1175 
1176     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1177       .addReg(M0Base);
1178   }
1179 
1180   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1181   // offset field) % 64. Some versions of the programming guide omit the m0
1182   // part, or claim it's from offset 0.
1183   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1184 
1185   if (HasVSrc) {
1186     Register VSrc = MI.getOperand(1).getReg();
1187     MIB.addReg(VSrc);
1188     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1189       return false;
1190   }
1191 
1192   MIB.addImm(ImmOffset)
1193      .addImm(-1) // $gds
1194      .cloneMemRefs(MI);
1195 
1196   MI.eraseFromParent();
1197   return true;
1198 }
1199 
1200 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1201                                                       bool IsAppend) const {
1202   Register PtrBase = MI.getOperand(2).getReg();
1203   LLT PtrTy = MRI->getType(PtrBase);
1204   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1205 
1206   unsigned Offset;
1207   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1208 
1209   // TODO: Should this try to look through readfirstlane like GWS?
1210   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1211     PtrBase = MI.getOperand(2).getReg();
1212     Offset = 0;
1213   }
1214 
1215   MachineBasicBlock *MBB = MI.getParent();
1216   const DebugLoc &DL = MI.getDebugLoc();
1217   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1218 
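  // DS_APPEND/DS_CONSUME take their base address through m0; only the small
  // constant offset is encoded in the instruction itself.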
1219   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1220     .addReg(PtrBase);
1221   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1222     .addImm(Offset)
1223     .addImm(IsGDS ? -1 : 0)
1224     .cloneMemRefs(MI);
1225 
1226   MI.eraseFromParent();
1227   return true;
1228 }
1229 
1230 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1231     MachineInstr &I) const {
1232   MachineBasicBlock *BB = I.getParent();
1233   unsigned IntrinsicID = I.getIntrinsicID();
1234   switch (IntrinsicID) {
1235   case Intrinsic::amdgcn_end_cf: {
    // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1237     // SelectionDAG uses for wave32 vs wave64.
1238     BuildMI(*BB, &I, I.getDebugLoc(),
1239             TII.get(AMDGPU::SI_END_CF))
1240       .add(I.getOperand(1));
1241 
1242     Register Reg = I.getOperand(1).getReg();
1243     I.eraseFromParent();
1244 
1245     if (!MRI->getRegClassOrNull(Reg))
1246       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1247     return true;
1248   }
1249   case Intrinsic::amdgcn_raw_buffer_store:
1250     return selectStoreIntrinsic(I, false);
1251   case Intrinsic::amdgcn_raw_buffer_store_format:
1252     return selectStoreIntrinsic(I, true);
1253   case Intrinsic::amdgcn_ds_ordered_add:
1254   case Intrinsic::amdgcn_ds_ordered_swap:
1255     return selectDSOrderedIntrinsic(I, IntrinsicID);
1256   case Intrinsic::amdgcn_ds_gws_init:
1257   case Intrinsic::amdgcn_ds_gws_barrier:
1258   case Intrinsic::amdgcn_ds_gws_sema_v:
1259   case Intrinsic::amdgcn_ds_gws_sema_br:
1260   case Intrinsic::amdgcn_ds_gws_sema_p:
1261   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1262     return selectDSGWSIntrinsic(I, IntrinsicID);
1263   case Intrinsic::amdgcn_ds_append:
1264     return selectDSAppendConsume(I, true);
1265   case Intrinsic::amdgcn_ds_consume:
1266     return selectDSAppendConsume(I, false);
1267   default:
1268     return selectImpl(I, *CoverageInfo);
1269   }
1270 }
1271 
1272 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1273   MachineBasicBlock *BB = I.getParent();
1274   const DebugLoc &DL = I.getDebugLoc();
1275 
1276   Register DstReg = I.getOperand(0).getReg();
1277   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1278   assert(Size <= 32 || Size == 64);
1279   const MachineOperand &CCOp = I.getOperand(1);
1280   Register CCReg = CCOp.getReg();
1281   if (!isVCC(CCReg, *MRI)) {
1282     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1283                                          AMDGPU::S_CSELECT_B32;
1284     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1285             .addReg(CCReg);
1286 
    // The generic constrainSelectedInstRegOperands doesn't work for the scc
    // register bank, because it does not cover the register class that we use
    // to represent it. So we need to manually set the register class here.
1290     if (!MRI->getRegClassOrNull(CCReg))
1291         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1292     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1293             .add(I.getOperand(2))
1294             .add(I.getOperand(3));
1295 
1296     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1297                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1298     I.eraseFromParent();
1299     return Ret;
1300   }
1301 
1302   // Wide VGPR select should have been split in RegBankSelect.
1303   if (Size > 32)
1304     return false;
1305 
1306   MachineInstr *Select =
1307       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1308               .addImm(0)
1309               .add(I.getOperand(3))
1310               .addImm(0)
1311               .add(I.getOperand(2))
1312               .add(I.getOperand(1));
1313 
1314   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1315   I.eraseFromParent();
1316   return Ret;
1317 }
1318 
1319 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1320   initM0(I);
1321   return selectImpl(I, *CoverageInfo);
1322 }
1323 
1324 static int sizeToSubRegIndex(unsigned Size) {
1325   switch (Size) {
1326   case 32:
1327     return AMDGPU::sub0;
1328   case 64:
1329     return AMDGPU::sub0_sub1;
1330   case 96:
1331     return AMDGPU::sub0_sub1_sub2;
1332   case 128:
1333     return AMDGPU::sub0_sub1_sub2_sub3;
1334   case 256:
1335     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1336   default:
1337     if (Size < 32)
1338       return AMDGPU::sub0;
1339     if (Size > 256)
1340       return -1;
1341     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1342   }
1343 }
1344 
1345 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1346   Register DstReg = I.getOperand(0).getReg();
1347   Register SrcReg = I.getOperand(1).getReg();
1348   const LLT DstTy = MRI->getType(DstReg);
1349   const LLT SrcTy = MRI->getType(SrcReg);
1350   if (!DstTy.isScalar())
1351     return false;
1352 
1353   const LLT S1 = LLT::scalar(1);
1354 
1355   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1356   const RegisterBank *DstRB;
1357   if (DstTy == S1) {
1358     // This is a special case. We don't treat s1 for legalization artifacts as
1359     // vcc booleans.
1360     DstRB = SrcRB;
1361   } else {
1362     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1363     if (SrcRB != DstRB)
1364       return false;
1365   }
1366 
1367   unsigned DstSize = DstTy.getSizeInBits();
1368   unsigned SrcSize = SrcTy.getSizeInBits();
1369 
1370   const TargetRegisterClass *SrcRC
1371     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1372   const TargetRegisterClass *DstRC
1373     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1374 
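  // A truncate from a wide source is selected as a plain copy of the low
  // subregister of the source.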
1375   if (SrcSize > 32) {
1376     int SubRegIdx = sizeToSubRegIndex(DstSize);
1377     if (SubRegIdx == -1)
1378       return false;
1379 
1380     // Deal with weird cases where the class only partially supports the subreg
1381     // index.
1382     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1383     if (!SrcRC)
1384       return false;
1385 
1386     I.getOperand(1).setSubReg(SubRegIdx);
1387   }
1388 
1389   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1390       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1391     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1392     return false;
1393   }
1394 
1395   I.setDesc(TII.get(TargetOpcode::COPY));
1396   return true;
1397 }
1398 
1399 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
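/// The integer inline-constant range on AMDGPU is [-16, 64], so masks whose
/// signed value falls in that range can be encoded without a literal.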
1400 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1401   Mask = maskTrailingOnes<unsigned>(Size);
1402   int SignedMask = static_cast<int>(Mask);
1403   return SignedMask >= -16 && SignedMask <= 64;
1404 }
1405 
1406 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1407 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1408   Register Reg, const MachineRegisterInfo &MRI,
1409   const TargetRegisterInfo &TRI) const {
1410   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1411   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1412     return RB;
1413 
1414   // Ignore the type, since we don't use vcc in artifacts.
1415   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1416     return &RBI.getRegBankFromRegClass(*RC, LLT());
1417   return nullptr;
1418 }
1419 
1420 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1421   bool Signed = I.getOpcode() == AMDGPU::G_SEXT;
1422   const DebugLoc &DL = I.getDebugLoc();
1423   MachineBasicBlock &MBB = *I.getParent();
1424   const Register DstReg = I.getOperand(0).getReg();
1425   const Register SrcReg = I.getOperand(1).getReg();
1426 
1427   const LLT DstTy = MRI->getType(DstReg);
1428   const LLT SrcTy = MRI->getType(SrcReg);
1429   const unsigned SrcSize = SrcTy.getSizeInBits();
1430   const unsigned DstSize = DstTy.getSizeInBits();
1431   if (!DstTy.isScalar())
1432     return false;
1433 
1434   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1435     return selectCOPY(I);
1436 
1437   // Artifact casts should never use vcc.
1438   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1439 
1440   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1441     // 64-bit should have been split up in RegBankSelect
1442 
1443     // Try to use an and with a mask if it will save code size.
1444     unsigned Mask;
1445     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1446       MachineInstr *ExtI =
1447       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1448         .addImm(Mask)
1449         .addReg(SrcReg);
1450       I.eraseFromParent();
1451       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1452     }
1453 
1454     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1455     MachineInstr *ExtI =
1456       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1457       .addReg(SrcReg)
1458       .addImm(0) // Offset
1459       .addImm(SrcSize); // Width
1460     I.eraseFromParent();
1461     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1462   }
1463 
1464   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1465     if (!RBI.constrainGenericRegister(SrcReg, AMDGPU::SReg_32RegClass, *MRI))
1466       return false;
1467 
1468     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1469       const unsigned SextOpc = SrcSize == 8 ?
1470         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1471       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1472         .addReg(SrcReg);
1473       I.eraseFromParent();
1474       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1475     }
1476 
1477     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1478     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1479 
    // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
1481     if (DstSize > 32 && SrcSize <= 32) {
1482       // We need a 64-bit register source, but the high bits don't matter.
1483       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1484       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1485       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1486       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1487         .addReg(SrcReg)
1488         .addImm(AMDGPU::sub0)
1489         .addReg(UndefReg)
1490         .addImm(AMDGPU::sub1);
1491 
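      // Pack the BFE control operand: the offset goes in bits [5:0] (0 here)
      // and the width in bits [22:16], so e.g. SrcSize == 16 encodes as
      // 16 << 16 = 0x100000.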
1492       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1493         .addReg(ExtReg)
1494         .addImm(SrcSize << 16);
1495 
1496       I.eraseFromParent();
1497       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1498     }
1499 
1500     unsigned Mask;
1501     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1502       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1503         .addReg(SrcReg)
1504         .addImm(Mask);
1505     } else {
1506       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1507         .addReg(SrcReg)
1508         .addImm(SrcSize << 16);
1509     }
1510 
1511     I.eraseFromParent();
1512     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1513   }
1514 
1515   return false;
1516 }
1517 
1518 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1519   MachineBasicBlock *BB = I.getParent();
1520   MachineOperand &ImmOp = I.getOperand(1);
1521 
1522   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1523   if (ImmOp.isFPImm()) {
1524     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1525     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1526   } else if (ImmOp.isCImm()) {
1527     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1528   }
1529 
1530   Register DstReg = I.getOperand(0).getReg();
1531   unsigned Size;
1532   bool IsSgpr;
1533   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1534   if (RB) {
1535     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1536     Size = MRI->getType(DstReg).getSizeInBits();
1537   } else {
1538     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1539     IsSgpr = TRI.isSGPRClass(RC);
1540     Size = TRI.getRegSizeInBits(*RC);
1541   }
1542 
1543   if (Size != 32 && Size != 64)
1544     return false;
1545 
1546   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1547   if (Size == 32) {
1548     I.setDesc(TII.get(Opcode));
1549     I.addImplicitDefUseOperands(*MF);
1550     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1551   }
1552 
1553   const DebugLoc &DL = I.getDebugLoc();
1554 
1555   APInt Imm(Size, I.getOperand(1).getImm());
1556 
1557   MachineInstr *ResInst;
1558   if (IsSgpr && TII.isInlineConstant(Imm)) {
1559     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1560       .addImm(I.getOperand(1).getImm());
1561   } else {
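    // Materialize the 64-bit constant as two 32-bit moves for the low and
    // high halves, then stitch them together with a REG_SEQUENCE.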
1562     const TargetRegisterClass *RC = IsSgpr ?
1563       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1564     Register LoReg = MRI->createVirtualRegister(RC);
1565     Register HiReg = MRI->createVirtualRegister(RC);
1566 
1567     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1568       .addImm(Imm.trunc(32).getZExtValue());
1569 
1570     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1571       .addImm(Imm.ashr(32).getZExtValue());
1572 
1573     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1574       .addReg(LoReg)
1575       .addImm(AMDGPU::sub0)
1576       .addReg(HiReg)
1577       .addImm(AMDGPU::sub1);
1578   }
1579 
  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target-independent opcodes.
1582   I.eraseFromParent();
1583   const TargetRegisterClass *DstRC =
1584     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1585   if (!DstRC)
1586     return true;
1587   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1588 }
1589 
1590 static bool isConstant(const MachineInstr &MI) {
1591   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1592 }
1593 
1594 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1595     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1596 
1597   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1598 
1599   assert(PtrMI);
1600 
1601   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1602     return;
1603 
1604   GEPInfo GEPInfo(*PtrMI);
1605 
1606   for (unsigned i = 1; i != 3; ++i) {
1607     const MachineOperand &GEPOp = PtrMI->getOperand(i);
1608     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1609     assert(OpDef);
1610     if (i == 2 && isConstant(*OpDef)) {
1611       // TODO: Could handle constant base + variable offset, but a combine
1612       // probably should have commuted it.
1613       assert(GEPInfo.Imm == 0);
1614       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1615       continue;
1616     }
1617     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1618     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1619       GEPInfo.SgprParts.push_back(GEPOp.getReg());
1620     else
1621       GEPInfo.VgprParts.push_back(GEPOp.getReg());
1622   }
1623 
1624   AddrInfo.push_back(GEPInfo);
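  // Recurse on the pointer operand to pick up the rest of a chained
  // G_PTR_ADD sequence.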
1625   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1626 }
1627 
1628 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1629   if (!MI.hasOneMemOperand())
1630     return false;
1631 
1632   const MachineMemOperand *MMO = *MI.memoperands_begin();
1633   const Value *Ptr = MMO->getValue();
1634 
1635   // UndefValue means this is a load of a kernel input.  These are uniform.
1636   // Sometimes LDS instructions have constant pointers.
1637   // If Ptr is null, then that means this mem operand contains a
1638   // PseudoSourceValue like GOT.
1639   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1640       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1641     return true;
1642 
1643   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1644     return true;
1645 
1646   const Instruction *I = dyn_cast<Instruction>(Ptr);
1647   return I && I->getMetadata("amdgpu.uniform");
1648 }
1649 
1650 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1651   for (const GEPInfo &GEPInfo : AddrInfo) {
1652     if (!GEPInfo.VgprParts.empty())
1653       return true;
1654   }
1655   return false;
1656 }
1657 
1658 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1659   MachineBasicBlock *BB = I.getParent();
1660 
1661   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1662   unsigned AS = PtrTy.getAddressSpace();
1663   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1664       STI.ldsRequiresM0Init()) {
    // If DS instructions require M0 initialization, insert it before selecting.
1666     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1667       .addImm(-1);
1668   }
1669 }
1670 
1671 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1672   initM0(I);
1673   return selectImpl(I, *CoverageInfo);
1674 }
1675 
1676 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1677   MachineBasicBlock *BB = I.getParent();
1678   MachineOperand &CondOp = I.getOperand(0);
1679   Register CondReg = CondOp.getReg();
1680   const DebugLoc &DL = I.getDebugLoc();
1681 
1682   unsigned BrOpcode;
1683   Register CondPhysReg;
1684   const TargetRegisterClass *ConstrainRC;
1685 
1686   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1687   // whether the branch is uniform when selecting the instruction. In
  // GlobalISel, we should push that decision into RegBankSelect. Assume for now
  // that an SCC branch condition means RegBankSelect decided the branch is
  // uniform, even though it does not currently check for that.
1691   if (!isVCC(CondReg, *MRI)) {
1692     if (MRI->getType(CondReg) != LLT::scalar(32))
1693       return false;
1694 
1695     CondPhysReg = AMDGPU::SCC;
1696     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1697     // FIXME: Hack for isSCC tests
1698     ConstrainRC = &AMDGPU::SGPR_32RegClass;
1699   } else {
1700     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
    // Based on the register bank we sort of know the producer is a VCC value,
    // which ands inactive lanes with 0. What if there was a logical operation
    // with vcc producers in different blocks/with different exec masks?
1704     // FIXME: Should scc->vcc copies and with exec?
1705     CondPhysReg = TRI.getVCC();
1706     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1707     ConstrainRC = TRI.getBoolRC();
1708   }
1709 
1710   if (!MRI->getRegClassOrNull(CondReg))
1711     MRI->setRegClass(CondReg, ConstrainRC);
1712 
1713   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1714     .addReg(CondReg);
1715   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1716     .addMBB(I.getOperand(1).getMBB());
1717 
1718   I.eraseFromParent();
1719   return true;
1720 }
1721 
1722 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX(MachineInstr &I) const {
1723   Register DstReg = I.getOperand(0).getReg();
1724   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1725   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1726   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
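  // Mutating the opcode in place does not add the new opcode's implicit
  // operands, so explicitly add the implicit EXEC use that V_MOV_B32 needs.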
1727   if (IsVGPR)
1728     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1729 
1730   return RBI.constrainGenericRegister(
1731     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1732 }
1733 
1734 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1735   uint64_t Align = I.getOperand(2).getImm();
1736   const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1737 
1738   MachineBasicBlock *BB = I.getParent();
1739 
1740   Register DstReg = I.getOperand(0).getReg();
1741   Register SrcReg = I.getOperand(1).getReg();
1742 
1743   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1744   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1745   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1746   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1747   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1748   const TargetRegisterClass &RegRC
1749     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1750 
1751   LLT Ty = MRI->getType(DstReg);
1752 
1753   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1754                                                                   *MRI);
1755   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1756                                                                   *MRI);
1757   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1758       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1759     return false;
1760 
1761   const DebugLoc &DL = I.getDebugLoc();
1762   Register ImmReg = MRI->createVirtualRegister(&RegRC);
1763   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1764     .addImm(Mask);
1765 
1766   if (Ty.getSizeInBits() == 32) {
1767     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1768       .addReg(SrcReg)
1769       .addReg(ImmReg);
1770     I.eraseFromParent();
1771     return true;
1772   }
1773 
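  // 64-bit pointer: the alignment mask only clears low-order bits (this
  // assumes the alignment is below 32), so only the low half needs the AND;
  // the high half is passed through unchanged.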
1774   Register HiReg = MRI->createVirtualRegister(&RegRC);
1775   Register LoReg = MRI->createVirtualRegister(&RegRC);
1776   Register MaskLo = MRI->createVirtualRegister(&RegRC);
1777 
1778   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1779     .addReg(SrcReg, 0, AMDGPU::sub0);
1780   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1781     .addReg(SrcReg, 0, AMDGPU::sub1);
1782 
1783   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1784     .addReg(LoReg)
1785     .addReg(ImmReg);
1786   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1787     .addReg(MaskLo)
1788     .addImm(AMDGPU::sub0)
1789     .addReg(HiReg)
1790     .addImm(AMDGPU::sub1);
1791   I.eraseFromParent();
1792   return true;
1793 }
1794 
1795 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
1796   MachineInstr &MI) const {
1797   Register DstReg = MI.getOperand(0).getReg();
1798   Register SrcReg = MI.getOperand(1).getReg();
1799   Register IdxReg = MI.getOperand(2).getReg();
1800 
1801   LLT DstTy = MRI->getType(DstReg);
1802   LLT SrcTy = MRI->getType(SrcReg);
1803 
1804   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1805   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1806   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1807 
  // The index must be scalar. If it wasn't, RegBankSelect should have moved
  // this into a waterfall loop.
1810   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1811     return false;
1812 
1813   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
1814                                                                   *MRI);
1815   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
1816                                                                   *MRI);
1817   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1818       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1819       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1820     return false;
1821 
1822   MachineBasicBlock *BB = MI.getParent();
1823   const DebugLoc &DL = MI.getDebugLoc();
1824   const bool Is64 = DstTy.getSizeInBits() == 64;
1825 
1826   unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1827 
1828   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
1829     if (DstTy.getSizeInBits() != 32 && !Is64)
1830       return false;
1831 
1832     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1833       .addReg(IdxReg);
1834 
1835     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
1836     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
1837       .addReg(SrcReg, 0, SubReg)
1838       .addReg(SrcReg, RegState::Implicit);
1839     MI.eraseFromParent();
1840     return true;
1841   }
1842 
1843   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
1844     return false;
1845 
1846   if (!STI.useVGPRIndexMode()) {
1847     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1848       .addReg(IdxReg);
1849     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
1850       .addReg(SrcReg, RegState::Undef, SubReg)
1851       .addReg(SrcReg, RegState::Implicit);
1852     MI.eraseFromParent();
1853     return true;
1854   }
1855 
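  // The subtarget supports VGPR indexing mode, so wrap the indexed move in
  // S_SET_GPR_IDX_ON / S_SET_GPR_IDX_OFF instead of going through M0.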
1856   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1857     .addReg(IdxReg)
1858     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1859   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
1860     .addReg(SrcReg, RegState::Undef, SubReg)
1861     .addReg(SrcReg, RegState::Implicit)
1862     .addReg(AMDGPU::M0, RegState::Implicit);
1863   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1864 
1865   MI.eraseFromParent();
1866   return true;
1867 }
1868 
1869 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
1870   if (I.isPHI())
1871     return selectPHI(I);
1872 
1873   if (!I.isPreISelOpcode()) {
1874     if (I.isCopy())
1875       return selectCOPY(I);
1876     return true;
1877   }
1878 
1879   switch (I.getOpcode()) {
1880   case TargetOpcode::G_AND:
1881   case TargetOpcode::G_OR:
1882   case TargetOpcode::G_XOR:
1883     if (selectG_AND_OR_XOR(I))
1884       return true;
1885     return selectImpl(I, *CoverageInfo);
1886   case TargetOpcode::G_ADD:
1887   case TargetOpcode::G_SUB:
1888     if (selectImpl(I, *CoverageInfo))
1889       return true;
1890     return selectG_ADD_SUB(I);
1891   case TargetOpcode::G_UADDO:
1892   case TargetOpcode::G_USUBO:
1893   case TargetOpcode::G_UADDE:
1894   case TargetOpcode::G_USUBE:
1895     return selectG_UADDO_USUBO_UADDE_USUBE(I);
1896   case TargetOpcode::G_INTTOPTR:
1897   case TargetOpcode::G_BITCAST:
1898   case TargetOpcode::G_PTRTOINT:
1899     return selectCOPY(I);
1900   case TargetOpcode::G_CONSTANT:
1901   case TargetOpcode::G_FCONSTANT:
1902     return selectG_CONSTANT(I);
1903   case TargetOpcode::G_EXTRACT:
1904     return selectG_EXTRACT(I);
1905   case TargetOpcode::G_MERGE_VALUES:
1906   case TargetOpcode::G_BUILD_VECTOR:
1907   case TargetOpcode::G_CONCAT_VECTORS:
1908     return selectG_MERGE_VALUES(I);
1909   case TargetOpcode::G_UNMERGE_VALUES:
1910     return selectG_UNMERGE_VALUES(I);
1911   case TargetOpcode::G_PTR_ADD:
1912     return selectG_PTR_ADD(I);
1913   case TargetOpcode::G_IMPLICIT_DEF:
1914     return selectG_IMPLICIT_DEF(I);
1915   case TargetOpcode::G_INSERT:
1916     return selectG_INSERT(I);
1917   case TargetOpcode::G_INTRINSIC:
1918     return selectG_INTRINSIC(I);
1919   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1920     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
1921   case TargetOpcode::G_ICMP:
1922     if (selectG_ICMP(I))
1923       return true;
1924     return selectImpl(I, *CoverageInfo);
1925   case TargetOpcode::G_LOAD:
1926   case TargetOpcode::G_ATOMIC_CMPXCHG:
1927   case TargetOpcode::G_ATOMICRMW_XCHG:
1928   case TargetOpcode::G_ATOMICRMW_ADD:
1929   case TargetOpcode::G_ATOMICRMW_SUB:
1930   case TargetOpcode::G_ATOMICRMW_AND:
1931   case TargetOpcode::G_ATOMICRMW_OR:
1932   case TargetOpcode::G_ATOMICRMW_XOR:
1933   case TargetOpcode::G_ATOMICRMW_MIN:
1934   case TargetOpcode::G_ATOMICRMW_MAX:
1935   case TargetOpcode::G_ATOMICRMW_UMIN:
1936   case TargetOpcode::G_ATOMICRMW_UMAX:
1937   case TargetOpcode::G_ATOMICRMW_FADD:
1938     return selectG_LOAD_ATOMICRMW(I);
1939   case TargetOpcode::G_SELECT:
1940     return selectG_SELECT(I);
1941   case TargetOpcode::G_STORE:
1942     return selectG_STORE(I);
1943   case TargetOpcode::G_TRUNC:
1944     return selectG_TRUNC(I);
1945   case TargetOpcode::G_SEXT:
1946   case TargetOpcode::G_ZEXT:
1947   case TargetOpcode::G_ANYEXT:
1948     if (selectImpl(I, *CoverageInfo))
1949       return true;
1950     return selectG_SZA_EXT(I);
1951   case TargetOpcode::G_BRCOND:
1952     return selectG_BRCOND(I);
1953   case TargetOpcode::G_FRAME_INDEX:
1954     return selectG_FRAME_INDEX(I);
1955   case TargetOpcode::G_PTR_MASK:
1956     return selectG_PTR_MASK(I);
1957   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1958     return selectG_EXTRACT_VECTOR_ELT(I);
1959   default:
1960     return selectImpl(I, *CoverageInfo);
1961   }
1962   return false;
1963 }
1964 
1965 InstructionSelector::ComplexRendererFns
1966 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
1967   return {{
1968       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1969   }};
1970 
1971 }
1972 
1973 std::pair<Register, unsigned>
1974 AMDGPUInstructionSelector::selectVOP3ModsImpl(
1975   Register Src) const {
1976   unsigned Mods = 0;
1977   MachineInstr *MI = MRI->getVRegDef(Src);
1978 
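  // Fold source modifiers: look through a G_FNEG feeding the source (NEG bit)
  // and then a G_FABS underneath it (ABS bit).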
1979   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
1980     Src = MI->getOperand(1).getReg();
1981     Mods |= SISrcMods::NEG;
1982     MI = MRI->getVRegDef(Src);
1983   }
1984 
1985   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
1986     Src = MI->getOperand(1).getReg();
1987     Mods |= SISrcMods::ABS;
1988   }
1989 
1990   return std::make_pair(Src, Mods);
1991 }
1992 
1993 ///
1994 /// This will select either an SGPR or VGPR operand and will save us from
1995 /// having to write an extra tablegen pattern.
1996 InstructionSelector::ComplexRendererFns
1997 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
1998   return {{
1999       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2000   }};
2001 }
2002 
2003 InstructionSelector::ComplexRendererFns
2004 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2005   Register Src;
2006   unsigned Mods;
2007   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2008 
2009   return {{
2010       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2011       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2012       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2013       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2014   }};
2015 }
2016 
2017 InstructionSelector::ComplexRendererFns
2018 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2019   return {{
2020       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2021       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2022       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2023   }};
2024 }
2025 
2026 InstructionSelector::ComplexRendererFns
2027 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2028   Register Src;
2029   unsigned Mods;
2030   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2031 
2032   return {{
2033       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2034       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2035   }};
2036 }
2037 
2038 InstructionSelector::ComplexRendererFns
2039 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2040   Register Src;
2041   unsigned Mods;
2042   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2043   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2044     return None;
2045 
2046   return {{
2047       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2048       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2049   }};
2050 }
2051 
2052 InstructionSelector::ComplexRendererFns
2053 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
2054   // FIXME: Handle clamp and op_sel
2055   return {{
2056       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2057       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
2058       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // clamp
2059   }};
2060 }
2061 
2062 InstructionSelector::ComplexRendererFns
2063 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2064   // FIXME: Handle op_sel
2065   return {{
2066       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2067       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2068   }};
2069 }
2070 
2071 InstructionSelector::ComplexRendererFns
2072 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2073   SmallVector<GEPInfo, 4> AddrInfo;
2074   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2075 
2076   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2077     return None;
2078 
2079   const GEPInfo &GEPInfo = AddrInfo[0];
2080 
2081   if (!AMDGPU::isLegalSMRDImmOffset(STI, GEPInfo.Imm))
2082     return None;
2083 
2084   unsigned PtrReg = GEPInfo.SgprParts[0];
2085   int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2086   return {{
2087     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2088     [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
2089   }};
2090 }
2091 
2092 InstructionSelector::ComplexRendererFns
2093 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2094   SmallVector<GEPInfo, 4> AddrInfo;
2095   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2096 
2097   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2098     return None;
2099 
2100   const GEPInfo &GEPInfo = AddrInfo[0];
2101   unsigned PtrReg = GEPInfo.SgprParts[0];
2102   int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2103   if (!isUInt<32>(EncodedImm))
2104     return None;
2105 
2106   return {{
2107     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2108     [=](MachineInstrBuilder &MIB) { MIB.addImm(EncodedImm); }
2109   }};
2110 }
2111 
2112 InstructionSelector::ComplexRendererFns
2113 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2114   MachineInstr *MI = Root.getParent();
2115   MachineBasicBlock *MBB = MI->getParent();
2116 
2117   SmallVector<GEPInfo, 4> AddrInfo;
2118   getAddrModeInfo(*MI, *MRI, AddrInfo);
2119 
  // FIXME: We should shrink the GEP if the offset is known to fit in 32 bits;
  // then we can select all ptr + 32-bit offsets, not just immediate offsets.
2122   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2123     return None;
2124 
2125   const GEPInfo &GEPInfo = AddrInfo[0];
2126   if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
2127     return None;
2128 
  // If we make it this far we have a load with a 32-bit immediate offset.
  // It is OK to select this using an SGPR offset, because we have already
  // failed trying to select this load into one of the _IMM variants since
  // the _IMM patterns are considered before the _SGPR patterns.
2133   unsigned PtrReg = GEPInfo.SgprParts[0];
2134   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2135   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2136           .addImm(GEPInfo.Imm);
2137   return {{
2138     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2139     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2140   }};
2141 }
2142 
2143 template <bool Signed>
2144 InstructionSelector::ComplexRendererFns
2145 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2146   MachineInstr *MI = Root.getParent();
2147 
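  // Default rendering: the base register with a zero immediate offset and
  // slc = 0, used when no legal constant offset can be folded.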
2148   InstructionSelector::ComplexRendererFns Default = {{
2149       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2150       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2151       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2152     }};
2153 
2154   if (!STI.hasFlatInstOffsets())
2155     return Default;
2156 
2157   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2158   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2159     return Default;
2160 
2161   Optional<int64_t> Offset =
2162     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2163   if (!Offset.hasValue())
2164     return Default;
2165 
2166   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2167   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2168     return Default;
2169 
2170   Register BasePtr = OpDef->getOperand(1).getReg();
2171 
2172   return {{
2173       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2174       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2175       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2176     }};
2177 }
2178 
2179 InstructionSelector::ComplexRendererFns
2180 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2181   return selectFlatOffsetImpl<false>(Root);
2182 }
2183 
2184 InstructionSelector::ComplexRendererFns
2185 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2186   return selectFlatOffsetImpl<true>(Root);
2187 }
2188 
2189 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2190   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2191   return PSV && PSV->isStack();
2192 }
2193 
2194 InstructionSelector::ComplexRendererFns
2195 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2196   MachineInstr *MI = Root.getParent();
2197   MachineBasicBlock *MBB = MI->getParent();
2198   MachineFunction *MF = MBB->getParent();
2199   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2200 
2201   int64_t Offset = 0;
2202   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
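    // The MUBUF offset field only holds 12 bits, so materialize the bits
    // above 4095 into a VGPR used as vaddr and keep the low 12 bits as the
    // immediate offset.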
2203     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2204 
2205     // TODO: Should this be inside the render function? The iterator seems to
2206     // move.
2207     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2208             HighBits)
2209       .addImm(Offset & ~4095);
2210 
2211     return {{[=](MachineInstrBuilder &MIB) { // rsrc
2212                MIB.addReg(Info->getScratchRSrcReg());
2213              },
2214              [=](MachineInstrBuilder &MIB) { // vaddr
2215                MIB.addReg(HighBits);
2216              },
2217              [=](MachineInstrBuilder &MIB) { // soffset
2218                const MachineMemOperand *MMO = *MI->memoperands_begin();
2219                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2220 
2221                Register SOffsetReg = isStackPtrRelative(PtrInfo)
2222                                          ? Info->getStackPtrOffsetReg()
2223                                          : Info->getScratchWaveOffsetReg();
2224                MIB.addReg(SOffsetReg);
2225              },
2226              [=](MachineInstrBuilder &MIB) { // offset
2227                MIB.addImm(Offset & 4095);
2228              }}};
2229   }
2230 
2231   assert(Offset == 0);
2232 
2233   // Try to fold a frame index directly into the MUBUF vaddr field, and any
2234   // offsets.
2235   Optional<int> FI;
2236   Register VAddr = Root.getReg();
2237   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2238     if (isBaseWithConstantOffset(Root, *MRI)) {
2239       const MachineOperand &LHS = RootDef->getOperand(1);
2240       const MachineOperand &RHS = RootDef->getOperand(2);
2241       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2242       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2243       if (LHSDef && RHSDef) {
2244         int64_t PossibleOffset =
2245             RHSDef->getOperand(1).getCImm()->getSExtValue();
2246         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2247             (!STI.privateMemoryResourceIsRangeChecked() ||
2248              KnownBits->signBitIsZero(LHS.getReg()))) {
2249           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2250             FI = LHSDef->getOperand(1).getIndex();
2251           else
2252             VAddr = LHS.getReg();
2253           Offset = PossibleOffset;
2254         }
2255       }
2256     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2257       FI = RootDef->getOperand(1).getIndex();
2258     }
2259   }
2260 
2261   // If we don't know this private access is a local stack object, it needs to
2262   // be relative to the entry point's scratch wave offset register.
2263   // TODO: Should split large offsets that don't fit like above.
2264   // TODO: Don't use scratch wave offset just because the offset didn't fit.
2265   Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2266                                    : Info->getScratchWaveOffsetReg();
2267 
2268   return {{[=](MachineInstrBuilder &MIB) { // rsrc
2269              MIB.addReg(Info->getScratchRSrcReg());
2270            },
2271            [=](MachineInstrBuilder &MIB) { // vaddr
2272              if (FI.hasValue())
2273                MIB.addFrameIndex(FI.getValue());
2274              else
2275                MIB.addReg(VAddr);
2276            },
2277            [=](MachineInstrBuilder &MIB) { // soffset
2278              MIB.addReg(SOffset);
2279            },
2280            [=](MachineInstrBuilder &MIB) { // offset
2281              MIB.addImm(Offset);
2282            }}};
2283 }
2284 
2285 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
2286                                                 int64_t Offset,
2287                                                 unsigned OffsetBits) const {
2288   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2289       (OffsetBits == 8 && !isUInt<8>(Offset)))
2290     return false;
2291 
2292   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2293     return true;
2294 
  // On Southern Islands, instructions with a negative base value and an offset
  // don't seem to work.
2297   return KnownBits->signBitIsZero(Base);
2298 }
2299 
2300 InstructionSelector::ComplexRendererFns
2301 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2302     MachineOperand &Root) const {
2303   MachineInstr *MI = Root.getParent();
2304   MachineBasicBlock *MBB = MI->getParent();
2305 
2306   int64_t Offset = 0;
2307   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2308       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2309     return {};
2310 
2311   const MachineFunction *MF = MBB->getParent();
2312   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2313   const MachineMemOperand *MMO = *MI->memoperands_begin();
2314   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2315 
2316   Register SOffsetReg = isStackPtrRelative(PtrInfo)
2317                             ? Info->getStackPtrOffsetReg()
2318                             : Info->getScratchWaveOffsetReg();
2319   return {{
2320       [=](MachineInstrBuilder &MIB) {
2321         MIB.addReg(Info->getScratchRSrcReg());
2322       },                                                         // rsrc
2323       [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2324       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }      // offset
2325   }};
2326 }
2327 
2328 std::pair<Register, unsigned>
2329 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
2330   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2331   if (!RootDef)
2332     return std::make_pair(Root.getReg(), 0);
2333 
2334   int64_t ConstAddr = 0;
2335   if (isBaseWithConstantOffset(Root, *MRI)) {
2336     const MachineOperand &LHS = RootDef->getOperand(1);
2337     const MachineOperand &RHS = RootDef->getOperand(2);
2338     const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2339     const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2340     if (LHSDef && RHSDef) {
2341       int64_t PossibleOffset =
2342         RHSDef->getOperand(1).getCImm()->getSExtValue();
2343       if (isDSOffsetLegal(LHS.getReg(), PossibleOffset, 16)) {
2344         // (add n0, c0)
2345         return std::make_pair(LHS.getReg(), PossibleOffset);
2346       }
2347     }
2348   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2349     // TODO
2350 
2351 
2352   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2353     // TODO
2354 
2355   }
2356 
2357   return std::make_pair(Root.getReg(), 0);
2358 }
2359 
2360 InstructionSelector::ComplexRendererFns
2361 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2362 
2363   Register Reg;
2364   unsigned Offset;
2365   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
2366   return {{
2367       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2368       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
2369     }};
2370 }
2371 
2372 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2373                                                  const MachineInstr &MI,
2374                                                  int OpIdx) const {
2375   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2376          "Expected G_CONSTANT");
2377   Optional<int64_t> CstVal = getConstantVRegVal(MI.getOperand(0).getReg(), *MRI);
2378   assert(CstVal && "Expected constant value");
2379   MIB.addImm(CstVal.getValue());
2380 }
2381 
2382 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
2383                                                 const MachineInstr &MI,
2384                                                 int OpIdx) const {
2385   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2386          "Expected G_CONSTANT");
2387   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
2388 }
2389 
2390 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
2391                                                  const MachineInstr &MI,
2392                                                  int OpIdx) const {
2393   assert(OpIdx == -1);
2394 
2395   const MachineOperand &Op = MI.getOperand(1);
2396   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
2397     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2398   else {
2399     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2400     MIB.addImm(Op.getCImm()->getSExtValue());
2401   }
2402 }
2403 
2404 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
2405                                                 const MachineInstr &MI,
2406                                                 int OpIdx) const {
2407   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2408          "Expected G_CONSTANT");
2409   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
2410 }
2411 
/// This only really exists to satisfy DAG type checking machinery, so it is a
/// no-op here.
2414 void AMDGPUInstructionSelector::renderTruncTImm32(MachineInstrBuilder &MIB,
2415                                                   const MachineInstr &MI,
2416                                                   int OpIdx) const {
2417   MIB.addImm(MI.getOperand(OpIdx).getImm());
2418 }
2419 
2420 void AMDGPUInstructionSelector::renderTruncTImm16(MachineInstrBuilder &MIB,
2421                                                   const MachineInstr &MI,
2422                                                   int OpIdx) const {
2423   MIB.addImm(MI.getOperand(OpIdx).getImm());
2424 }
2425 
2426 void AMDGPUInstructionSelector::renderTruncTImm1(MachineInstrBuilder &MIB,
2427                                                  const MachineInstr &MI,
2428                                                  int OpIdx) const {
2429   MIB.addImm(MI.getOperand(OpIdx).getImm());
2430 }
2431 
2432 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
2433   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
2434 }
2435 
2436 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
2437   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
2438 }
2439 
2440 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
2441   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
2442 }
2443 
2444 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
2445   return TII.isInlineConstant(Imm);
2446 }
2447