1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50     const AMDGPUTargetMachine &TM)
51     : InstructionSelector(), TII(*STI.getInstrInfo()),
52       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53       STI(STI),
54       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
67                                         CodeGenCoverage &CoverageInfo) {
68   MRI = &MF.getRegInfo();
69   InstructionSelector::setupMF(MF, KB, CoverageInfo);
70 }
71 
72 bool AMDGPUInstructionSelector::isVCC(Register Reg,
73                                       const MachineRegisterInfo &MRI) const {
74   if (Register::isPhysicalRegister(Reg))
75     return Reg == TRI.getVCC();
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
142 
143       // We can't trust the high bits at this point, so clear them.
144 
145       // TODO: Skip masking high bits if def is known boolean.
146 
147       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
148         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
149       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
150         .addImm(1)
151         .addReg(SrcReg);
152       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
153         .addImm(0)
154         .addReg(MaskedReg);
155 
156       if (!MRI->getRegClassOrNull(SrcReg))
157         MRI->setRegClass(SrcReg, SrcRC);
158       I.eraseFromParent();
159       return true;
160     }
161 
162     const TargetRegisterClass *RC =
163       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
164     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
165       return false;
166 
167     // Don't constrain the source register to a class so the def instruction
168     // handles it (unless it's undef).
169     //
170     // FIXME: This is a hack. When selecting the def, we neeed to know
171     // specifically know that the result is VCCRegBank, and not just an SGPR
172     // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
173     if (Src.isUndef()) {
174       const TargetRegisterClass *SrcRC =
175         TRI.getConstrainedRegClassForOperand(Src, *MRI);
176       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
177         return false;
178     }
179 
180     return true;
181   }
182 
183   for (const MachineOperand &MO : I.operands()) {
184     if (Register::isPhysicalRegister(MO.getReg()))
185       continue;
186 
187     const TargetRegisterClass *RC =
188             TRI.getConstrainedRegClassForOperand(MO, *MRI);
189     if (!RC)
190       continue;
191     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
192   }
193   return true;
194 }
195 
196 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
197   const Register DefReg = I.getOperand(0).getReg();
198   const LLT DefTy = MRI->getType(DefReg);
199 
200   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
201 
202   const RegClassOrRegBank &RegClassOrBank =
203     MRI->getRegClassOrRegBank(DefReg);
204 
205   const TargetRegisterClass *DefRC
206     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
207   if (!DefRC) {
208     if (!DefTy.isValid()) {
209       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
210       return false;
211     }
212 
213     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
214     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
215     if (!DefRC) {
216       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
217       return false;
218     }
219   }
220 
221   // TODO: Verify that all registers have the same bank
222   I.setDesc(TII.get(TargetOpcode::PHI));
223   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
224 }
225 
226 MachineOperand
227 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
228                                            const TargetRegisterClass &SubRC,
229                                            unsigned SubIdx) const {
230 
231   MachineInstr *MI = MO.getParent();
232   MachineBasicBlock *BB = MO.getParent()->getParent();
233   Register DstReg = MRI->createVirtualRegister(&SubRC);
234 
235   if (MO.isReg()) {
236     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
237     Register Reg = MO.getReg();
238     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
239             .addReg(Reg, 0, ComposedSubIdx);
240 
241     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
242                                      MO.isKill(), MO.isDead(), MO.isUndef(),
243                                      MO.isEarlyClobber(), 0, MO.isDebug(),
244                                      MO.isInternalRead());
245   }
246 
247   assert(MO.isImm());
248 
249   APInt Imm(64, MO.getImm());
250 
251   switch (SubIdx) {
252   default:
253     llvm_unreachable("do not know to split immediate with this sub index.");
254   case AMDGPU::sub0:
255     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
256   case AMDGPU::sub1:
257     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
258   }
259 }
260 
261 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
262   switch (Opc) {
263   case AMDGPU::G_AND:
264     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
265   case AMDGPU::G_OR:
266     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
267   case AMDGPU::G_XOR:
268     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
269   default:
270     llvm_unreachable("not a bit op");
271   }
272 }
273 
274 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
275   MachineOperand &Dst = I.getOperand(0);
276   MachineOperand &Src0 = I.getOperand(1);
277   MachineOperand &Src1 = I.getOperand(2);
278   Register DstReg = Dst.getReg();
279   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
280 
281   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
282   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
283     const TargetRegisterClass *RC = TRI.getBoolRC();
284     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
285                                            RC == &AMDGPU::SReg_64RegClass);
286     I.setDesc(TII.get(InstOpc));
287 
288     // FIXME: Hack to avoid turning the register bank into a register class.
289     // The selector for G_ICMP relies on seeing the register bank for the result
290     // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
291     // be ambiguous whether it's a scalar or vector bool.
292     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
293       MRI->setRegClass(Src0.getReg(), RC);
294     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
295       MRI->setRegClass(Src1.getReg(), RC);
296 
297     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
298   }
299 
300   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
301   // the result?
302   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
303     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
304     I.setDesc(TII.get(InstOpc));
305     // Dead implicit-def of scc
306     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
307                                            true, // isImp
308                                            false, // isKill
309                                            true)); // isDead
310     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
311   }
312 
313   return false;
314 }
315 
316 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
317   MachineBasicBlock *BB = I.getParent();
318   MachineFunction *MF = BB->getParent();
319   Register DstReg = I.getOperand(0).getReg();
320   const DebugLoc &DL = I.getDebugLoc();
321   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
322   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
323   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
324   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
325 
326   if (Size == 32) {
327     if (IsSALU) {
328       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
329       MachineInstr *Add =
330         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
331         .add(I.getOperand(1))
332         .add(I.getOperand(2));
333       I.eraseFromParent();
334       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
335     }
336 
337     if (STI.hasAddNoCarry()) {
338       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
339       I.setDesc(TII.get(Opc));
340       I.addOperand(*MF, MachineOperand::CreateImm(0));
341       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
342       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
343     }
344 
345     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
346 
347     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
348     MachineInstr *Add
349       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
350       .addDef(UnusedCarry, RegState::Dead)
351       .add(I.getOperand(1))
352       .add(I.getOperand(2))
353       .addImm(0);
354     I.eraseFromParent();
355     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
356   }
357 
358   assert(!Sub && "illegal sub should not reach here");
359 
360   const TargetRegisterClass &RC
361     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
362   const TargetRegisterClass &HalfRC
363     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
364 
365   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
366   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
367   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
368   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
369 
370   Register DstLo = MRI->createVirtualRegister(&HalfRC);
371   Register DstHi = MRI->createVirtualRegister(&HalfRC);
372 
373   if (IsSALU) {
374     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
375       .add(Lo1)
376       .add(Lo2);
377     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
378       .add(Hi1)
379       .add(Hi2);
380   } else {
381     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
382     Register CarryReg = MRI->createVirtualRegister(CarryRC);
383     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
384       .addDef(CarryReg)
385       .add(Lo1)
386       .add(Lo2)
387       .addImm(0);
388     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
389       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
390       .add(Hi1)
391       .add(Hi2)
392       .addReg(CarryReg, RegState::Kill)
393       .addImm(0);
394 
395     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
396       return false;
397   }
398 
399   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
400     .addReg(DstLo)
401     .addImm(AMDGPU::sub0)
402     .addReg(DstHi)
403     .addImm(AMDGPU::sub1);
404 
405 
406   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
407     return false;
408 
409   I.eraseFromParent();
410   return true;
411 }
412 
413 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
414   MachineInstr &I) const {
415   MachineBasicBlock *BB = I.getParent();
416   MachineFunction *MF = BB->getParent();
417   const DebugLoc &DL = I.getDebugLoc();
418   Register Dst0Reg = I.getOperand(0).getReg();
419   Register Dst1Reg = I.getOperand(1).getReg();
420   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
421                      I.getOpcode() == AMDGPU::G_UADDE;
422   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
423                           I.getOpcode() == AMDGPU::G_USUBE;
424 
425   if (isVCC(Dst1Reg, *MRI)) {
426       // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
427       // carry out despite the _i32 name. These were renamed in VI to _U32.
428       // FIXME: We should probably rename the opcodes here.
429     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
430     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
431     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
432     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
433     I.addOperand(*MF, MachineOperand::CreateImm(0));
434     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
435   }
436 
437   Register Src0Reg = I.getOperand(2).getReg();
438   Register Src1Reg = I.getOperand(3).getReg();
439 
440   if (HasCarryIn) {
441     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
442       .addReg(I.getOperand(4).getReg());
443   }
444 
445   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
446   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
447 
448   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
449     .add(I.getOperand(2))
450     .add(I.getOperand(3));
451   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
452     .addReg(AMDGPU::SCC);
453 
454   if (!MRI->getRegClassOrNull(Dst1Reg))
455     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
456 
457   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
458       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
459       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
460     return false;
461 
462   if (HasCarryIn &&
463       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
464                                     AMDGPU::SReg_32RegClass, *MRI))
465     return false;
466 
467   I.eraseFromParent();
468   return true;
469 }
470 
471 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
472   MachineBasicBlock *BB = I.getParent();
473   Register DstReg = I.getOperand(0).getReg();
474   Register SrcReg = I.getOperand(1).getReg();
475   LLT DstTy = MRI->getType(DstReg);
476   LLT SrcTy = MRI->getType(SrcReg);
477   const unsigned SrcSize = SrcTy.getSizeInBits();
478   const unsigned DstSize = DstTy.getSizeInBits();
479 
480   // TODO: Should handle any multiple of 32 offset.
481   unsigned Offset = I.getOperand(2).getImm();
482   if (Offset % DstSize != 0)
483     return false;
484 
485   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
486   const TargetRegisterClass *SrcRC =
487     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
488   if (!SrcRC)
489     return false;
490 
491   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
492 
493   const DebugLoc &DL = I.getDebugLoc();
494   MachineInstr *Copy = BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
495                                .addReg(SrcReg, 0, SubRegs[Offset / DstSize]);
496 
497   for (const MachineOperand &MO : Copy->operands()) {
498     const TargetRegisterClass *RC =
499             TRI.getConstrainedRegClassForOperand(MO, *MRI);
500     if (!RC)
501       continue;
502     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
503   }
504   I.eraseFromParent();
505   return true;
506 }
507 
508 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
509   MachineBasicBlock *BB = MI.getParent();
510   Register DstReg = MI.getOperand(0).getReg();
511   LLT DstTy = MRI->getType(DstReg);
512   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
513 
514   const unsigned SrcSize = SrcTy.getSizeInBits();
515   if (SrcSize < 32)
516     return selectImpl(MI, *CoverageInfo);
517 
518   const DebugLoc &DL = MI.getDebugLoc();
519   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
520   const unsigned DstSize = DstTy.getSizeInBits();
521   const TargetRegisterClass *DstRC =
522     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
523   if (!DstRC)
524     return false;
525 
526   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
527   MachineInstrBuilder MIB =
528     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
529   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
530     MachineOperand &Src = MI.getOperand(I + 1);
531     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
532     MIB.addImm(SubRegs[I]);
533 
534     const TargetRegisterClass *SrcRC
535       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
536     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
537       return false;
538   }
539 
540   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
541     return false;
542 
543   MI.eraseFromParent();
544   return true;
545 }
546 
547 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
548   MachineBasicBlock *BB = MI.getParent();
549   const int NumDst = MI.getNumOperands() - 1;
550 
551   MachineOperand &Src = MI.getOperand(NumDst);
552 
553   Register SrcReg = Src.getReg();
554   Register DstReg0 = MI.getOperand(0).getReg();
555   LLT DstTy = MRI->getType(DstReg0);
556   LLT SrcTy = MRI->getType(SrcReg);
557 
558   const unsigned DstSize = DstTy.getSizeInBits();
559   const unsigned SrcSize = SrcTy.getSizeInBits();
560   const DebugLoc &DL = MI.getDebugLoc();
561   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
562 
563   const TargetRegisterClass *SrcRC =
564     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
565   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
566     return false;
567 
568   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
569 
570   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
571   // source, and this relies on the fact that the same subregister indices are
572   // used for both.
573   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
574   for (int I = 0, E = NumDst; I != E; ++I) {
575     MachineOperand &Dst = MI.getOperand(I);
576     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
577       .addReg(SrcReg, SrcFlags, SubRegs[I]);
578 
579     const TargetRegisterClass *DstRC =
580       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
581     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
582       return false;
583   }
584 
585   MI.eraseFromParent();
586   return true;
587 }
588 
589 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
590   return selectG_ADD_SUB(I);
591 }
592 
593 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
594   const MachineOperand &MO = I.getOperand(0);
595 
596   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
597   // regbank check here is to know why getConstrainedRegClassForOperand failed.
598   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
599   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
600       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
601     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
602     return true;
603   }
604 
605   return false;
606 }
607 
608 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
609   MachineBasicBlock *BB = I.getParent();
610 
611   Register DstReg = I.getOperand(0).getReg();
612   Register Src0Reg = I.getOperand(1).getReg();
613   Register Src1Reg = I.getOperand(2).getReg();
614   LLT Src1Ty = MRI->getType(Src1Reg);
615 
616   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
617   unsigned InsSize = Src1Ty.getSizeInBits();
618 
619   int64_t Offset = I.getOperand(3).getImm();
620   if (Offset % 32 != 0)
621     return false;
622 
623   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
624   if (SubReg == AMDGPU::NoSubRegister)
625     return false;
626 
627   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
628   const TargetRegisterClass *DstRC =
629     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
630   if (!DstRC)
631     return false;
632 
633   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
634   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
635   const TargetRegisterClass *Src0RC =
636     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
637   const TargetRegisterClass *Src1RC =
638     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
639 
640   // Deal with weird cases where the class only partially supports the subreg
641   // index.
642   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
643   if (!Src0RC)
644     return false;
645 
646   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
647       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
648       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
649     return false;
650 
651   const DebugLoc &DL = I.getDebugLoc();
652   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
653     .addReg(Src0Reg)
654     .addReg(Src1Reg)
655     .addImm(SubReg);
656 
657   I.eraseFromParent();
658   return true;
659 }
660 
661 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
662   if (STI.getLDSBankCount() != 16)
663     return selectImpl(MI, *CoverageInfo);
664 
665   Register Dst = MI.getOperand(0).getReg();
666   Register Src0 = MI.getOperand(2).getReg();
667   Register M0Val = MI.getOperand(6).getReg();
668   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
669       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
670       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
671     return false;
672 
673   // This requires 2 instructions. It is possible to write a pattern to support
674   // this, but the generated isel emitter doesn't correctly deal with multiple
675   // output instructions using the same physical register input. The copy to m0
676   // is incorrectly placed before the second instruction.
677   //
678   // TODO: Match source modifiers.
679 
680   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
681   const DebugLoc &DL = MI.getDebugLoc();
682   MachineBasicBlock *MBB = MI.getParent();
683 
684   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
685     .addReg(M0Val);
686   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
687     .addImm(2)
688     .addImm(MI.getOperand(4).getImm())  // $attr
689     .addImm(MI.getOperand(3).getImm()); // $attrchan
690 
691   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
692     .addImm(0)                          // $src0_modifiers
693     .addReg(Src0)                       // $src0
694     .addImm(MI.getOperand(4).getImm())  // $attr
695     .addImm(MI.getOperand(3).getImm())  // $attrchan
696     .addImm(0)                          // $src2_modifiers
697     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
698     .addImm(MI.getOperand(5).getImm())  // $high
699     .addImm(0)                          // $clamp
700     .addImm(0);                         // $omod
701 
702   MI.eraseFromParent();
703   return true;
704 }
705 
706 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
707   unsigned IntrinsicID = I.getIntrinsicID();
708   switch (IntrinsicID) {
709   case Intrinsic::amdgcn_if_break: {
710     MachineBasicBlock *BB = I.getParent();
711 
712     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
713     // SelectionDAG uses for wave32 vs wave64.
714     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
715       .add(I.getOperand(0))
716       .add(I.getOperand(2))
717       .add(I.getOperand(3));
718 
719     Register DstReg = I.getOperand(0).getReg();
720     Register Src0Reg = I.getOperand(2).getReg();
721     Register Src1Reg = I.getOperand(3).getReg();
722 
723     I.eraseFromParent();
724 
725     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
726       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
727 
728     return true;
729   }
730   case Intrinsic::amdgcn_interp_p1_f16:
731     return selectInterpP1F16(I);
732   case Intrinsic::amdgcn_wqm:
733     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
734   case Intrinsic::amdgcn_softwqm:
735     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
736   case Intrinsic::amdgcn_wwm:
737     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
738   default:
739     return selectImpl(I, *CoverageInfo);
740   }
741 }
742 
743 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
744   if (Size != 32 && Size != 64)
745     return -1;
746   switch (P) {
747   default:
748     llvm_unreachable("Unknown condition code!");
749   case CmpInst::ICMP_NE:
750     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
751   case CmpInst::ICMP_EQ:
752     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
753   case CmpInst::ICMP_SGT:
754     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
755   case CmpInst::ICMP_SGE:
756     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
757   case CmpInst::ICMP_SLT:
758     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
759   case CmpInst::ICMP_SLE:
760     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
761   case CmpInst::ICMP_UGT:
762     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
763   case CmpInst::ICMP_UGE:
764     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
765   case CmpInst::ICMP_ULT:
766     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
767   case CmpInst::ICMP_ULE:
768     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
769   }
770 }
771 
772 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
773                                               unsigned Size) const {
774   if (Size == 64) {
775     if (!STI.hasScalarCompareEq64())
776       return -1;
777 
778     switch (P) {
779     case CmpInst::ICMP_NE:
780       return AMDGPU::S_CMP_LG_U64;
781     case CmpInst::ICMP_EQ:
782       return AMDGPU::S_CMP_EQ_U64;
783     default:
784       return -1;
785     }
786   }
787 
788   if (Size != 32)
789     return -1;
790 
791   switch (P) {
792   case CmpInst::ICMP_NE:
793     return AMDGPU::S_CMP_LG_U32;
794   case CmpInst::ICMP_EQ:
795     return AMDGPU::S_CMP_EQ_U32;
796   case CmpInst::ICMP_SGT:
797     return AMDGPU::S_CMP_GT_I32;
798   case CmpInst::ICMP_SGE:
799     return AMDGPU::S_CMP_GE_I32;
800   case CmpInst::ICMP_SLT:
801     return AMDGPU::S_CMP_LT_I32;
802   case CmpInst::ICMP_SLE:
803     return AMDGPU::S_CMP_LE_I32;
804   case CmpInst::ICMP_UGT:
805     return AMDGPU::S_CMP_GT_U32;
806   case CmpInst::ICMP_UGE:
807     return AMDGPU::S_CMP_GE_U32;
808   case CmpInst::ICMP_ULT:
809     return AMDGPU::S_CMP_LT_U32;
810   case CmpInst::ICMP_ULE:
811     return AMDGPU::S_CMP_LE_U32;
812   default:
813     llvm_unreachable("Unknown condition code!");
814   }
815 }
816 
817 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
818   MachineBasicBlock *BB = I.getParent();
819   const DebugLoc &DL = I.getDebugLoc();
820 
821   Register SrcReg = I.getOperand(2).getReg();
822   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
823 
824   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
825 
826   Register CCReg = I.getOperand(0).getReg();
827   if (!isVCC(CCReg, *MRI)) {
828     int Opcode = getS_CMPOpcode(Pred, Size);
829     if (Opcode == -1)
830       return false;
831     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
832             .add(I.getOperand(2))
833             .add(I.getOperand(3));
834     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
835       .addReg(AMDGPU::SCC);
836     bool Ret =
837         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
838         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
839     I.eraseFromParent();
840     return Ret;
841   }
842 
843   int Opcode = getV_CMPOpcode(Pred, Size);
844   if (Opcode == -1)
845     return false;
846 
847   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
848             I.getOperand(0).getReg())
849             .add(I.getOperand(2))
850             .add(I.getOperand(3));
851   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
852                                *TRI.getBoolRC(), *MRI);
853   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
854   I.eraseFromParent();
855   return Ret;
856 }
857 
858 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
859   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
860   // SelectionDAG uses for wave32 vs wave64.
861   MachineBasicBlock *BB = MI.getParent();
862   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
863       .add(MI.getOperand(1));
864 
865   Register Reg = MI.getOperand(1).getReg();
866   MI.eraseFromParent();
867 
868   if (!MRI->getRegClassOrNull(Reg))
869     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
870   return true;
871 }
872 
873 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
874   switch (MF.getFunction().getCallingConv()) {
875   case CallingConv::AMDGPU_PS:
876     return 1;
877   case CallingConv::AMDGPU_VS:
878     return 2;
879   case CallingConv::AMDGPU_GS:
880     return 3;
881   case CallingConv::AMDGPU_HS:
882   case CallingConv::AMDGPU_LS:
883   case CallingConv::AMDGPU_ES:
884     report_fatal_error("ds_ordered_count unsupported for this calling conv");
885   case CallingConv::AMDGPU_CS:
886   case CallingConv::AMDGPU_KERNEL:
887   case CallingConv::C:
888   case CallingConv::Fast:
889   default:
890     // Assume other calling conventions are various compute callable functions
891     return 0;
892   }
893 }
894 
895 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
896   MachineInstr &MI, Intrinsic::ID IntrID) const {
897   MachineBasicBlock *MBB = MI.getParent();
898   MachineFunction *MF = MBB->getParent();
899   const DebugLoc &DL = MI.getDebugLoc();
900 
901   unsigned IndexOperand = MI.getOperand(7).getImm();
902   bool WaveRelease = MI.getOperand(8).getImm() != 0;
903   bool WaveDone = MI.getOperand(9).getImm() != 0;
904 
905   if (WaveDone && !WaveRelease)
906     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
907 
908   unsigned OrderedCountIndex = IndexOperand & 0x3f;
909   IndexOperand &= ~0x3f;
910   unsigned CountDw = 0;
911 
912   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
913     CountDw = (IndexOperand >> 24) & 0xf;
914     IndexOperand &= ~(0xf << 24);
915 
916     if (CountDw < 1 || CountDw > 4) {
917       report_fatal_error(
918         "ds_ordered_count: dword count must be between 1 and 4");
919     }
920   }
921 
922   if (IndexOperand)
923     report_fatal_error("ds_ordered_count: bad index operand");
924 
925   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
926   unsigned ShaderType = getDSShaderTypeValue(*MF);
927 
928   unsigned Offset0 = OrderedCountIndex << 2;
929   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
930                      (Instruction << 4);
931 
932   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
933     Offset1 |= (CountDw - 1) << 6;
934 
935   unsigned Offset = Offset0 | (Offset1 << 8);
936 
937   Register M0Val = MI.getOperand(2).getReg();
938   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
939     .addReg(M0Val);
940 
941   Register DstReg = MI.getOperand(0).getReg();
942   Register ValReg = MI.getOperand(3).getReg();
943   MachineInstrBuilder DS =
944     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
945       .addReg(ValReg)
946       .addImm(Offset)
947       .cloneMemRefs(MI);
948 
949   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
950     return false;
951 
952   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
953   MI.eraseFromParent();
954   return Ret;
955 }
956 
957 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
958   switch (IntrID) {
959   case Intrinsic::amdgcn_ds_gws_init:
960     return AMDGPU::DS_GWS_INIT;
961   case Intrinsic::amdgcn_ds_gws_barrier:
962     return AMDGPU::DS_GWS_BARRIER;
963   case Intrinsic::amdgcn_ds_gws_sema_v:
964     return AMDGPU::DS_GWS_SEMA_V;
965   case Intrinsic::amdgcn_ds_gws_sema_br:
966     return AMDGPU::DS_GWS_SEMA_BR;
967   case Intrinsic::amdgcn_ds_gws_sema_p:
968     return AMDGPU::DS_GWS_SEMA_P;
969   case Intrinsic::amdgcn_ds_gws_sema_release_all:
970     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
971   default:
972     llvm_unreachable("not a gws intrinsic");
973   }
974 }
975 
976 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
977                                                      Intrinsic::ID IID) const {
978   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
979       !STI.hasGWSSemaReleaseAll())
980     return false;
981 
982   // intrinsic ID, vsrc, offset
983   const bool HasVSrc = MI.getNumOperands() == 3;
984   assert(HasVSrc || MI.getNumOperands() == 2);
985 
986   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
987   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
988   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
989     return false;
990 
991   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
992   assert(OffsetDef);
993 
994   unsigned ImmOffset;
995 
996   MachineBasicBlock *MBB = MI.getParent();
997   const DebugLoc &DL = MI.getDebugLoc();
998 
999   MachineInstr *Readfirstlane = nullptr;
1000 
1001   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1002   // incoming offset, in case there's an add of a constant. We'll have to put it
1003   // back later.
1004   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1005     Readfirstlane = OffsetDef;
1006     BaseOffset = OffsetDef->getOperand(1).getReg();
1007     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1008   }
1009 
1010   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1011     // If we have a constant offset, try to use the 0 in m0 as the base.
1012     // TODO: Look into changing the default m0 initialization value. If the
1013     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1014     // the immediate offset.
1015 
1016     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1017     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1018       .addImm(0);
1019   } else {
1020     std::tie(BaseOffset, ImmOffset, OffsetDef)
1021       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1022 
1023     if (Readfirstlane) {
1024       // We have the constant offset now, so put the readfirstlane back on the
1025       // variable component.
1026       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1027         return false;
1028 
1029       Readfirstlane->getOperand(1).setReg(BaseOffset);
1030       BaseOffset = Readfirstlane->getOperand(0).getReg();
1031     } else {
1032       if (!RBI.constrainGenericRegister(BaseOffset,
1033                                         AMDGPU::SReg_32RegClass, *MRI))
1034         return false;
1035     }
1036 
1037     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1038     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1039       .addReg(BaseOffset)
1040       .addImm(16);
1041 
1042     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1043       .addReg(M0Base);
1044   }
1045 
1046   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1047   // offset field) % 64. Some versions of the programming guide omit the m0
1048   // part, or claim it's from offset 0.
1049   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1050 
1051   if (HasVSrc) {
1052     Register VSrc = MI.getOperand(1).getReg();
1053     MIB.addReg(VSrc);
1054     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1055       return false;
1056   }
1057 
1058   MIB.addImm(ImmOffset)
1059      .addImm(-1) // $gds
1060      .cloneMemRefs(MI);
1061 
1062   MI.eraseFromParent();
1063   return true;
1064 }
1065 
1066 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1067                                                       bool IsAppend) const {
1068   Register PtrBase = MI.getOperand(2).getReg();
1069   LLT PtrTy = MRI->getType(PtrBase);
1070   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1071 
1072   unsigned Offset;
1073   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1074 
1075   // TODO: Should this try to look through readfirstlane like GWS?
1076   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1077     PtrBase = MI.getOperand(2).getReg();
1078     Offset = 0;
1079   }
1080 
1081   MachineBasicBlock *MBB = MI.getParent();
1082   const DebugLoc &DL = MI.getDebugLoc();
1083   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1084 
1085   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1086     .addReg(PtrBase);
1087   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1088     .addImm(Offset)
1089     .addImm(IsGDS ? -1 : 0)
1090     .cloneMemRefs(MI);
1091 
1092   MI.eraseFromParent();
1093   return true;
1094 }
1095 
1096 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1097     MachineInstr &I) const {
1098   unsigned IntrinsicID = I.getIntrinsicID();
1099   switch (IntrinsicID) {
1100   case Intrinsic::amdgcn_end_cf:
1101     return selectEndCfIntrinsic(I);
1102   case Intrinsic::amdgcn_ds_ordered_add:
1103   case Intrinsic::amdgcn_ds_ordered_swap:
1104     return selectDSOrderedIntrinsic(I, IntrinsicID);
1105   case Intrinsic::amdgcn_ds_gws_init:
1106   case Intrinsic::amdgcn_ds_gws_barrier:
1107   case Intrinsic::amdgcn_ds_gws_sema_v:
1108   case Intrinsic::amdgcn_ds_gws_sema_br:
1109   case Intrinsic::amdgcn_ds_gws_sema_p:
1110   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1111     return selectDSGWSIntrinsic(I, IntrinsicID);
1112   case Intrinsic::amdgcn_ds_append:
1113     return selectDSAppendConsume(I, true);
1114   case Intrinsic::amdgcn_ds_consume:
1115     return selectDSAppendConsume(I, false);
1116   default:
1117     return selectImpl(I, *CoverageInfo);
1118   }
1119 }
1120 
1121 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1122   if (selectImpl(I, *CoverageInfo))
1123     return true;
1124 
1125   MachineBasicBlock *BB = I.getParent();
1126   const DebugLoc &DL = I.getDebugLoc();
1127 
1128   Register DstReg = I.getOperand(0).getReg();
1129   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1130   assert(Size <= 32 || Size == 64);
1131   const MachineOperand &CCOp = I.getOperand(1);
1132   Register CCReg = CCOp.getReg();
1133   if (!isVCC(CCReg, *MRI)) {
1134     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1135                                          AMDGPU::S_CSELECT_B32;
1136     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1137             .addReg(CCReg);
1138 
1139     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1140     // bank, because it does not cover the register class that we used to represent
1141     // for it.  So we need to manually set the register class here.
1142     if (!MRI->getRegClassOrNull(CCReg))
1143         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1144     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1145             .add(I.getOperand(2))
1146             .add(I.getOperand(3));
1147 
1148     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1149                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1150     I.eraseFromParent();
1151     return Ret;
1152   }
1153 
1154   // Wide VGPR select should have been split in RegBankSelect.
1155   if (Size > 32)
1156     return false;
1157 
1158   MachineInstr *Select =
1159       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1160               .addImm(0)
1161               .add(I.getOperand(3))
1162               .addImm(0)
1163               .add(I.getOperand(2))
1164               .add(I.getOperand(1));
1165 
1166   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1167   I.eraseFromParent();
1168   return Ret;
1169 }
1170 
1171 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1172   initM0(I);
1173   return selectImpl(I, *CoverageInfo);
1174 }
1175 
1176 static int sizeToSubRegIndex(unsigned Size) {
1177   switch (Size) {
1178   case 32:
1179     return AMDGPU::sub0;
1180   case 64:
1181     return AMDGPU::sub0_sub1;
1182   case 96:
1183     return AMDGPU::sub0_sub1_sub2;
1184   case 128:
1185     return AMDGPU::sub0_sub1_sub2_sub3;
1186   case 256:
1187     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1188   default:
1189     if (Size < 32)
1190       return AMDGPU::sub0;
1191     if (Size > 256)
1192       return -1;
1193     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1194   }
1195 }
1196 
1197 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1198   Register DstReg = I.getOperand(0).getReg();
1199   Register SrcReg = I.getOperand(1).getReg();
1200   const LLT DstTy = MRI->getType(DstReg);
1201   const LLT SrcTy = MRI->getType(SrcReg);
1202   if (!DstTy.isScalar())
1203     return false;
1204 
1205   const LLT S1 = LLT::scalar(1);
1206 
1207   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1208   const RegisterBank *DstRB;
1209   if (DstTy == S1) {
1210     // This is a special case. We don't treat s1 for legalization artifacts as
1211     // vcc booleans.
1212     DstRB = SrcRB;
1213   } else {
1214     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1215     if (SrcRB != DstRB)
1216       return false;
1217   }
1218 
1219   unsigned DstSize = DstTy.getSizeInBits();
1220   unsigned SrcSize = SrcTy.getSizeInBits();
1221 
1222   const TargetRegisterClass *SrcRC
1223     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1224   const TargetRegisterClass *DstRC
1225     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1226 
1227   if (SrcSize > 32) {
1228     int SubRegIdx = sizeToSubRegIndex(DstSize);
1229     if (SubRegIdx == -1)
1230       return false;
1231 
1232     // Deal with weird cases where the class only partially supports the subreg
1233     // index.
1234     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1235     if (!SrcRC)
1236       return false;
1237 
1238     I.getOperand(1).setSubReg(SubRegIdx);
1239   }
1240 
1241   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1242       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1243     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1244     return false;
1245   }
1246 
1247   I.setDesc(TII.get(TargetOpcode::COPY));
1248   return true;
1249 }
1250 
1251 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1252 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1253   Mask = maskTrailingOnes<unsigned>(Size);
1254   int SignedMask = static_cast<int>(Mask);
1255   return SignedMask >= -16 && SignedMask <= 64;
1256 }
1257 
1258 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1259 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1260   Register Reg, const MachineRegisterInfo &MRI,
1261   const TargetRegisterInfo &TRI) const {
1262   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1263   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1264     return RB;
1265 
1266   // Ignore the type, since we don't use vcc in artifacts.
1267   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1268     return &RBI.getRegBankFromRegClass(*RC, LLT());
1269   return nullptr;
1270 }
1271 
1272 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1273   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1274   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1275   const DebugLoc &DL = I.getDebugLoc();
1276   MachineBasicBlock &MBB = *I.getParent();
1277   const Register DstReg = I.getOperand(0).getReg();
1278   const Register SrcReg = I.getOperand(1).getReg();
1279 
1280   const LLT DstTy = MRI->getType(DstReg);
1281   const LLT SrcTy = MRI->getType(SrcReg);
1282   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1283     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1284   const unsigned DstSize = DstTy.getSizeInBits();
1285   if (!DstTy.isScalar())
1286     return false;
1287 
1288   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1289     return selectCOPY(I);
1290 
1291   // Artifact casts should never use vcc.
1292   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1293 
1294   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1295     // 64-bit should have been split up in RegBankSelect
1296 
1297     // Try to use an and with a mask if it will save code size.
1298     unsigned Mask;
1299     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1300       MachineInstr *ExtI =
1301       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1302         .addImm(Mask)
1303         .addReg(SrcReg);
1304       I.eraseFromParent();
1305       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1306     }
1307 
1308     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1309     MachineInstr *ExtI =
1310       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1311       .addReg(SrcReg)
1312       .addImm(0) // Offset
1313       .addImm(SrcSize); // Width
1314     I.eraseFromParent();
1315     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1316   }
1317 
1318   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1319     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
1320       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
1321     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
1322       return false;
1323 
1324     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1325       const unsigned SextOpc = SrcSize == 8 ?
1326         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1327       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1328         .addReg(SrcReg);
1329       I.eraseFromParent();
1330       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1331     }
1332 
1333     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1334     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1335 
1336     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1337     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
1338       // We need a 64-bit register source, but the high bits don't matter.
1339       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1340       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1341       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
1342 
1343       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1344       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1345         .addReg(SrcReg, 0, SubReg)
1346         .addImm(AMDGPU::sub0)
1347         .addReg(UndefReg)
1348         .addImm(AMDGPU::sub1);
1349 
1350       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1351         .addReg(ExtReg)
1352         .addImm(SrcSize << 16);
1353 
1354       I.eraseFromParent();
1355       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1356     }
1357 
1358     unsigned Mask;
1359     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1360       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1361         .addReg(SrcReg)
1362         .addImm(Mask);
1363     } else {
1364       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1365         .addReg(SrcReg)
1366         .addImm(SrcSize << 16);
1367     }
1368 
1369     I.eraseFromParent();
1370     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1371   }
1372 
1373   return false;
1374 }
1375 
1376 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1377   MachineBasicBlock *BB = I.getParent();
1378   MachineOperand &ImmOp = I.getOperand(1);
1379 
1380   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1381   if (ImmOp.isFPImm()) {
1382     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1383     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1384   } else if (ImmOp.isCImm()) {
1385     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1386   }
1387 
1388   Register DstReg = I.getOperand(0).getReg();
1389   unsigned Size;
1390   bool IsSgpr;
1391   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1392   if (RB) {
1393     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1394     Size = MRI->getType(DstReg).getSizeInBits();
1395   } else {
1396     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1397     IsSgpr = TRI.isSGPRClass(RC);
1398     Size = TRI.getRegSizeInBits(*RC);
1399   }
1400 
1401   if (Size != 32 && Size != 64)
1402     return false;
1403 
1404   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1405   if (Size == 32) {
1406     I.setDesc(TII.get(Opcode));
1407     I.addImplicitDefUseOperands(*MF);
1408     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1409   }
1410 
1411   const DebugLoc &DL = I.getDebugLoc();
1412 
1413   APInt Imm(Size, I.getOperand(1).getImm());
1414 
1415   MachineInstr *ResInst;
1416   if (IsSgpr && TII.isInlineConstant(Imm)) {
1417     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1418       .addImm(I.getOperand(1).getImm());
1419   } else {
1420     const TargetRegisterClass *RC = IsSgpr ?
1421       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1422     Register LoReg = MRI->createVirtualRegister(RC);
1423     Register HiReg = MRI->createVirtualRegister(RC);
1424 
1425     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1426       .addImm(Imm.trunc(32).getZExtValue());
1427 
1428     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1429       .addImm(Imm.ashr(32).getZExtValue());
1430 
1431     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1432       .addReg(LoReg)
1433       .addImm(AMDGPU::sub0)
1434       .addReg(HiReg)
1435       .addImm(AMDGPU::sub1);
1436   }
1437 
1438   // We can't call constrainSelectedInstRegOperands here, because it doesn't
1439   // work for target independent opcodes
1440   I.eraseFromParent();
1441   const TargetRegisterClass *DstRC =
1442     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1443   if (!DstRC)
1444     return true;
1445   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1446 }
1447 
1448 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
1449   // Only manually handle the f64 SGPR case.
1450   //
1451   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
1452   // the bit ops theoretically have a second result due to the implicit def of
1453   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
1454   // that is easy by disabling the check. The result works, but uses a
1455   // nonsensical sreg32orlds_and_sreg_1 regclass.
1456   //
1457   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
1458   // the variadic REG_SEQUENCE operands.
1459 
1460   Register Dst = MI.getOperand(0).getReg();
1461   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
1462   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
1463       MRI->getType(Dst) != LLT::scalar(64))
1464     return false;
1465 
1466   Register Src = MI.getOperand(1).getReg();
1467   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
1468   if (Fabs)
1469     Src = Fabs->getOperand(1).getReg();
1470 
1471   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
1472       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
1473     return false;
1474 
1475   MachineBasicBlock *BB = MI.getParent();
1476   const DebugLoc &DL = MI.getDebugLoc();
1477   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1478   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1479   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1480   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1481 
1482   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
1483     .addReg(Src, 0, AMDGPU::sub0);
1484   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
1485     .addReg(Src, 0, AMDGPU::sub1);
1486   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
1487     .addImm(0x80000000);
1488 
1489   // Set or toggle sign bit.
1490   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
1491   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
1492     .addReg(HiReg)
1493     .addReg(ConstReg);
1494   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
1495     .addReg(LoReg)
1496     .addImm(AMDGPU::sub0)
1497     .addReg(OpReg)
1498     .addImm(AMDGPU::sub1);
1499   MI.eraseFromParent();
1500   return true;
1501 }
1502 
1503 static bool isConstant(const MachineInstr &MI) {
1504   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1505 }
1506 
1507 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1508     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1509 
1510   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1511 
1512   assert(PtrMI);
1513 
1514   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1515     return;
1516 
1517   GEPInfo GEPInfo(*PtrMI);
1518 
1519   for (unsigned i = 1; i != 3; ++i) {
1520     const MachineOperand &GEPOp = PtrMI->getOperand(i);
1521     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1522     assert(OpDef);
1523     if (i == 2 && isConstant(*OpDef)) {
1524       // TODO: Could handle constant base + variable offset, but a combine
1525       // probably should have commuted it.
1526       assert(GEPInfo.Imm == 0);
1527       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1528       continue;
1529     }
1530     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1531     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1532       GEPInfo.SgprParts.push_back(GEPOp.getReg());
1533     else
1534       GEPInfo.VgprParts.push_back(GEPOp.getReg());
1535   }
1536 
1537   AddrInfo.push_back(GEPInfo);
1538   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1539 }
1540 
1541 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1542   if (!MI.hasOneMemOperand())
1543     return false;
1544 
1545   const MachineMemOperand *MMO = *MI.memoperands_begin();
1546   const Value *Ptr = MMO->getValue();
1547 
1548   // UndefValue means this is a load of a kernel input.  These are uniform.
1549   // Sometimes LDS instructions have constant pointers.
1550   // If Ptr is null, then that means this mem operand contains a
1551   // PseudoSourceValue like GOT.
1552   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1553       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1554     return true;
1555 
1556   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1557     return true;
1558 
1559   const Instruction *I = dyn_cast<Instruction>(Ptr);
1560   return I && I->getMetadata("amdgpu.uniform");
1561 }
1562 
1563 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1564   for (const GEPInfo &GEPInfo : AddrInfo) {
1565     if (!GEPInfo.VgprParts.empty())
1566       return true;
1567   }
1568   return false;
1569 }
1570 
1571 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1572   MachineBasicBlock *BB = I.getParent();
1573 
1574   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1575   unsigned AS = PtrTy.getAddressSpace();
1576   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1577       STI.ldsRequiresM0Init()) {
1578     // If DS instructions require M0 initializtion, insert it before selecting.
1579     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1580       .addImm(-1);
1581   }
1582 }
1583 
1584 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1585   initM0(I);
1586   return selectImpl(I, *CoverageInfo);
1587 }
1588 
1589 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1590   MachineBasicBlock *BB = I.getParent();
1591   MachineOperand &CondOp = I.getOperand(0);
1592   Register CondReg = CondOp.getReg();
1593   const DebugLoc &DL = I.getDebugLoc();
1594 
1595   unsigned BrOpcode;
1596   Register CondPhysReg;
1597   const TargetRegisterClass *ConstrainRC;
1598 
1599   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1600   // whether the branch is uniform when selecting the instruction. In
1601   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
1602   // RegBankSelect knows what it's doing if the branch condition is scc, even
1603   // though it currently does not.
1604   if (!isVCC(CondReg, *MRI)) {
1605     if (MRI->getType(CondReg) != LLT::scalar(32))
1606       return false;
1607 
1608     CondPhysReg = AMDGPU::SCC;
1609     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1610     // FIXME: Hack for isSCC tests
1611     ConstrainRC = &AMDGPU::SGPR_32RegClass;
1612   } else {
1613     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
1614     // We sort of know that a VCC producer based on the register bank, that ands
1615     // inactive lanes with 0. What if there was a logical operation with vcc
1616     // producers in different blocks/with different exec masks?
1617     // FIXME: Should scc->vcc copies and with exec?
1618     CondPhysReg = TRI.getVCC();
1619     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1620     ConstrainRC = TRI.getBoolRC();
1621   }
1622 
1623   if (!MRI->getRegClassOrNull(CondReg))
1624     MRI->setRegClass(CondReg, ConstrainRC);
1625 
1626   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1627     .addReg(CondReg);
1628   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1629     .addMBB(I.getOperand(1).getMBB());
1630 
1631   I.eraseFromParent();
1632   return true;
1633 }
1634 
1635 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
1636   MachineInstr &I) const {
1637   Register DstReg = I.getOperand(0).getReg();
1638   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1639   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1640   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1641   if (IsVGPR)
1642     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1643 
1644   return RBI.constrainGenericRegister(
1645     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1646 }
1647 
1648 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1649   uint64_t Align = I.getOperand(2).getImm();
1650   const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1651 
1652   MachineBasicBlock *BB = I.getParent();
1653 
1654   Register DstReg = I.getOperand(0).getReg();
1655   Register SrcReg = I.getOperand(1).getReg();
1656 
1657   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1658   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1659   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1660   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1661   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1662   const TargetRegisterClass &RegRC
1663     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1664 
1665   LLT Ty = MRI->getType(DstReg);
1666 
1667   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1668                                                                   *MRI);
1669   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1670                                                                   *MRI);
1671   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1672       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1673     return false;
1674 
1675   const DebugLoc &DL = I.getDebugLoc();
1676   Register ImmReg = MRI->createVirtualRegister(&RegRC);
1677   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1678     .addImm(Mask);
1679 
1680   if (Ty.getSizeInBits() == 32) {
1681     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1682       .addReg(SrcReg)
1683       .addReg(ImmReg);
1684     I.eraseFromParent();
1685     return true;
1686   }
1687 
1688   Register HiReg = MRI->createVirtualRegister(&RegRC);
1689   Register LoReg = MRI->createVirtualRegister(&RegRC);
1690   Register MaskLo = MRI->createVirtualRegister(&RegRC);
1691 
1692   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1693     .addReg(SrcReg, 0, AMDGPU::sub0);
1694   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1695     .addReg(SrcReg, 0, AMDGPU::sub1);
1696 
1697   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1698     .addReg(LoReg)
1699     .addReg(ImmReg);
1700   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1701     .addReg(MaskLo)
1702     .addImm(AMDGPU::sub0)
1703     .addReg(HiReg)
1704     .addImm(AMDGPU::sub1);
1705   I.eraseFromParent();
1706   return true;
1707 }
1708 
1709 /// Return the register to use for the index value, and the subregister to use
1710 /// for the indirectly accessed register.
1711 static std::pair<Register, unsigned>
1712 computeIndirectRegIndex(MachineRegisterInfo &MRI,
1713                         const SIRegisterInfo &TRI,
1714                         const TargetRegisterClass *SuperRC,
1715                         Register IdxReg,
1716                         unsigned EltSize) {
1717   Register IdxBaseReg;
1718   int Offset;
1719   MachineInstr *Unused;
1720 
1721   std::tie(IdxBaseReg, Offset, Unused)
1722     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
1723   if (IdxBaseReg == AMDGPU::NoRegister) {
1724     // This will happen if the index is a known constant. This should ordinarily
1725     // be legalized out, but handle it as a register just in case.
1726     assert(Offset == 0);
1727     IdxBaseReg = IdxReg;
1728   }
1729 
1730   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
1731 
1732   // Skip out of bounds offsets, or else we would end up using an undefined
1733   // register.
1734   if (static_cast<unsigned>(Offset) >= SubRegs.size())
1735     return std::make_pair(IdxReg, SubRegs[0]);
1736   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
1737 }
1738 
1739 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
1740   MachineInstr &MI) const {
1741   Register DstReg = MI.getOperand(0).getReg();
1742   Register SrcReg = MI.getOperand(1).getReg();
1743   Register IdxReg = MI.getOperand(2).getReg();
1744 
1745   LLT DstTy = MRI->getType(DstReg);
1746   LLT SrcTy = MRI->getType(SrcReg);
1747 
1748   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1749   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1750   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1751 
1752   // The index must be scalar. If it wasn't RegBankSelect should have moved this
1753   // into a waterfall loop.
1754   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1755     return false;
1756 
1757   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
1758                                                                   *MRI);
1759   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
1760                                                                   *MRI);
1761   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1762       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1763       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1764     return false;
1765 
1766   MachineBasicBlock *BB = MI.getParent();
1767   const DebugLoc &DL = MI.getDebugLoc();
1768   const bool Is64 = DstTy.getSizeInBits() == 64;
1769 
1770   unsigned SubReg;
1771   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
1772                                                      DstTy.getSizeInBits() / 8);
1773 
1774   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
1775     if (DstTy.getSizeInBits() != 32 && !Is64)
1776       return false;
1777 
1778     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1779       .addReg(IdxReg);
1780 
1781     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
1782     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
1783       .addReg(SrcReg, 0, SubReg)
1784       .addReg(SrcReg, RegState::Implicit);
1785     MI.eraseFromParent();
1786     return true;
1787   }
1788 
1789   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
1790     return false;
1791 
1792   if (!STI.useVGPRIndexMode()) {
1793     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1794       .addReg(IdxReg);
1795     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
1796       .addReg(SrcReg, RegState::Undef, SubReg)
1797       .addReg(SrcReg, RegState::Implicit);
1798     MI.eraseFromParent();
1799     return true;
1800   }
1801 
1802   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1803     .addReg(IdxReg)
1804     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1805   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
1806     .addReg(SrcReg, RegState::Undef, SubReg)
1807     .addReg(SrcReg, RegState::Implicit)
1808     .addReg(AMDGPU::M0, RegState::Implicit);
1809   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1810 
1811   MI.eraseFromParent();
1812   return true;
1813 }
1814 
1815 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
1816 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
1817   MachineInstr &MI) const {
1818   Register DstReg = MI.getOperand(0).getReg();
1819   Register VecReg = MI.getOperand(1).getReg();
1820   Register ValReg = MI.getOperand(2).getReg();
1821   Register IdxReg = MI.getOperand(3).getReg();
1822 
1823   LLT VecTy = MRI->getType(DstReg);
1824   LLT ValTy = MRI->getType(ValReg);
1825   unsigned VecSize = VecTy.getSizeInBits();
1826   unsigned ValSize = ValTy.getSizeInBits();
1827 
1828   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
1829   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
1830   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1831 
1832   assert(VecTy.getElementType() == ValTy);
1833 
1834   // The index must be scalar. If it wasn't RegBankSelect should have moved this
1835   // into a waterfall loop.
1836   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1837     return false;
1838 
1839   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
1840                                                                   *MRI);
1841   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
1842                                                                   *MRI);
1843 
1844   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
1845       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
1846       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
1847       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1848     return false;
1849 
1850   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
1851     return false;
1852 
1853   unsigned SubReg;
1854   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
1855                                                      ValSize / 8);
1856 
1857   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
1858                          STI.useVGPRIndexMode();
1859 
1860   MachineBasicBlock *BB = MI.getParent();
1861   const DebugLoc &DL = MI.getDebugLoc();
1862 
1863   if (IndexMode) {
1864     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1865       .addReg(IdxReg)
1866       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
1867   } else {
1868     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1869       .addReg(IdxReg);
1870   }
1871 
1872   const MCInstrDesc &RegWriteOp
1873     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
1874                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
1875   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
1876     .addReg(VecReg)
1877     .addReg(ValReg)
1878     .addImm(SubReg);
1879 
1880   if (IndexMode)
1881     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1882 
1883   MI.eraseFromParent();
1884   return true;
1885 }
1886 
1887 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
1888   if (I.isPHI())
1889     return selectPHI(I);
1890 
1891   if (!I.isPreISelOpcode()) {
1892     if (I.isCopy())
1893       return selectCOPY(I);
1894     return true;
1895   }
1896 
1897   switch (I.getOpcode()) {
1898   case TargetOpcode::G_AND:
1899   case TargetOpcode::G_OR:
1900   case TargetOpcode::G_XOR:
1901     if (selectG_AND_OR_XOR(I))
1902       return true;
1903     return selectImpl(I, *CoverageInfo);
1904   case TargetOpcode::G_ADD:
1905   case TargetOpcode::G_SUB:
1906     if (selectImpl(I, *CoverageInfo))
1907       return true;
1908     return selectG_ADD_SUB(I);
1909   case TargetOpcode::G_UADDO:
1910   case TargetOpcode::G_USUBO:
1911   case TargetOpcode::G_UADDE:
1912   case TargetOpcode::G_USUBE:
1913     return selectG_UADDO_USUBO_UADDE_USUBE(I);
1914   case TargetOpcode::G_INTTOPTR:
1915   case TargetOpcode::G_BITCAST:
1916   case TargetOpcode::G_PTRTOINT:
1917     return selectCOPY(I);
1918   case TargetOpcode::G_CONSTANT:
1919   case TargetOpcode::G_FCONSTANT:
1920     return selectG_CONSTANT(I);
1921   case TargetOpcode::G_FNEG:
1922     if (selectImpl(I, *CoverageInfo))
1923       return true;
1924     return selectG_FNEG(I);
1925   case TargetOpcode::G_EXTRACT:
1926     return selectG_EXTRACT(I);
1927   case TargetOpcode::G_MERGE_VALUES:
1928   case TargetOpcode::G_BUILD_VECTOR:
1929   case TargetOpcode::G_CONCAT_VECTORS:
1930     return selectG_MERGE_VALUES(I);
1931   case TargetOpcode::G_UNMERGE_VALUES:
1932     return selectG_UNMERGE_VALUES(I);
1933   case TargetOpcode::G_PTR_ADD:
1934     return selectG_PTR_ADD(I);
1935   case TargetOpcode::G_IMPLICIT_DEF:
1936     return selectG_IMPLICIT_DEF(I);
1937   case TargetOpcode::G_INSERT:
1938     return selectG_INSERT(I);
1939   case TargetOpcode::G_INTRINSIC:
1940     return selectG_INTRINSIC(I);
1941   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1942     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
1943   case TargetOpcode::G_ICMP:
1944     if (selectG_ICMP(I))
1945       return true;
1946     return selectImpl(I, *CoverageInfo);
1947   case TargetOpcode::G_LOAD:
1948   case TargetOpcode::G_ATOMIC_CMPXCHG:
1949   case TargetOpcode::G_ATOMICRMW_XCHG:
1950   case TargetOpcode::G_ATOMICRMW_ADD:
1951   case TargetOpcode::G_ATOMICRMW_SUB:
1952   case TargetOpcode::G_ATOMICRMW_AND:
1953   case TargetOpcode::G_ATOMICRMW_OR:
1954   case TargetOpcode::G_ATOMICRMW_XOR:
1955   case TargetOpcode::G_ATOMICRMW_MIN:
1956   case TargetOpcode::G_ATOMICRMW_MAX:
1957   case TargetOpcode::G_ATOMICRMW_UMIN:
1958   case TargetOpcode::G_ATOMICRMW_UMAX:
1959   case TargetOpcode::G_ATOMICRMW_FADD:
1960     return selectG_LOAD_ATOMICRMW(I);
1961   case TargetOpcode::G_SELECT:
1962     return selectG_SELECT(I);
1963   case TargetOpcode::G_STORE:
1964     return selectG_STORE(I);
1965   case TargetOpcode::G_TRUNC:
1966     return selectG_TRUNC(I);
1967   case TargetOpcode::G_SEXT:
1968   case TargetOpcode::G_ZEXT:
1969   case TargetOpcode::G_ANYEXT:
1970   case TargetOpcode::G_SEXT_INREG:
1971     if (selectImpl(I, *CoverageInfo))
1972       return true;
1973     return selectG_SZA_EXT(I);
1974   case TargetOpcode::G_BRCOND:
1975     return selectG_BRCOND(I);
1976   case TargetOpcode::G_FRAME_INDEX:
1977   case TargetOpcode::G_GLOBAL_VALUE:
1978     return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
1979   case TargetOpcode::G_PTR_MASK:
1980     return selectG_PTR_MASK(I);
1981   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1982     return selectG_EXTRACT_VECTOR_ELT(I);
1983   case TargetOpcode::G_INSERT_VECTOR_ELT:
1984     return selectG_INSERT_VECTOR_ELT(I);
1985   case AMDGPU::G_AMDGPU_ATOMIC_INC:
1986   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
1987     initM0(I);
1988     return selectImpl(I, *CoverageInfo);
1989   default:
1990     return selectImpl(I, *CoverageInfo);
1991   }
1992   return false;
1993 }
1994 
1995 InstructionSelector::ComplexRendererFns
1996 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
1997   return {{
1998       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
1999   }};
2000 
2001 }
2002 
2003 std::pair<Register, unsigned>
2004 AMDGPUInstructionSelector::selectVOP3ModsImpl(
2005   Register Src) const {
2006   unsigned Mods = 0;
2007   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
2008 
2009   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
2010     Src = MI->getOperand(1).getReg();
2011     Mods |= SISrcMods::NEG;
2012     MI = getDefIgnoringCopies(Src, *MRI);
2013   }
2014 
2015   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
2016     Src = MI->getOperand(1).getReg();
2017     Mods |= SISrcMods::ABS;
2018   }
2019 
2020   return std::make_pair(Src, Mods);
2021 }
2022 
2023 ///
2024 /// This will select either an SGPR or VGPR operand and will save us from
2025 /// having to write an extra tablegen pattern.
2026 InstructionSelector::ComplexRendererFns
2027 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
2028   return {{
2029       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2030   }};
2031 }
2032 
2033 InstructionSelector::ComplexRendererFns
2034 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2035   Register Src;
2036   unsigned Mods;
2037   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2038 
2039   return {{
2040       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2041       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2042       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2043       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2044   }};
2045 }
2046 
2047 InstructionSelector::ComplexRendererFns
2048 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2049   return {{
2050       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2051       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2052       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2053   }};
2054 }
2055 
2056 InstructionSelector::ComplexRendererFns
2057 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2058   Register Src;
2059   unsigned Mods;
2060   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2061 
2062   return {{
2063       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2064       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2065   }};
2066 }
2067 
2068 InstructionSelector::ComplexRendererFns
2069 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
2070   Register Reg = Root.getReg();
2071   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
2072   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
2073               Def->getOpcode() == AMDGPU::G_FABS))
2074     return {};
2075   return {{
2076       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2077   }};
2078 }
2079 
2080 InstructionSelector::ComplexRendererFns
2081 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2082   Register Src;
2083   unsigned Mods;
2084   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2085   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2086     return None;
2087 
2088   return {{
2089       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2090       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2091   }};
2092 }
2093 
2094 InstructionSelector::ComplexRendererFns
2095 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
2096   // FIXME: Handle clamp and op_sel
2097   return {{
2098       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2099       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
2100       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // clamp
2101   }};
2102 }
2103 
2104 InstructionSelector::ComplexRendererFns
2105 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2106   // FIXME: Handle op_sel
2107   return {{
2108       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2109       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2110   }};
2111 }
2112 
2113 InstructionSelector::ComplexRendererFns
2114 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2115   SmallVector<GEPInfo, 4> AddrInfo;
2116   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2117 
2118   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2119     return None;
2120 
2121   const GEPInfo &GEPInfo = AddrInfo[0];
2122   Optional<int64_t> EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2123   if (!EncodedImm)
2124     return None;
2125 
2126   unsigned PtrReg = GEPInfo.SgprParts[0];
2127   return {{
2128     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2129     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2130   }};
2131 }
2132 
2133 InstructionSelector::ComplexRendererFns
2134 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2135   SmallVector<GEPInfo, 4> AddrInfo;
2136   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2137 
2138   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2139     return None;
2140 
2141   const GEPInfo &GEPInfo = AddrInfo[0];
2142   unsigned PtrReg = GEPInfo.SgprParts[0];
2143   Optional<int64_t> EncodedImm =
2144       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
2145   if (!EncodedImm)
2146     return None;
2147 
2148   return {{
2149     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2150     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2151   }};
2152 }
2153 
2154 InstructionSelector::ComplexRendererFns
2155 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2156   MachineInstr *MI = Root.getParent();
2157   MachineBasicBlock *MBB = MI->getParent();
2158 
2159   SmallVector<GEPInfo, 4> AddrInfo;
2160   getAddrModeInfo(*MI, *MRI, AddrInfo);
2161 
2162   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
2163   // then we can select all ptr + 32-bit offsets not just immediate offsets.
2164   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2165     return None;
2166 
2167   const GEPInfo &GEPInfo = AddrInfo[0];
2168   if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
2169     return None;
2170 
2171   // If we make it this far we have a load with an 32-bit immediate offset.
2172   // It is OK to select this using a sgpr offset, because we have already
2173   // failed trying to select this load into one of the _IMM variants since
2174   // the _IMM Patterns are considered before the _SGPR patterns.
2175   unsigned PtrReg = GEPInfo.SgprParts[0];
2176   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2177   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2178           .addImm(GEPInfo.Imm);
2179   return {{
2180     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2181     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2182   }};
2183 }
2184 
2185 template <bool Signed>
2186 InstructionSelector::ComplexRendererFns
2187 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2188   MachineInstr *MI = Root.getParent();
2189 
2190   InstructionSelector::ComplexRendererFns Default = {{
2191       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2192       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2193       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2194     }};
2195 
2196   if (!STI.hasFlatInstOffsets())
2197     return Default;
2198 
2199   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2200   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2201     return Default;
2202 
2203   Optional<int64_t> Offset =
2204     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2205   if (!Offset.hasValue())
2206     return Default;
2207 
2208   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2209   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2210     return Default;
2211 
2212   Register BasePtr = OpDef->getOperand(1).getReg();
2213 
2214   return {{
2215       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2216       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2217       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2218     }};
2219 }
2220 
2221 InstructionSelector::ComplexRendererFns
2222 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2223   return selectFlatOffsetImpl<false>(Root);
2224 }
2225 
2226 InstructionSelector::ComplexRendererFns
2227 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2228   return selectFlatOffsetImpl<true>(Root);
2229 }
2230 
2231 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2232   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2233   return PSV && PSV->isStack();
2234 }
2235 
2236 InstructionSelector::ComplexRendererFns
2237 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2238   MachineInstr *MI = Root.getParent();
2239   MachineBasicBlock *MBB = MI->getParent();
2240   MachineFunction *MF = MBB->getParent();
2241   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2242 
2243   int64_t Offset = 0;
2244   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2245     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2246 
2247     // TODO: Should this be inside the render function? The iterator seems to
2248     // move.
2249     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2250             HighBits)
2251       .addImm(Offset & ~4095);
2252 
2253     return {{[=](MachineInstrBuilder &MIB) { // rsrc
2254                MIB.addReg(Info->getScratchRSrcReg());
2255              },
2256              [=](MachineInstrBuilder &MIB) { // vaddr
2257                MIB.addReg(HighBits);
2258              },
2259              [=](MachineInstrBuilder &MIB) { // soffset
2260                const MachineMemOperand *MMO = *MI->memoperands_begin();
2261                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2262 
2263                Register SOffsetReg = isStackPtrRelative(PtrInfo)
2264                                          ? Info->getStackPtrOffsetReg()
2265                                          : Info->getScratchWaveOffsetReg();
2266                MIB.addReg(SOffsetReg);
2267              },
2268              [=](MachineInstrBuilder &MIB) { // offset
2269                MIB.addImm(Offset & 4095);
2270              }}};
2271   }
2272 
2273   assert(Offset == 0);
2274 
2275   // Try to fold a frame index directly into the MUBUF vaddr field, and any
2276   // offsets.
2277   Optional<int> FI;
2278   Register VAddr = Root.getReg();
2279   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2280     if (isBaseWithConstantOffset(Root, *MRI)) {
2281       const MachineOperand &LHS = RootDef->getOperand(1);
2282       const MachineOperand &RHS = RootDef->getOperand(2);
2283       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2284       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2285       if (LHSDef && RHSDef) {
2286         int64_t PossibleOffset =
2287             RHSDef->getOperand(1).getCImm()->getSExtValue();
2288         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2289             (!STI.privateMemoryResourceIsRangeChecked() ||
2290              KnownBits->signBitIsZero(LHS.getReg()))) {
2291           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2292             FI = LHSDef->getOperand(1).getIndex();
2293           else
2294             VAddr = LHS.getReg();
2295           Offset = PossibleOffset;
2296         }
2297       }
2298     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2299       FI = RootDef->getOperand(1).getIndex();
2300     }
2301   }
2302 
2303   // If we don't know this private access is a local stack object, it needs to
2304   // be relative to the entry point's scratch wave offset register.
2305   // TODO: Should split large offsets that don't fit like above.
2306   // TODO: Don't use scratch wave offset just because the offset didn't fit.
2307   Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2308                                    : Info->getScratchWaveOffsetReg();
2309 
2310   return {{[=](MachineInstrBuilder &MIB) { // rsrc
2311              MIB.addReg(Info->getScratchRSrcReg());
2312            },
2313            [=](MachineInstrBuilder &MIB) { // vaddr
2314              if (FI.hasValue())
2315                MIB.addFrameIndex(FI.getValue());
2316              else
2317                MIB.addReg(VAddr);
2318            },
2319            [=](MachineInstrBuilder &MIB) { // soffset
2320              MIB.addReg(SOffset);
2321            },
2322            [=](MachineInstrBuilder &MIB) { // offset
2323              MIB.addImm(Offset);
2324            }}};
2325 }
2326 
2327 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
2328                                                 int64_t Offset,
2329                                                 unsigned OffsetBits) const {
2330   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2331       (OffsetBits == 8 && !isUInt<8>(Offset)))
2332     return false;
2333 
2334   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2335     return true;
2336 
2337   // On Southern Islands instruction with a negative base value and an offset
2338   // don't seem to work.
2339   return KnownBits->signBitIsZero(Base);
2340 }
2341 
2342 InstructionSelector::ComplexRendererFns
2343 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2344     MachineOperand &Root) const {
2345   MachineInstr *MI = Root.getParent();
2346   MachineBasicBlock *MBB = MI->getParent();
2347 
2348   int64_t Offset = 0;
2349   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2350       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2351     return {};
2352 
2353   const MachineFunction *MF = MBB->getParent();
2354   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2355   const MachineMemOperand *MMO = *MI->memoperands_begin();
2356   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2357 
2358   Register SOffsetReg = isStackPtrRelative(PtrInfo)
2359                             ? Info->getStackPtrOffsetReg()
2360                             : Info->getScratchWaveOffsetReg();
2361   return {{
2362       [=](MachineInstrBuilder &MIB) {
2363         MIB.addReg(Info->getScratchRSrcReg());
2364       },                                                         // rsrc
2365       [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2366       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }      // offset
2367   }};
2368 }
2369 
2370 std::pair<Register, unsigned>
2371 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
2372   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2373   if (!RootDef)
2374     return std::make_pair(Root.getReg(), 0);
2375 
2376   int64_t ConstAddr = 0;
2377 
2378   Register PtrBase;
2379   int64_t Offset;
2380   std::tie(PtrBase, Offset) =
2381     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
2382 
2383   if (Offset) {
2384     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
2385       // (add n0, c0)
2386       return std::make_pair(PtrBase, Offset);
2387     }
2388   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2389     // TODO
2390 
2391 
2392   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2393     // TODO
2394 
2395   }
2396 
2397   return std::make_pair(Root.getReg(), 0);
2398 }
2399 
2400 InstructionSelector::ComplexRendererFns
2401 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2402   Register Reg;
2403   unsigned Offset;
2404   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
2405   return {{
2406       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2407       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
2408     }};
2409 }
2410 
2411 InstructionSelector::ComplexRendererFns
2412 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
2413   Register Reg;
2414   unsigned Offset;
2415   std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
2416   return {{
2417       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2418       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
2419       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
2420     }};
2421 }
2422 
2423 std::pair<Register, unsigned>
2424 AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
2425   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2426   if (!RootDef)
2427     return std::make_pair(Root.getReg(), 0);
2428 
2429   int64_t ConstAddr = 0;
2430 
2431   Register PtrBase;
2432   int64_t Offset;
2433   std::tie(PtrBase, Offset) =
2434     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
2435 
2436   if (Offset) {
2437     int64_t DWordOffset0 = Offset / 4;
2438     int64_t DWordOffset1 = DWordOffset0 + 1;
2439     if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
2440       // (add n0, c0)
2441       return std::make_pair(PtrBase, DWordOffset0);
2442     }
2443   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2444     // TODO
2445 
2446   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2447     // TODO
2448 
2449   }
2450 
2451   return std::make_pair(Root.getReg(), 0);
2452 }
2453 
2454 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
2455 /// the base value with the constant offset. There may be intervening copies
2456 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
2457 /// not match the pattern.
2458 std::pair<Register, int64_t>
2459 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
2460   Register Root, const MachineRegisterInfo &MRI) const {
2461   MachineInstr *RootI = MRI.getVRegDef(Root);
2462   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
2463     return {Root, 0};
2464 
2465   MachineOperand &RHS = RootI->getOperand(2);
2466   Optional<ValueAndVReg> MaybeOffset
2467     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
2468   if (!MaybeOffset)
2469     return {Root, 0};
2470   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
2471 }
2472 
2473 static void addZeroImm(MachineInstrBuilder &MIB) {
2474   MIB.addImm(0);
2475 }
2476 
2477 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
2478 /// BasePtr is not valid, a null base pointer will be used.
2479 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2480                           uint32_t FormatLo, uint32_t FormatHi,
2481                           Register BasePtr) {
2482   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2483   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2484   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2485   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
2486 
2487   B.buildInstr(AMDGPU::S_MOV_B32)
2488     .addDef(RSrc2)
2489     .addImm(FormatLo);
2490   B.buildInstr(AMDGPU::S_MOV_B32)
2491     .addDef(RSrc3)
2492     .addImm(FormatHi);
2493 
2494   // Build the half of the subregister with the constants before building the
2495   // full 128-bit register. If we are building multiple resource descriptors,
2496   // this will allow CSEing of the 2-component register.
2497   B.buildInstr(AMDGPU::REG_SEQUENCE)
2498     .addDef(RSrcHi)
2499     .addReg(RSrc2)
2500     .addImm(AMDGPU::sub0)
2501     .addReg(RSrc3)
2502     .addImm(AMDGPU::sub1);
2503 
2504   Register RSrcLo = BasePtr;
2505   if (!BasePtr) {
2506     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2507     B.buildInstr(AMDGPU::S_MOV_B64)
2508       .addDef(RSrcLo)
2509       .addImm(0);
2510   }
2511 
2512   B.buildInstr(AMDGPU::REG_SEQUENCE)
2513     .addDef(RSrc)
2514     .addReg(RSrcLo)
2515     .addImm(AMDGPU::sub0_sub1)
2516     .addReg(RSrcHi)
2517     .addImm(AMDGPU::sub2_sub3);
2518 
2519   return RSrc;
2520 }
2521 
2522 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2523                                 const SIInstrInfo &TII, Register BasePtr) {
2524   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
2525 
2526   // FIXME: Why are half the "default" bits ignored based on the addressing
2527   // mode?
2528   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
2529 }
2530 
2531 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2532                                const SIInstrInfo &TII, Register BasePtr) {
2533   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
2534 
2535   // FIXME: Why are half the "default" bits ignored based on the addressing
2536   // mode?
2537   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
2538 }
2539 
2540 AMDGPUInstructionSelector::MUBUFAddressData
2541 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
2542   MUBUFAddressData Data;
2543   Data.N0 = Src;
2544 
2545   Register PtrBase;
2546   int64_t Offset;
2547 
2548   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
2549   if (isUInt<32>(Offset)) {
2550     Data.N0 = PtrBase;
2551     Data.Offset = Offset;
2552   }
2553 
2554   if (MachineInstr *InputAdd
2555       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
2556     Data.N2 = InputAdd->getOperand(1).getReg();
2557     Data.N3 = InputAdd->getOperand(2).getReg();
2558 
2559     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
2560     // FIXME: Don't know this was defined by operand 0
2561     //
2562     // TODO: Remove this when we have copy folding optimizations after
2563     // RegBankSelect.
2564     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
2565     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
2566   }
2567 
2568   return Data;
2569 }
2570 
2571 /// Return if the addr64 mubuf mode should be used for the given address.
2572 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
2573   // (ptr_add N2, N3) -> addr64, or
2574   // (ptr_add (ptr_add N2, N3), C1) -> addr64
2575   if (Addr.N2)
2576     return true;
2577 
2578   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
2579   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
2580 }
2581 
2582 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
2583 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
2584 /// component.
2585 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
2586   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
2587   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
2588     return;
2589 
2590   // Illegal offset, store it in soffset.
2591   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2592   B.buildInstr(AMDGPU::S_MOV_B32)
2593     .addDef(SOffset)
2594     .addImm(ImmOffset);
2595   ImmOffset = 0;
2596 }
2597 
2598 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
2599   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
2600   Register &SOffset, int64_t &Offset) const {
2601   // FIXME: Predicates should stop this from reaching here.
2602   // addr64 bit was removed for volcanic islands.
2603   if (!STI.hasAddr64() || STI.useFlatForGlobal())
2604     return false;
2605 
2606   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
2607   if (!shouldUseAddr64(AddrData))
2608     return false;
2609 
2610   Register N0 = AddrData.N0;
2611   Register N2 = AddrData.N2;
2612   Register N3 = AddrData.N3;
2613   Offset = AddrData.Offset;
2614 
2615   // Base pointer for the SRD.
2616   Register SRDPtr;
2617 
2618   if (N2) {
2619     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2620       assert(N3);
2621       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2622         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
2623         // addr64, and construct the default resource from a 0 address.
2624         VAddr = N0;
2625       } else {
2626         SRDPtr = N3;
2627         VAddr = N2;
2628       }
2629     } else {
2630       // N2 is not divergent.
2631       SRDPtr = N2;
2632       VAddr = N3;
2633     }
2634   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2635     // Use the default null pointer in the resource
2636     VAddr = N0;
2637   } else {
2638     // N0 -> offset, or
2639     // (N0 + C1) -> offset
2640     SRDPtr = N0;
2641   }
2642 
2643   MachineIRBuilder B(*Root.getParent());
2644   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
2645   splitIllegalMUBUFOffset(B, SOffset, Offset);
2646   return true;
2647 }
2648 
2649 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
2650   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
2651   int64_t &Offset) const {
2652   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
2653   if (shouldUseAddr64(AddrData))
2654     return false;
2655 
2656   // N0 -> offset, or
2657   // (N0 + C1) -> offset
2658   Register SRDPtr = AddrData.N0;
2659   Offset = AddrData.Offset;
2660 
2661   // TODO: Look through extensions for 32-bit soffset.
2662   MachineIRBuilder B(*Root.getParent());
2663 
2664   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
2665   splitIllegalMUBUFOffset(B, SOffset, Offset);
2666   return true;
2667 }
2668 
2669 InstructionSelector::ComplexRendererFns
2670 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
2671   Register VAddr;
2672   Register RSrcReg;
2673   Register SOffset;
2674   int64_t Offset = 0;
2675 
2676   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
2677     return {};
2678 
2679   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
2680   // pattern.
2681   return {{
2682       [=](MachineInstrBuilder &MIB) {  // rsrc
2683         MIB.addReg(RSrcReg);
2684       },
2685       [=](MachineInstrBuilder &MIB) { // vaddr
2686         MIB.addReg(VAddr);
2687       },
2688       [=](MachineInstrBuilder &MIB) { // soffset
2689         if (SOffset)
2690           MIB.addReg(SOffset);
2691         else
2692           MIB.addImm(0);
2693       },
2694       [=](MachineInstrBuilder &MIB) { // offset
2695         MIB.addImm(Offset);
2696       },
2697       addZeroImm, //  glc
2698       addZeroImm, //  slc
2699       addZeroImm, //  tfe
2700       addZeroImm, //  dlc
2701       addZeroImm  //  swz
2702     }};
2703 }
2704 
2705 InstructionSelector::ComplexRendererFns
2706 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
2707   Register RSrcReg;
2708   Register SOffset;
2709   int64_t Offset = 0;
2710 
2711   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
2712     return {};
2713 
2714   return {{
2715       [=](MachineInstrBuilder &MIB) {  // rsrc
2716         MIB.addReg(RSrcReg);
2717       },
2718       [=](MachineInstrBuilder &MIB) { // soffset
2719         if (SOffset)
2720           MIB.addReg(SOffset);
2721         else
2722           MIB.addImm(0);
2723       },
2724       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
2725       addZeroImm, //  glc
2726       addZeroImm, //  slc
2727       addZeroImm, //  tfe
2728       addZeroImm, //  dlc
2729       addZeroImm  //  swz
2730     }};
2731 }
2732 
2733 InstructionSelector::ComplexRendererFns
2734 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
2735   Register VAddr;
2736   Register RSrcReg;
2737   Register SOffset;
2738   int64_t Offset = 0;
2739 
2740   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
2741     return {};
2742 
2743   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
2744   // pattern.
2745   return {{
2746       [=](MachineInstrBuilder &MIB) {  // rsrc
2747         MIB.addReg(RSrcReg);
2748       },
2749       [=](MachineInstrBuilder &MIB) { // vaddr
2750         MIB.addReg(VAddr);
2751       },
2752       [=](MachineInstrBuilder &MIB) { // soffset
2753         if (SOffset)
2754           MIB.addReg(SOffset);
2755         else
2756           MIB.addImm(0);
2757       },
2758       [=](MachineInstrBuilder &MIB) { // offset
2759         MIB.addImm(Offset);
2760       },
2761       addZeroImm //  slc
2762     }};
2763 }
2764 
2765 InstructionSelector::ComplexRendererFns
2766 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
2767   Register RSrcReg;
2768   Register SOffset;
2769   int64_t Offset = 0;
2770 
2771   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
2772     return {};
2773 
2774   return {{
2775       [=](MachineInstrBuilder &MIB) {  // rsrc
2776         MIB.addReg(RSrcReg);
2777       },
2778       [=](MachineInstrBuilder &MIB) { // soffset
2779         if (SOffset)
2780           MIB.addReg(SOffset);
2781         else
2782           MIB.addImm(0);
2783       },
2784       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
2785       addZeroImm //  slc
2786     }};
2787 }
2788 
2789 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2790                                                  const MachineInstr &MI,
2791                                                  int OpIdx) const {
2792   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2793          "Expected G_CONSTANT");
2794   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
2795 }
2796 
2797 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
2798                                                 const MachineInstr &MI,
2799                                                 int OpIdx) const {
2800   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2801          "Expected G_CONSTANT");
2802   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
2803 }
2804 
2805 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
2806                                                  const MachineInstr &MI,
2807                                                  int OpIdx) const {
2808   assert(OpIdx == -1);
2809 
2810   const MachineOperand &Op = MI.getOperand(1);
2811   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
2812     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2813   else {
2814     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2815     MIB.addImm(Op.getCImm()->getSExtValue());
2816   }
2817 }
2818 
2819 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
2820                                                 const MachineInstr &MI,
2821                                                 int OpIdx) const {
2822   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2823          "Expected G_CONSTANT");
2824   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
2825 }
2826 
2827 /// This only really exists to satisfy DAG type checking machinery, so is a
2828 /// no-op here.
2829 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
2830                                                 const MachineInstr &MI,
2831                                                 int OpIdx) const {
2832   MIB.addImm(MI.getOperand(OpIdx).getImm());
2833 }
2834 
2835 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
2836                                                  const MachineInstr &MI,
2837                                                  int OpIdx) const {
2838   assert(OpIdx >= 0 && "expected to match an immediate operand");
2839   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
2840 }
2841 
2842 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
2843                                                  const MachineInstr &MI,
2844                                                  int OpIdx) const {
2845   assert(OpIdx >= 0 && "expected to match an immediate operand");
2846   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
2847 }
2848 
2849 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
2850                                                  const MachineInstr &MI,
2851                                                  int OpIdx) const {
2852   assert(OpIdx >= 0 && "expected to match an immediate operand");
2853   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
2854 }
2855 
2856 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
2857                                                  const MachineInstr &MI,
2858                                                  int OpIdx) const {
2859   assert(OpIdx >= 0 && "expected to match an immediate operand");
2860   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
2861 }
2862 
2863 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
2864   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
2865 }
2866 
2867 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
2868   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
2869 }
2870 
2871 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
2872   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
2873 }
2874 
2875 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
2876   return TII.isInlineConstant(Imm);
2877 }
2878