1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50     const AMDGPUTargetMachine &TM)
51     : InstructionSelector(), TII(*STI.getInstrInfo()),
52       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53       STI(STI),
54       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
67                                         CodeGenCoverage &CoverageInfo) {
68   MRI = &MF.getRegInfo();
69   InstructionSelector::setupMF(MF, KB, CoverageInfo);
70 }
71 
72 bool AMDGPUInstructionSelector::isVCC(Register Reg,
73                                       const MachineRegisterInfo &MRI) const {
74   if (Register::isPhysicalRegister(Reg))
75     return Reg == TRI.getVCC();
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
142 
143       // We can't trust the high bits at this point, so clear them.
144 
145       // TODO: Skip masking high bits if def is known boolean.
146 
147       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
148         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
149       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
150         .addImm(1)
151         .addReg(SrcReg);
152       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
153         .addImm(0)
154         .addReg(MaskedReg);
155 
156       if (!MRI->getRegClassOrNull(SrcReg))
157         MRI->setRegClass(SrcReg, SrcRC);
158       I.eraseFromParent();
159       return true;
160     }
161 
162     const TargetRegisterClass *RC =
163       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
164     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
165       return false;
166 
167     // Don't constrain the source register to a class so the def instruction
168     // handles it (unless it's undef).
169     //
170     // FIXME: This is a hack. When selecting the def, we neeed to know
171     // specifically know that the result is VCCRegBank, and not just an SGPR
172     // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
173     if (Src.isUndef()) {
174       const TargetRegisterClass *SrcRC =
175         TRI.getConstrainedRegClassForOperand(Src, *MRI);
176       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
177         return false;
178     }
179 
180     return true;
181   }
182 
183   for (const MachineOperand &MO : I.operands()) {
184     if (Register::isPhysicalRegister(MO.getReg()))
185       continue;
186 
187     const TargetRegisterClass *RC =
188             TRI.getConstrainedRegClassForOperand(MO, *MRI);
189     if (!RC)
190       continue;
191     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
192   }
193   return true;
194 }
195 
196 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
197   const Register DefReg = I.getOperand(0).getReg();
198   const LLT DefTy = MRI->getType(DefReg);
199 
200   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
201 
202   const RegClassOrRegBank &RegClassOrBank =
203     MRI->getRegClassOrRegBank(DefReg);
204 
205   const TargetRegisterClass *DefRC
206     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
207   if (!DefRC) {
208     if (!DefTy.isValid()) {
209       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
210       return false;
211     }
212 
213     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
214     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
215     if (!DefRC) {
216       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
217       return false;
218     }
219   }
220 
221   // TODO: Verify that all registers have the same bank
222   I.setDesc(TII.get(TargetOpcode::PHI));
223   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
224 }
225 
226 MachineOperand
227 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
228                                            const TargetRegisterClass &SubRC,
229                                            unsigned SubIdx) const {
230 
231   MachineInstr *MI = MO.getParent();
232   MachineBasicBlock *BB = MO.getParent()->getParent();
233   Register DstReg = MRI->createVirtualRegister(&SubRC);
234 
235   if (MO.isReg()) {
236     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
237     Register Reg = MO.getReg();
238     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
239             .addReg(Reg, 0, ComposedSubIdx);
240 
241     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
242                                      MO.isKill(), MO.isDead(), MO.isUndef(),
243                                      MO.isEarlyClobber(), 0, MO.isDebug(),
244                                      MO.isInternalRead());
245   }
246 
247   assert(MO.isImm());
248 
249   APInt Imm(64, MO.getImm());
250 
251   switch (SubIdx) {
252   default:
253     llvm_unreachable("do not know to split immediate with this sub index.");
254   case AMDGPU::sub0:
255     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
256   case AMDGPU::sub1:
257     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
258   }
259 }
260 
261 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
262   switch (Opc) {
263   case AMDGPU::G_AND:
264     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
265   case AMDGPU::G_OR:
266     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
267   case AMDGPU::G_XOR:
268     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
269   default:
270     llvm_unreachable("not a bit op");
271   }
272 }
273 
274 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
275   MachineOperand &Dst = I.getOperand(0);
276   MachineOperand &Src0 = I.getOperand(1);
277   MachineOperand &Src1 = I.getOperand(2);
278   Register DstReg = Dst.getReg();
279   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
280 
281   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
282   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
283     const TargetRegisterClass *RC = TRI.getBoolRC();
284     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
285                                            RC == &AMDGPU::SReg_64RegClass);
286     I.setDesc(TII.get(InstOpc));
287 
288     // FIXME: Hack to avoid turning the register bank into a register class.
289     // The selector for G_ICMP relies on seeing the register bank for the result
290     // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
291     // be ambiguous whether it's a scalar or vector bool.
292     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
293       MRI->setRegClass(Src0.getReg(), RC);
294     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
295       MRI->setRegClass(Src1.getReg(), RC);
296 
297     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
298   }
299 
300   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
301   // the result?
302   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
303     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
304     I.setDesc(TII.get(InstOpc));
305     // Dead implicit-def of scc
306     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
307                                            true, // isImp
308                                            false, // isKill
309                                            true)); // isDead
310     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
311   }
312 
313   return false;
314 }
315 
316 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
317   MachineBasicBlock *BB = I.getParent();
318   MachineFunction *MF = BB->getParent();
319   Register DstReg = I.getOperand(0).getReg();
320   const DebugLoc &DL = I.getDebugLoc();
321   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
322   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
323   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
324   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
325 
326   if (Size == 32) {
327     if (IsSALU) {
328       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
329       MachineInstr *Add =
330         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
331         .add(I.getOperand(1))
332         .add(I.getOperand(2));
333       I.eraseFromParent();
334       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
335     }
336 
337     if (STI.hasAddNoCarry()) {
338       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
339       I.setDesc(TII.get(Opc));
340       I.addOperand(*MF, MachineOperand::CreateImm(0));
341       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
342       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
343     }
344 
345     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
346 
347     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
348     MachineInstr *Add
349       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
350       .addDef(UnusedCarry, RegState::Dead)
351       .add(I.getOperand(1))
352       .add(I.getOperand(2))
353       .addImm(0);
354     I.eraseFromParent();
355     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
356   }
357 
358   assert(!Sub && "illegal sub should not reach here");
359 
360   const TargetRegisterClass &RC
361     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
362   const TargetRegisterClass &HalfRC
363     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
364 
365   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
366   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
367   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
368   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
369 
370   Register DstLo = MRI->createVirtualRegister(&HalfRC);
371   Register DstHi = MRI->createVirtualRegister(&HalfRC);
372 
373   if (IsSALU) {
374     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
375       .add(Lo1)
376       .add(Lo2);
377     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
378       .add(Hi1)
379       .add(Hi2);
380   } else {
381     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
382     Register CarryReg = MRI->createVirtualRegister(CarryRC);
383     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
384       .addDef(CarryReg)
385       .add(Lo1)
386       .add(Lo2)
387       .addImm(0);
388     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
389       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
390       .add(Hi1)
391       .add(Hi2)
392       .addReg(CarryReg, RegState::Kill)
393       .addImm(0);
394 
395     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
396       return false;
397   }
398 
399   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
400     .addReg(DstLo)
401     .addImm(AMDGPU::sub0)
402     .addReg(DstHi)
403     .addImm(AMDGPU::sub1);
404 
405 
406   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
407     return false;
408 
409   I.eraseFromParent();
410   return true;
411 }
412 
413 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
414   MachineInstr &I) const {
415   MachineBasicBlock *BB = I.getParent();
416   MachineFunction *MF = BB->getParent();
417   const DebugLoc &DL = I.getDebugLoc();
418   Register Dst0Reg = I.getOperand(0).getReg();
419   Register Dst1Reg = I.getOperand(1).getReg();
420   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
421                      I.getOpcode() == AMDGPU::G_UADDE;
422   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
423                           I.getOpcode() == AMDGPU::G_USUBE;
424 
425   if (isVCC(Dst1Reg, *MRI)) {
426       // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
427       // carry out despite the _i32 name. These were renamed in VI to _U32.
428       // FIXME: We should probably rename the opcodes here.
429     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
430     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
431     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
432     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
433     I.addOperand(*MF, MachineOperand::CreateImm(0));
434     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
435   }
436 
437   Register Src0Reg = I.getOperand(2).getReg();
438   Register Src1Reg = I.getOperand(3).getReg();
439 
440   if (HasCarryIn) {
441     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
442       .addReg(I.getOperand(4).getReg());
443   }
444 
445   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
446   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
447 
448   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
449     .add(I.getOperand(2))
450     .add(I.getOperand(3));
451   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
452     .addReg(AMDGPU::SCC);
453 
454   if (!MRI->getRegClassOrNull(Dst1Reg))
455     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
456 
457   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
458       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
459       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
460     return false;
461 
462   if (HasCarryIn &&
463       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
464                                     AMDGPU::SReg_32RegClass, *MRI))
465     return false;
466 
467   I.eraseFromParent();
468   return true;
469 }
470 
471 // TODO: We should probably legalize these to only using 32-bit results.
472 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
473   MachineBasicBlock *BB = I.getParent();
474   Register DstReg = I.getOperand(0).getReg();
475   Register SrcReg = I.getOperand(1).getReg();
476   LLT DstTy = MRI->getType(DstReg);
477   LLT SrcTy = MRI->getType(SrcReg);
478   const unsigned SrcSize = SrcTy.getSizeInBits();
479   const unsigned DstSize = DstTy.getSizeInBits();
480 
481   // TODO: Should handle any multiple of 32 offset.
482   unsigned Offset = I.getOperand(2).getImm();
483   if (Offset % 32 != 0 || DstSize > 128)
484     return false;
485 
486   const TargetRegisterClass *DstRC =
487     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
488   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
489     return false;
490 
491   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
492   const TargetRegisterClass *SrcRC =
493     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
494   if (!SrcRC)
495     return false;
496   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
497                                                          DstSize / 32);
498   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
499   if (!SrcRC)
500     return false;
501 
502   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
503                                     *SrcRC, I.getOperand(1));
504   const DebugLoc &DL = I.getDebugLoc();
505   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
506     .addReg(SrcReg, 0, SubReg);
507 
508   I.eraseFromParent();
509   return true;
510 }
511 
512 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
513   MachineBasicBlock *BB = MI.getParent();
514   Register DstReg = MI.getOperand(0).getReg();
515   LLT DstTy = MRI->getType(DstReg);
516   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
517 
518   const unsigned SrcSize = SrcTy.getSizeInBits();
519   if (SrcSize < 32)
520     return selectImpl(MI, *CoverageInfo);
521 
522   const DebugLoc &DL = MI.getDebugLoc();
523   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
524   const unsigned DstSize = DstTy.getSizeInBits();
525   const TargetRegisterClass *DstRC =
526     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
527   if (!DstRC)
528     return false;
529 
530   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
531   MachineInstrBuilder MIB =
532     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
533   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
534     MachineOperand &Src = MI.getOperand(I + 1);
535     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
536     MIB.addImm(SubRegs[I]);
537 
538     const TargetRegisterClass *SrcRC
539       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
540     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
541       return false;
542   }
543 
544   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
545     return false;
546 
547   MI.eraseFromParent();
548   return true;
549 }
550 
551 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
552   MachineBasicBlock *BB = MI.getParent();
553   const int NumDst = MI.getNumOperands() - 1;
554 
555   MachineOperand &Src = MI.getOperand(NumDst);
556 
557   Register SrcReg = Src.getReg();
558   Register DstReg0 = MI.getOperand(0).getReg();
559   LLT DstTy = MRI->getType(DstReg0);
560   LLT SrcTy = MRI->getType(SrcReg);
561 
562   const unsigned DstSize = DstTy.getSizeInBits();
563   const unsigned SrcSize = SrcTy.getSizeInBits();
564   const DebugLoc &DL = MI.getDebugLoc();
565   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
566 
567   const TargetRegisterClass *SrcRC =
568     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
569   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
570     return false;
571 
572   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
573 
574   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
575   // source, and this relies on the fact that the same subregister indices are
576   // used for both.
577   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
578   for (int I = 0, E = NumDst; I != E; ++I) {
579     MachineOperand &Dst = MI.getOperand(I);
580     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
581       .addReg(SrcReg, SrcFlags, SubRegs[I]);
582 
583     const TargetRegisterClass *DstRC =
584       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
585     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
586       return false;
587   }
588 
589   MI.eraseFromParent();
590   return true;
591 }
592 
593 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
594   return selectG_ADD_SUB(I);
595 }
596 
597 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
598   const MachineOperand &MO = I.getOperand(0);
599 
600   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
601   // regbank check here is to know why getConstrainedRegClassForOperand failed.
602   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
603   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
604       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
605     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
606     return true;
607   }
608 
609   return false;
610 }
611 
612 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
613   MachineBasicBlock *BB = I.getParent();
614 
615   Register DstReg = I.getOperand(0).getReg();
616   Register Src0Reg = I.getOperand(1).getReg();
617   Register Src1Reg = I.getOperand(2).getReg();
618   LLT Src1Ty = MRI->getType(Src1Reg);
619 
620   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
621   unsigned InsSize = Src1Ty.getSizeInBits();
622 
623   int64_t Offset = I.getOperand(3).getImm();
624   if (Offset % 32 != 0)
625     return false;
626 
627   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
628   if (SubReg == AMDGPU::NoSubRegister)
629     return false;
630 
631   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
632   const TargetRegisterClass *DstRC =
633     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
634   if (!DstRC)
635     return false;
636 
637   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
638   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
639   const TargetRegisterClass *Src0RC =
640     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
641   const TargetRegisterClass *Src1RC =
642     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
643 
644   // Deal with weird cases where the class only partially supports the subreg
645   // index.
646   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
647   if (!Src0RC)
648     return false;
649 
650   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
651       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
652       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
653     return false;
654 
655   const DebugLoc &DL = I.getDebugLoc();
656   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
657     .addReg(Src0Reg)
658     .addReg(Src1Reg)
659     .addImm(SubReg);
660 
661   I.eraseFromParent();
662   return true;
663 }
664 
665 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
666   if (STI.getLDSBankCount() != 16)
667     return selectImpl(MI, *CoverageInfo);
668 
669   Register Dst = MI.getOperand(0).getReg();
670   Register Src0 = MI.getOperand(2).getReg();
671   Register M0Val = MI.getOperand(6).getReg();
672   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
673       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
674       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
675     return false;
676 
677   // This requires 2 instructions. It is possible to write a pattern to support
678   // this, but the generated isel emitter doesn't correctly deal with multiple
679   // output instructions using the same physical register input. The copy to m0
680   // is incorrectly placed before the second instruction.
681   //
682   // TODO: Match source modifiers.
683 
684   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
685   const DebugLoc &DL = MI.getDebugLoc();
686   MachineBasicBlock *MBB = MI.getParent();
687 
688   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
689     .addReg(M0Val);
690   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
691     .addImm(2)
692     .addImm(MI.getOperand(4).getImm())  // $attr
693     .addImm(MI.getOperand(3).getImm()); // $attrchan
694 
695   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
696     .addImm(0)                          // $src0_modifiers
697     .addReg(Src0)                       // $src0
698     .addImm(MI.getOperand(4).getImm())  // $attr
699     .addImm(MI.getOperand(3).getImm())  // $attrchan
700     .addImm(0)                          // $src2_modifiers
701     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
702     .addImm(MI.getOperand(5).getImm())  // $high
703     .addImm(0)                          // $clamp
704     .addImm(0);                         // $omod
705 
706   MI.eraseFromParent();
707   return true;
708 }
709 
710 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
711   unsigned IntrinsicID = I.getIntrinsicID();
712   switch (IntrinsicID) {
713   case Intrinsic::amdgcn_if_break: {
714     MachineBasicBlock *BB = I.getParent();
715 
716     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
717     // SelectionDAG uses for wave32 vs wave64.
718     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
719       .add(I.getOperand(0))
720       .add(I.getOperand(2))
721       .add(I.getOperand(3));
722 
723     Register DstReg = I.getOperand(0).getReg();
724     Register Src0Reg = I.getOperand(2).getReg();
725     Register Src1Reg = I.getOperand(3).getReg();
726 
727     I.eraseFromParent();
728 
729     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
730       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
731 
732     return true;
733   }
734   case Intrinsic::amdgcn_interp_p1_f16:
735     return selectInterpP1F16(I);
736   case Intrinsic::amdgcn_wqm:
737     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
738   case Intrinsic::amdgcn_softwqm:
739     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
740   case Intrinsic::amdgcn_wwm:
741     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
742   default:
743     return selectImpl(I, *CoverageInfo);
744   }
745 }
746 
747 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
748   if (Size != 32 && Size != 64)
749     return -1;
750   switch (P) {
751   default:
752     llvm_unreachable("Unknown condition code!");
753   case CmpInst::ICMP_NE:
754     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
755   case CmpInst::ICMP_EQ:
756     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
757   case CmpInst::ICMP_SGT:
758     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
759   case CmpInst::ICMP_SGE:
760     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
761   case CmpInst::ICMP_SLT:
762     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
763   case CmpInst::ICMP_SLE:
764     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
765   case CmpInst::ICMP_UGT:
766     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
767   case CmpInst::ICMP_UGE:
768     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
769   case CmpInst::ICMP_ULT:
770     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
771   case CmpInst::ICMP_ULE:
772     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
773   }
774 }
775 
776 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
777                                               unsigned Size) const {
778   if (Size == 64) {
779     if (!STI.hasScalarCompareEq64())
780       return -1;
781 
782     switch (P) {
783     case CmpInst::ICMP_NE:
784       return AMDGPU::S_CMP_LG_U64;
785     case CmpInst::ICMP_EQ:
786       return AMDGPU::S_CMP_EQ_U64;
787     default:
788       return -1;
789     }
790   }
791 
792   if (Size != 32)
793     return -1;
794 
795   switch (P) {
796   case CmpInst::ICMP_NE:
797     return AMDGPU::S_CMP_LG_U32;
798   case CmpInst::ICMP_EQ:
799     return AMDGPU::S_CMP_EQ_U32;
800   case CmpInst::ICMP_SGT:
801     return AMDGPU::S_CMP_GT_I32;
802   case CmpInst::ICMP_SGE:
803     return AMDGPU::S_CMP_GE_I32;
804   case CmpInst::ICMP_SLT:
805     return AMDGPU::S_CMP_LT_I32;
806   case CmpInst::ICMP_SLE:
807     return AMDGPU::S_CMP_LE_I32;
808   case CmpInst::ICMP_UGT:
809     return AMDGPU::S_CMP_GT_U32;
810   case CmpInst::ICMP_UGE:
811     return AMDGPU::S_CMP_GE_U32;
812   case CmpInst::ICMP_ULT:
813     return AMDGPU::S_CMP_LT_U32;
814   case CmpInst::ICMP_ULE:
815     return AMDGPU::S_CMP_LE_U32;
816   default:
817     llvm_unreachable("Unknown condition code!");
818   }
819 }
820 
821 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
822   MachineBasicBlock *BB = I.getParent();
823   const DebugLoc &DL = I.getDebugLoc();
824 
825   Register SrcReg = I.getOperand(2).getReg();
826   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
827 
828   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
829 
830   Register CCReg = I.getOperand(0).getReg();
831   if (!isVCC(CCReg, *MRI)) {
832     int Opcode = getS_CMPOpcode(Pred, Size);
833     if (Opcode == -1)
834       return false;
835     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
836             .add(I.getOperand(2))
837             .add(I.getOperand(3));
838     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
839       .addReg(AMDGPU::SCC);
840     bool Ret =
841         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
842         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
843     I.eraseFromParent();
844     return Ret;
845   }
846 
847   int Opcode = getV_CMPOpcode(Pred, Size);
848   if (Opcode == -1)
849     return false;
850 
851   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
852             I.getOperand(0).getReg())
853             .add(I.getOperand(2))
854             .add(I.getOperand(3));
855   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
856                                *TRI.getBoolRC(), *MRI);
857   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
858   I.eraseFromParent();
859   return Ret;
860 }
861 
862 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
863   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
864   // SelectionDAG uses for wave32 vs wave64.
865   MachineBasicBlock *BB = MI.getParent();
866   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
867       .add(MI.getOperand(1));
868 
869   Register Reg = MI.getOperand(1).getReg();
870   MI.eraseFromParent();
871 
872   if (!MRI->getRegClassOrNull(Reg))
873     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
874   return true;
875 }
876 
877 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
878   switch (MF.getFunction().getCallingConv()) {
879   case CallingConv::AMDGPU_PS:
880     return 1;
881   case CallingConv::AMDGPU_VS:
882     return 2;
883   case CallingConv::AMDGPU_GS:
884     return 3;
885   case CallingConv::AMDGPU_HS:
886   case CallingConv::AMDGPU_LS:
887   case CallingConv::AMDGPU_ES:
888     report_fatal_error("ds_ordered_count unsupported for this calling conv");
889   case CallingConv::AMDGPU_CS:
890   case CallingConv::AMDGPU_KERNEL:
891   case CallingConv::C:
892   case CallingConv::Fast:
893   default:
894     // Assume other calling conventions are various compute callable functions
895     return 0;
896   }
897 }
898 
899 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
900   MachineInstr &MI, Intrinsic::ID IntrID) const {
901   MachineBasicBlock *MBB = MI.getParent();
902   MachineFunction *MF = MBB->getParent();
903   const DebugLoc &DL = MI.getDebugLoc();
904 
905   unsigned IndexOperand = MI.getOperand(7).getImm();
906   bool WaveRelease = MI.getOperand(8).getImm() != 0;
907   bool WaveDone = MI.getOperand(9).getImm() != 0;
908 
909   if (WaveDone && !WaveRelease)
910     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
911 
912   unsigned OrderedCountIndex = IndexOperand & 0x3f;
913   IndexOperand &= ~0x3f;
914   unsigned CountDw = 0;
915 
916   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
917     CountDw = (IndexOperand >> 24) & 0xf;
918     IndexOperand &= ~(0xf << 24);
919 
920     if (CountDw < 1 || CountDw > 4) {
921       report_fatal_error(
922         "ds_ordered_count: dword count must be between 1 and 4");
923     }
924   }
925 
926   if (IndexOperand)
927     report_fatal_error("ds_ordered_count: bad index operand");
928 
929   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
930   unsigned ShaderType = getDSShaderTypeValue(*MF);
931 
932   unsigned Offset0 = OrderedCountIndex << 2;
933   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
934                      (Instruction << 4);
935 
936   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
937     Offset1 |= (CountDw - 1) << 6;
938 
939   unsigned Offset = Offset0 | (Offset1 << 8);
940 
941   Register M0Val = MI.getOperand(2).getReg();
942   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
943     .addReg(M0Val);
944 
945   Register DstReg = MI.getOperand(0).getReg();
946   Register ValReg = MI.getOperand(3).getReg();
947   MachineInstrBuilder DS =
948     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
949       .addReg(ValReg)
950       .addImm(Offset)
951       .cloneMemRefs(MI);
952 
953   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
954     return false;
955 
956   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
957   MI.eraseFromParent();
958   return Ret;
959 }
960 
961 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
962   switch (IntrID) {
963   case Intrinsic::amdgcn_ds_gws_init:
964     return AMDGPU::DS_GWS_INIT;
965   case Intrinsic::amdgcn_ds_gws_barrier:
966     return AMDGPU::DS_GWS_BARRIER;
967   case Intrinsic::amdgcn_ds_gws_sema_v:
968     return AMDGPU::DS_GWS_SEMA_V;
969   case Intrinsic::amdgcn_ds_gws_sema_br:
970     return AMDGPU::DS_GWS_SEMA_BR;
971   case Intrinsic::amdgcn_ds_gws_sema_p:
972     return AMDGPU::DS_GWS_SEMA_P;
973   case Intrinsic::amdgcn_ds_gws_sema_release_all:
974     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
975   default:
976     llvm_unreachable("not a gws intrinsic");
977   }
978 }
979 
980 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
981                                                      Intrinsic::ID IID) const {
982   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
983       !STI.hasGWSSemaReleaseAll())
984     return false;
985 
986   // intrinsic ID, vsrc, offset
987   const bool HasVSrc = MI.getNumOperands() == 3;
988   assert(HasVSrc || MI.getNumOperands() == 2);
989 
990   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
991   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
992   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
993     return false;
994 
995   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
996   assert(OffsetDef);
997 
998   unsigned ImmOffset;
999 
1000   MachineBasicBlock *MBB = MI.getParent();
1001   const DebugLoc &DL = MI.getDebugLoc();
1002 
1003   MachineInstr *Readfirstlane = nullptr;
1004 
1005   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1006   // incoming offset, in case there's an add of a constant. We'll have to put it
1007   // back later.
1008   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1009     Readfirstlane = OffsetDef;
1010     BaseOffset = OffsetDef->getOperand(1).getReg();
1011     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1012   }
1013 
1014   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1015     // If we have a constant offset, try to use the 0 in m0 as the base.
1016     // TODO: Look into changing the default m0 initialization value. If the
1017     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1018     // the immediate offset.
1019 
1020     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1021     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1022       .addImm(0);
1023   } else {
1024     std::tie(BaseOffset, ImmOffset, OffsetDef)
1025       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1026 
1027     if (Readfirstlane) {
1028       // We have the constant offset now, so put the readfirstlane back on the
1029       // variable component.
1030       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1031         return false;
1032 
1033       Readfirstlane->getOperand(1).setReg(BaseOffset);
1034       BaseOffset = Readfirstlane->getOperand(0).getReg();
1035     } else {
1036       if (!RBI.constrainGenericRegister(BaseOffset,
1037                                         AMDGPU::SReg_32RegClass, *MRI))
1038         return false;
1039     }
1040 
1041     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1042     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1043       .addReg(BaseOffset)
1044       .addImm(16);
1045 
1046     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1047       .addReg(M0Base);
1048   }
1049 
1050   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1051   // offset field) % 64. Some versions of the programming guide omit the m0
1052   // part, or claim it's from offset 0.
1053   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1054 
1055   if (HasVSrc) {
1056     Register VSrc = MI.getOperand(1).getReg();
1057     MIB.addReg(VSrc);
1058     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1059       return false;
1060   }
1061 
1062   MIB.addImm(ImmOffset)
1063      .addImm(-1) // $gds
1064      .cloneMemRefs(MI);
1065 
1066   MI.eraseFromParent();
1067   return true;
1068 }
1069 
1070 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1071                                                       bool IsAppend) const {
1072   Register PtrBase = MI.getOperand(2).getReg();
1073   LLT PtrTy = MRI->getType(PtrBase);
1074   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1075 
1076   unsigned Offset;
1077   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1078 
1079   // TODO: Should this try to look through readfirstlane like GWS?
1080   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1081     PtrBase = MI.getOperand(2).getReg();
1082     Offset = 0;
1083   }
1084 
1085   MachineBasicBlock *MBB = MI.getParent();
1086   const DebugLoc &DL = MI.getDebugLoc();
1087   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1088 
1089   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1090     .addReg(PtrBase);
1091   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1092     .addImm(Offset)
1093     .addImm(IsGDS ? -1 : 0)
1094     .cloneMemRefs(MI);
1095 
1096   MI.eraseFromParent();
1097   return true;
1098 }
1099 
1100 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1101     MachineInstr &I) const {
1102   unsigned IntrinsicID = I.getIntrinsicID();
1103   switch (IntrinsicID) {
1104   case Intrinsic::amdgcn_end_cf:
1105     return selectEndCfIntrinsic(I);
1106   case Intrinsic::amdgcn_ds_ordered_add:
1107   case Intrinsic::amdgcn_ds_ordered_swap:
1108     return selectDSOrderedIntrinsic(I, IntrinsicID);
1109   case Intrinsic::amdgcn_ds_gws_init:
1110   case Intrinsic::amdgcn_ds_gws_barrier:
1111   case Intrinsic::amdgcn_ds_gws_sema_v:
1112   case Intrinsic::amdgcn_ds_gws_sema_br:
1113   case Intrinsic::amdgcn_ds_gws_sema_p:
1114   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1115     return selectDSGWSIntrinsic(I, IntrinsicID);
1116   case Intrinsic::amdgcn_ds_append:
1117     return selectDSAppendConsume(I, true);
1118   case Intrinsic::amdgcn_ds_consume:
1119     return selectDSAppendConsume(I, false);
1120   default:
1121     return selectImpl(I, *CoverageInfo);
1122   }
1123 }
1124 
1125 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1126   if (selectImpl(I, *CoverageInfo))
1127     return true;
1128 
1129   MachineBasicBlock *BB = I.getParent();
1130   const DebugLoc &DL = I.getDebugLoc();
1131 
1132   Register DstReg = I.getOperand(0).getReg();
1133   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1134   assert(Size <= 32 || Size == 64);
1135   const MachineOperand &CCOp = I.getOperand(1);
1136   Register CCReg = CCOp.getReg();
1137   if (!isVCC(CCReg, *MRI)) {
1138     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1139                                          AMDGPU::S_CSELECT_B32;
1140     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1141             .addReg(CCReg);
1142 
1143     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1144     // bank, because it does not cover the register class that we used to represent
1145     // for it.  So we need to manually set the register class here.
1146     if (!MRI->getRegClassOrNull(CCReg))
1147         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1148     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1149             .add(I.getOperand(2))
1150             .add(I.getOperand(3));
1151 
1152     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1153                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1154     I.eraseFromParent();
1155     return Ret;
1156   }
1157 
1158   // Wide VGPR select should have been split in RegBankSelect.
1159   if (Size > 32)
1160     return false;
1161 
1162   MachineInstr *Select =
1163       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1164               .addImm(0)
1165               .add(I.getOperand(3))
1166               .addImm(0)
1167               .add(I.getOperand(2))
1168               .add(I.getOperand(1));
1169 
1170   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1171   I.eraseFromParent();
1172   return Ret;
1173 }
1174 
1175 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1176   initM0(I);
1177   return selectImpl(I, *CoverageInfo);
1178 }
1179 
1180 static int sizeToSubRegIndex(unsigned Size) {
1181   switch (Size) {
1182   case 32:
1183     return AMDGPU::sub0;
1184   case 64:
1185     return AMDGPU::sub0_sub1;
1186   case 96:
1187     return AMDGPU::sub0_sub1_sub2;
1188   case 128:
1189     return AMDGPU::sub0_sub1_sub2_sub3;
1190   case 256:
1191     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1192   default:
1193     if (Size < 32)
1194       return AMDGPU::sub0;
1195     if (Size > 256)
1196       return -1;
1197     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1198   }
1199 }
1200 
1201 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1202   Register DstReg = I.getOperand(0).getReg();
1203   Register SrcReg = I.getOperand(1).getReg();
1204   const LLT DstTy = MRI->getType(DstReg);
1205   const LLT SrcTy = MRI->getType(SrcReg);
1206   if (!DstTy.isScalar())
1207     return false;
1208 
1209   const LLT S1 = LLT::scalar(1);
1210 
1211   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1212   const RegisterBank *DstRB;
1213   if (DstTy == S1) {
1214     // This is a special case. We don't treat s1 for legalization artifacts as
1215     // vcc booleans.
1216     DstRB = SrcRB;
1217   } else {
1218     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1219     if (SrcRB != DstRB)
1220       return false;
1221   }
1222 
1223   unsigned DstSize = DstTy.getSizeInBits();
1224   unsigned SrcSize = SrcTy.getSizeInBits();
1225 
1226   const TargetRegisterClass *SrcRC
1227     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1228   const TargetRegisterClass *DstRC
1229     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1230 
1231   if (SrcSize > 32) {
1232     int SubRegIdx = sizeToSubRegIndex(DstSize);
1233     if (SubRegIdx == -1)
1234       return false;
1235 
1236     // Deal with weird cases where the class only partially supports the subreg
1237     // index.
1238     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1239     if (!SrcRC)
1240       return false;
1241 
1242     I.getOperand(1).setSubReg(SubRegIdx);
1243   }
1244 
1245   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1246       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1247     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1248     return false;
1249   }
1250 
1251   I.setDesc(TII.get(TargetOpcode::COPY));
1252   return true;
1253 }
1254 
1255 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1256 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1257   Mask = maskTrailingOnes<unsigned>(Size);
1258   int SignedMask = static_cast<int>(Mask);
1259   return SignedMask >= -16 && SignedMask <= 64;
1260 }
1261 
1262 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1263 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1264   Register Reg, const MachineRegisterInfo &MRI,
1265   const TargetRegisterInfo &TRI) const {
1266   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1267   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1268     return RB;
1269 
1270   // Ignore the type, since we don't use vcc in artifacts.
1271   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1272     return &RBI.getRegBankFromRegClass(*RC, LLT());
1273   return nullptr;
1274 }
1275 
1276 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1277   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1278   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1279   const DebugLoc &DL = I.getDebugLoc();
1280   MachineBasicBlock &MBB = *I.getParent();
1281   const Register DstReg = I.getOperand(0).getReg();
1282   const Register SrcReg = I.getOperand(1).getReg();
1283 
1284   const LLT DstTy = MRI->getType(DstReg);
1285   const LLT SrcTy = MRI->getType(SrcReg);
1286   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1287     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1288   const unsigned DstSize = DstTy.getSizeInBits();
1289   if (!DstTy.isScalar())
1290     return false;
1291 
1292   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1293     return selectCOPY(I);
1294 
1295   // Artifact casts should never use vcc.
1296   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1297 
1298   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1299     // 64-bit should have been split up in RegBankSelect
1300 
1301     // Try to use an and with a mask if it will save code size.
1302     unsigned Mask;
1303     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1304       MachineInstr *ExtI =
1305       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1306         .addImm(Mask)
1307         .addReg(SrcReg);
1308       I.eraseFromParent();
1309       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1310     }
1311 
1312     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1313     MachineInstr *ExtI =
1314       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1315       .addReg(SrcReg)
1316       .addImm(0) // Offset
1317       .addImm(SrcSize); // Width
1318     I.eraseFromParent();
1319     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1320   }
1321 
1322   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1323     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
1324       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
1325     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
1326       return false;
1327 
1328     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1329       const unsigned SextOpc = SrcSize == 8 ?
1330         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1331       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1332         .addReg(SrcReg);
1333       I.eraseFromParent();
1334       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1335     }
1336 
1337     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1338     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1339 
1340     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1341     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
1342       // We need a 64-bit register source, but the high bits don't matter.
1343       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1344       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1345       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
1346 
1347       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1348       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1349         .addReg(SrcReg, 0, SubReg)
1350         .addImm(AMDGPU::sub0)
1351         .addReg(UndefReg)
1352         .addImm(AMDGPU::sub1);
1353 
1354       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1355         .addReg(ExtReg)
1356         .addImm(SrcSize << 16);
1357 
1358       I.eraseFromParent();
1359       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1360     }
1361 
1362     unsigned Mask;
1363     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1364       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1365         .addReg(SrcReg)
1366         .addImm(Mask);
1367     } else {
1368       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1369         .addReg(SrcReg)
1370         .addImm(SrcSize << 16);
1371     }
1372 
1373     I.eraseFromParent();
1374     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1375   }
1376 
1377   return false;
1378 }
1379 
1380 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1381   MachineBasicBlock *BB = I.getParent();
1382   MachineOperand &ImmOp = I.getOperand(1);
1383 
1384   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1385   if (ImmOp.isFPImm()) {
1386     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1387     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1388   } else if (ImmOp.isCImm()) {
1389     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1390   }
1391 
1392   Register DstReg = I.getOperand(0).getReg();
1393   unsigned Size;
1394   bool IsSgpr;
1395   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1396   if (RB) {
1397     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1398     Size = MRI->getType(DstReg).getSizeInBits();
1399   } else {
1400     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1401     IsSgpr = TRI.isSGPRClass(RC);
1402     Size = TRI.getRegSizeInBits(*RC);
1403   }
1404 
1405   if (Size != 32 && Size != 64)
1406     return false;
1407 
1408   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1409   if (Size == 32) {
1410     I.setDesc(TII.get(Opcode));
1411     I.addImplicitDefUseOperands(*MF);
1412     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1413   }
1414 
1415   const DebugLoc &DL = I.getDebugLoc();
1416 
1417   APInt Imm(Size, I.getOperand(1).getImm());
1418 
1419   MachineInstr *ResInst;
1420   if (IsSgpr && TII.isInlineConstant(Imm)) {
1421     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1422       .addImm(I.getOperand(1).getImm());
1423   } else {
1424     const TargetRegisterClass *RC = IsSgpr ?
1425       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1426     Register LoReg = MRI->createVirtualRegister(RC);
1427     Register HiReg = MRI->createVirtualRegister(RC);
1428 
1429     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1430       .addImm(Imm.trunc(32).getZExtValue());
1431 
1432     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1433       .addImm(Imm.ashr(32).getZExtValue());
1434 
1435     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1436       .addReg(LoReg)
1437       .addImm(AMDGPU::sub0)
1438       .addReg(HiReg)
1439       .addImm(AMDGPU::sub1);
1440   }
1441 
1442   // We can't call constrainSelectedInstRegOperands here, because it doesn't
1443   // work for target independent opcodes
1444   I.eraseFromParent();
1445   const TargetRegisterClass *DstRC =
1446     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1447   if (!DstRC)
1448     return true;
1449   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1450 }
1451 
1452 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
1453   // Only manually handle the f64 SGPR case.
1454   //
1455   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
1456   // the bit ops theoretically have a second result due to the implicit def of
1457   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
1458   // that is easy by disabling the check. The result works, but uses a
1459   // nonsensical sreg32orlds_and_sreg_1 regclass.
1460   //
1461   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
1462   // the variadic REG_SEQUENCE operands.
1463 
1464   Register Dst = MI.getOperand(0).getReg();
1465   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
1466   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
1467       MRI->getType(Dst) != LLT::scalar(64))
1468     return false;
1469 
1470   Register Src = MI.getOperand(1).getReg();
1471   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
1472   if (Fabs)
1473     Src = Fabs->getOperand(1).getReg();
1474 
1475   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
1476       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
1477     return false;
1478 
1479   MachineBasicBlock *BB = MI.getParent();
1480   const DebugLoc &DL = MI.getDebugLoc();
1481   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1482   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1483   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1484   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1485 
1486   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
1487     .addReg(Src, 0, AMDGPU::sub0);
1488   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
1489     .addReg(Src, 0, AMDGPU::sub1);
1490   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
1491     .addImm(0x80000000);
1492 
1493   // Set or toggle sign bit.
1494   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
1495   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
1496     .addReg(HiReg)
1497     .addReg(ConstReg);
1498   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
1499     .addReg(LoReg)
1500     .addImm(AMDGPU::sub0)
1501     .addReg(OpReg)
1502     .addImm(AMDGPU::sub1);
1503   MI.eraseFromParent();
1504   return true;
1505 }
1506 
1507 static bool isConstant(const MachineInstr &MI) {
1508   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1509 }
1510 
1511 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1512     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1513 
1514   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1515 
1516   assert(PtrMI);
1517 
1518   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1519     return;
1520 
1521   GEPInfo GEPInfo(*PtrMI);
1522 
1523   for (unsigned i = 1; i != 3; ++i) {
1524     const MachineOperand &GEPOp = PtrMI->getOperand(i);
1525     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1526     assert(OpDef);
1527     if (i == 2 && isConstant(*OpDef)) {
1528       // TODO: Could handle constant base + variable offset, but a combine
1529       // probably should have commuted it.
1530       assert(GEPInfo.Imm == 0);
1531       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1532       continue;
1533     }
1534     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1535     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1536       GEPInfo.SgprParts.push_back(GEPOp.getReg());
1537     else
1538       GEPInfo.VgprParts.push_back(GEPOp.getReg());
1539   }
1540 
1541   AddrInfo.push_back(GEPInfo);
1542   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1543 }
1544 
1545 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1546   if (!MI.hasOneMemOperand())
1547     return false;
1548 
1549   const MachineMemOperand *MMO = *MI.memoperands_begin();
1550   const Value *Ptr = MMO->getValue();
1551 
1552   // UndefValue means this is a load of a kernel input.  These are uniform.
1553   // Sometimes LDS instructions have constant pointers.
1554   // If Ptr is null, then that means this mem operand contains a
1555   // PseudoSourceValue like GOT.
1556   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1557       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1558     return true;
1559 
1560   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1561     return true;
1562 
1563   const Instruction *I = dyn_cast<Instruction>(Ptr);
1564   return I && I->getMetadata("amdgpu.uniform");
1565 }
1566 
1567 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1568   for (const GEPInfo &GEPInfo : AddrInfo) {
1569     if (!GEPInfo.VgprParts.empty())
1570       return true;
1571   }
1572   return false;
1573 }
1574 
1575 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
1576   MachineBasicBlock *BB = I.getParent();
1577 
1578   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
1579   unsigned AS = PtrTy.getAddressSpace();
1580   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
1581       STI.ldsRequiresM0Init()) {
1582     // If DS instructions require M0 initializtion, insert it before selecting.
1583     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1584       .addImm(-1);
1585   }
1586 }
1587 
1588 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
1589   initM0(I);
1590   return selectImpl(I, *CoverageInfo);
1591 }
1592 
1593 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
1594   MachineBasicBlock *BB = I.getParent();
1595   MachineOperand &CondOp = I.getOperand(0);
1596   Register CondReg = CondOp.getReg();
1597   const DebugLoc &DL = I.getDebugLoc();
1598 
1599   unsigned BrOpcode;
1600   Register CondPhysReg;
1601   const TargetRegisterClass *ConstrainRC;
1602 
1603   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
1604   // whether the branch is uniform when selecting the instruction. In
1605   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
1606   // RegBankSelect knows what it's doing if the branch condition is scc, even
1607   // though it currently does not.
1608   if (!isVCC(CondReg, *MRI)) {
1609     if (MRI->getType(CondReg) != LLT::scalar(32))
1610       return false;
1611 
1612     CondPhysReg = AMDGPU::SCC;
1613     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
1614     // FIXME: Hack for isSCC tests
1615     ConstrainRC = &AMDGPU::SGPR_32RegClass;
1616   } else {
1617     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
1618     // We sort of know that a VCC producer based on the register bank, that ands
1619     // inactive lanes with 0. What if there was a logical operation with vcc
1620     // producers in different blocks/with different exec masks?
1621     // FIXME: Should scc->vcc copies and with exec?
1622     CondPhysReg = TRI.getVCC();
1623     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
1624     ConstrainRC = TRI.getBoolRC();
1625   }
1626 
1627   if (!MRI->getRegClassOrNull(CondReg))
1628     MRI->setRegClass(CondReg, ConstrainRC);
1629 
1630   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
1631     .addReg(CondReg);
1632   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
1633     .addMBB(I.getOperand(1).getMBB());
1634 
1635   I.eraseFromParent();
1636   return true;
1637 }
1638 
1639 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
1640   MachineInstr &I) const {
1641   Register DstReg = I.getOperand(0).getReg();
1642   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1643   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1644   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
1645   if (IsVGPR)
1646     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
1647 
1648   return RBI.constrainGenericRegister(
1649     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
1650 }
1651 
1652 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
1653   uint64_t Align = I.getOperand(2).getImm();
1654   const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
1655 
1656   MachineBasicBlock *BB = I.getParent();
1657 
1658   Register DstReg = I.getOperand(0).getReg();
1659   Register SrcReg = I.getOperand(1).getReg();
1660 
1661   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1662   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1663   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
1664   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1665   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1666   const TargetRegisterClass &RegRC
1667     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
1668 
1669   LLT Ty = MRI->getType(DstReg);
1670 
1671   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
1672                                                                   *MRI);
1673   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
1674                                                                   *MRI);
1675   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1676       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
1677     return false;
1678 
1679   const DebugLoc &DL = I.getDebugLoc();
1680   Register ImmReg = MRI->createVirtualRegister(&RegRC);
1681   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
1682     .addImm(Mask);
1683 
1684   if (Ty.getSizeInBits() == 32) {
1685     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
1686       .addReg(SrcReg)
1687       .addReg(ImmReg);
1688     I.eraseFromParent();
1689     return true;
1690   }
1691 
1692   Register HiReg = MRI->createVirtualRegister(&RegRC);
1693   Register LoReg = MRI->createVirtualRegister(&RegRC);
1694   Register MaskLo = MRI->createVirtualRegister(&RegRC);
1695 
1696   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
1697     .addReg(SrcReg, 0, AMDGPU::sub0);
1698   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
1699     .addReg(SrcReg, 0, AMDGPU::sub1);
1700 
1701   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
1702     .addReg(LoReg)
1703     .addReg(ImmReg);
1704   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1705     .addReg(MaskLo)
1706     .addImm(AMDGPU::sub0)
1707     .addReg(HiReg)
1708     .addImm(AMDGPU::sub1);
1709   I.eraseFromParent();
1710   return true;
1711 }
1712 
1713 /// Return the register to use for the index value, and the subregister to use
1714 /// for the indirectly accessed register.
1715 static std::pair<Register, unsigned>
1716 computeIndirectRegIndex(MachineRegisterInfo &MRI,
1717                         const SIRegisterInfo &TRI,
1718                         const TargetRegisterClass *SuperRC,
1719                         Register IdxReg,
1720                         unsigned EltSize) {
1721   Register IdxBaseReg;
1722   int Offset;
1723   MachineInstr *Unused;
1724 
1725   std::tie(IdxBaseReg, Offset, Unused)
1726     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
1727   if (IdxBaseReg == AMDGPU::NoRegister) {
1728     // This will happen if the index is a known constant. This should ordinarily
1729     // be legalized out, but handle it as a register just in case.
1730     assert(Offset == 0);
1731     IdxBaseReg = IdxReg;
1732   }
1733 
1734   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
1735 
1736   // Skip out of bounds offsets, or else we would end up using an undefined
1737   // register.
1738   if (static_cast<unsigned>(Offset) >= SubRegs.size())
1739     return std::make_pair(IdxReg, SubRegs[0]);
1740   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
1741 }
1742 
1743 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
1744   MachineInstr &MI) const {
1745   Register DstReg = MI.getOperand(0).getReg();
1746   Register SrcReg = MI.getOperand(1).getReg();
1747   Register IdxReg = MI.getOperand(2).getReg();
1748 
1749   LLT DstTy = MRI->getType(DstReg);
1750   LLT SrcTy = MRI->getType(SrcReg);
1751 
1752   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1753   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1754   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1755 
1756   // The index must be scalar. If it wasn't RegBankSelect should have moved this
1757   // into a waterfall loop.
1758   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1759     return false;
1760 
1761   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
1762                                                                   *MRI);
1763   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
1764                                                                   *MRI);
1765   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1766       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
1767       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1768     return false;
1769 
1770   MachineBasicBlock *BB = MI.getParent();
1771   const DebugLoc &DL = MI.getDebugLoc();
1772   const bool Is64 = DstTy.getSizeInBits() == 64;
1773 
1774   unsigned SubReg;
1775   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
1776                                                      DstTy.getSizeInBits() / 8);
1777 
1778   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
1779     if (DstTy.getSizeInBits() != 32 && !Is64)
1780       return false;
1781 
1782     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1783       .addReg(IdxReg);
1784 
1785     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
1786     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
1787       .addReg(SrcReg, 0, SubReg)
1788       .addReg(SrcReg, RegState::Implicit);
1789     MI.eraseFromParent();
1790     return true;
1791   }
1792 
1793   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
1794     return false;
1795 
1796   if (!STI.useVGPRIndexMode()) {
1797     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1798       .addReg(IdxReg);
1799     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
1800       .addReg(SrcReg, RegState::Undef, SubReg)
1801       .addReg(SrcReg, RegState::Implicit);
1802     MI.eraseFromParent();
1803     return true;
1804   }
1805 
1806   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1807     .addReg(IdxReg)
1808     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
1809   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
1810     .addReg(SrcReg, RegState::Undef, SubReg)
1811     .addReg(SrcReg, RegState::Implicit)
1812     .addReg(AMDGPU::M0, RegState::Implicit);
1813   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1814 
1815   MI.eraseFromParent();
1816   return true;
1817 }
1818 
1819 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
1820 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
1821   MachineInstr &MI) const {
1822   Register DstReg = MI.getOperand(0).getReg();
1823   Register VecReg = MI.getOperand(1).getReg();
1824   Register ValReg = MI.getOperand(2).getReg();
1825   Register IdxReg = MI.getOperand(3).getReg();
1826 
1827   LLT VecTy = MRI->getType(DstReg);
1828   LLT ValTy = MRI->getType(ValReg);
1829   unsigned VecSize = VecTy.getSizeInBits();
1830   unsigned ValSize = ValTy.getSizeInBits();
1831 
1832   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
1833   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
1834   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
1835 
1836   assert(VecTy.getElementType() == ValTy);
1837 
1838   // The index must be scalar. If it wasn't RegBankSelect should have moved this
1839   // into a waterfall loop.
1840   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
1841     return false;
1842 
1843   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
1844                                                                   *MRI);
1845   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
1846                                                                   *MRI);
1847 
1848   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
1849       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
1850       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
1851       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
1852     return false;
1853 
1854   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
1855     return false;
1856 
1857   unsigned SubReg;
1858   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
1859                                                      ValSize / 8);
1860 
1861   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
1862                          STI.useVGPRIndexMode();
1863 
1864   MachineBasicBlock *BB = MI.getParent();
1865   const DebugLoc &DL = MI.getDebugLoc();
1866 
1867   if (IndexMode) {
1868     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
1869       .addReg(IdxReg)
1870       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
1871   } else {
1872     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1873       .addReg(IdxReg);
1874   }
1875 
1876   const MCInstrDesc &RegWriteOp
1877     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
1878                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
1879   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
1880     .addReg(VecReg)
1881     .addReg(ValReg)
1882     .addImm(SubReg);
1883 
1884   if (IndexMode)
1885     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
1886 
1887   MI.eraseFromParent();
1888   return true;
1889 }
1890 
1891 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
1892   if (I.isPHI())
1893     return selectPHI(I);
1894 
1895   if (!I.isPreISelOpcode()) {
1896     if (I.isCopy())
1897       return selectCOPY(I);
1898     return true;
1899   }
1900 
1901   switch (I.getOpcode()) {
1902   case TargetOpcode::G_AND:
1903   case TargetOpcode::G_OR:
1904   case TargetOpcode::G_XOR:
1905     if (selectG_AND_OR_XOR(I))
1906       return true;
1907     return selectImpl(I, *CoverageInfo);
1908   case TargetOpcode::G_ADD:
1909   case TargetOpcode::G_SUB:
1910     if (selectImpl(I, *CoverageInfo))
1911       return true;
1912     return selectG_ADD_SUB(I);
1913   case TargetOpcode::G_UADDO:
1914   case TargetOpcode::G_USUBO:
1915   case TargetOpcode::G_UADDE:
1916   case TargetOpcode::G_USUBE:
1917     return selectG_UADDO_USUBO_UADDE_USUBE(I);
1918   case TargetOpcode::G_INTTOPTR:
1919   case TargetOpcode::G_BITCAST:
1920   case TargetOpcode::G_PTRTOINT:
1921     return selectCOPY(I);
1922   case TargetOpcode::G_CONSTANT:
1923   case TargetOpcode::G_FCONSTANT:
1924     return selectG_CONSTANT(I);
1925   case TargetOpcode::G_FNEG:
1926     if (selectImpl(I, *CoverageInfo))
1927       return true;
1928     return selectG_FNEG(I);
1929   case TargetOpcode::G_EXTRACT:
1930     return selectG_EXTRACT(I);
1931   case TargetOpcode::G_MERGE_VALUES:
1932   case TargetOpcode::G_BUILD_VECTOR:
1933   case TargetOpcode::G_CONCAT_VECTORS:
1934     return selectG_MERGE_VALUES(I);
1935   case TargetOpcode::G_UNMERGE_VALUES:
1936     return selectG_UNMERGE_VALUES(I);
1937   case TargetOpcode::G_PTR_ADD:
1938     return selectG_PTR_ADD(I);
1939   case TargetOpcode::G_IMPLICIT_DEF:
1940     return selectG_IMPLICIT_DEF(I);
1941   case TargetOpcode::G_INSERT:
1942     return selectG_INSERT(I);
1943   case TargetOpcode::G_INTRINSIC:
1944     return selectG_INTRINSIC(I);
1945   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1946     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
1947   case TargetOpcode::G_ICMP:
1948     if (selectG_ICMP(I))
1949       return true;
1950     return selectImpl(I, *CoverageInfo);
1951   case TargetOpcode::G_LOAD:
1952   case TargetOpcode::G_ATOMIC_CMPXCHG:
1953   case TargetOpcode::G_ATOMICRMW_XCHG:
1954   case TargetOpcode::G_ATOMICRMW_ADD:
1955   case TargetOpcode::G_ATOMICRMW_SUB:
1956   case TargetOpcode::G_ATOMICRMW_AND:
1957   case TargetOpcode::G_ATOMICRMW_OR:
1958   case TargetOpcode::G_ATOMICRMW_XOR:
1959   case TargetOpcode::G_ATOMICRMW_MIN:
1960   case TargetOpcode::G_ATOMICRMW_MAX:
1961   case TargetOpcode::G_ATOMICRMW_UMIN:
1962   case TargetOpcode::G_ATOMICRMW_UMAX:
1963   case TargetOpcode::G_ATOMICRMW_FADD:
1964     return selectG_LOAD_ATOMICRMW(I);
1965   case TargetOpcode::G_SELECT:
1966     return selectG_SELECT(I);
1967   case TargetOpcode::G_STORE:
1968     return selectG_STORE(I);
1969   case TargetOpcode::G_TRUNC:
1970     return selectG_TRUNC(I);
1971   case TargetOpcode::G_SEXT:
1972   case TargetOpcode::G_ZEXT:
1973   case TargetOpcode::G_ANYEXT:
1974   case TargetOpcode::G_SEXT_INREG:
1975     if (selectImpl(I, *CoverageInfo))
1976       return true;
1977     return selectG_SZA_EXT(I);
1978   case TargetOpcode::G_BRCOND:
1979     return selectG_BRCOND(I);
1980   case TargetOpcode::G_FRAME_INDEX:
1981   case TargetOpcode::G_GLOBAL_VALUE:
1982     return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
1983   case TargetOpcode::G_PTR_MASK:
1984     return selectG_PTR_MASK(I);
1985   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
1986     return selectG_EXTRACT_VECTOR_ELT(I);
1987   case TargetOpcode::G_INSERT_VECTOR_ELT:
1988     return selectG_INSERT_VECTOR_ELT(I);
1989   case AMDGPU::G_AMDGPU_ATOMIC_INC:
1990   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
1991     initM0(I);
1992     return selectImpl(I, *CoverageInfo);
1993   default:
1994     return selectImpl(I, *CoverageInfo);
1995   }
1996   return false;
1997 }
1998 
1999 InstructionSelector::ComplexRendererFns
2000 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
2001   return {{
2002       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2003   }};
2004 
2005 }
2006 
2007 std::pair<Register, unsigned>
2008 AMDGPUInstructionSelector::selectVOP3ModsImpl(
2009   Register Src) const {
2010   unsigned Mods = 0;
2011   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
2012 
2013   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
2014     Src = MI->getOperand(1).getReg();
2015     Mods |= SISrcMods::NEG;
2016     MI = getDefIgnoringCopies(Src, *MRI);
2017   }
2018 
2019   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
2020     Src = MI->getOperand(1).getReg();
2021     Mods |= SISrcMods::ABS;
2022   }
2023 
2024   return std::make_pair(Src, Mods);
2025 }
2026 
2027 ///
2028 /// This will select either an SGPR or VGPR operand and will save us from
2029 /// having to write an extra tablegen pattern.
2030 InstructionSelector::ComplexRendererFns
2031 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
2032   return {{
2033       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2034   }};
2035 }
2036 
2037 InstructionSelector::ComplexRendererFns
2038 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2039   Register Src;
2040   unsigned Mods;
2041   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2042 
2043   return {{
2044       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2045       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2046       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2047       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2048   }};
2049 }
2050 
2051 InstructionSelector::ComplexRendererFns
2052 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2053   return {{
2054       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2055       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2056       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2057   }};
2058 }
2059 
2060 InstructionSelector::ComplexRendererFns
2061 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2062   Register Src;
2063   unsigned Mods;
2064   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2065 
2066   return {{
2067       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2068       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2069   }};
2070 }
2071 
2072 InstructionSelector::ComplexRendererFns
2073 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
2074   Register Reg = Root.getReg();
2075   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
2076   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
2077               Def->getOpcode() == AMDGPU::G_FABS))
2078     return {};
2079   return {{
2080       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2081   }};
2082 }
2083 
2084 InstructionSelector::ComplexRendererFns
2085 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2086   Register Src;
2087   unsigned Mods;
2088   std::tie(Src, Mods) = selectVOP3ModsImpl(Root.getReg());
2089   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2090     return None;
2091 
2092   return {{
2093       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2094       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2095   }};
2096 }
2097 
2098 InstructionSelector::ComplexRendererFns
2099 AMDGPUInstructionSelector::selectVOP3OpSelMods0(MachineOperand &Root) const {
2100   // FIXME: Handle clamp and op_sel
2101   return {{
2102       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2103       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src_mods
2104       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // clamp
2105   }};
2106 }
2107 
2108 InstructionSelector::ComplexRendererFns
2109 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2110   // FIXME: Handle op_sel
2111   return {{
2112       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2113       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2114   }};
2115 }
2116 
2117 InstructionSelector::ComplexRendererFns
2118 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2119   SmallVector<GEPInfo, 4> AddrInfo;
2120   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2121 
2122   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2123     return None;
2124 
2125   const GEPInfo &GEPInfo = AddrInfo[0];
2126   Optional<int64_t> EncodedImm = AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm);
2127   if (!EncodedImm)
2128     return None;
2129 
2130   unsigned PtrReg = GEPInfo.SgprParts[0];
2131   return {{
2132     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2133     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2134   }};
2135 }
2136 
2137 InstructionSelector::ComplexRendererFns
2138 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2139   SmallVector<GEPInfo, 4> AddrInfo;
2140   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2141 
2142   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2143     return None;
2144 
2145   const GEPInfo &GEPInfo = AddrInfo[0];
2146   unsigned PtrReg = GEPInfo.SgprParts[0];
2147   Optional<int64_t> EncodedImm =
2148       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
2149   if (!EncodedImm)
2150     return None;
2151 
2152   return {{
2153     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2154     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2155   }};
2156 }
2157 
2158 InstructionSelector::ComplexRendererFns
2159 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2160   MachineInstr *MI = Root.getParent();
2161   MachineBasicBlock *MBB = MI->getParent();
2162 
2163   SmallVector<GEPInfo, 4> AddrInfo;
2164   getAddrModeInfo(*MI, *MRI, AddrInfo);
2165 
2166   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
2167   // then we can select all ptr + 32-bit offsets not just immediate offsets.
2168   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2169     return None;
2170 
2171   const GEPInfo &GEPInfo = AddrInfo[0];
2172   if (!GEPInfo.Imm || !isUInt<32>(GEPInfo.Imm))
2173     return None;
2174 
2175   // If we make it this far we have a load with an 32-bit immediate offset.
2176   // It is OK to select this using a sgpr offset, because we have already
2177   // failed trying to select this load into one of the _IMM variants since
2178   // the _IMM Patterns are considered before the _SGPR patterns.
2179   unsigned PtrReg = GEPInfo.SgprParts[0];
2180   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2181   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2182           .addImm(GEPInfo.Imm);
2183   return {{
2184     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2185     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2186   }};
2187 }
2188 
2189 template <bool Signed>
2190 InstructionSelector::ComplexRendererFns
2191 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2192   MachineInstr *MI = Root.getParent();
2193 
2194   InstructionSelector::ComplexRendererFns Default = {{
2195       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2196       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2197       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2198     }};
2199 
2200   if (!STI.hasFlatInstOffsets())
2201     return Default;
2202 
2203   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2204   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2205     return Default;
2206 
2207   Optional<int64_t> Offset =
2208     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2209   if (!Offset.hasValue())
2210     return Default;
2211 
2212   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2213   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2214     return Default;
2215 
2216   Register BasePtr = OpDef->getOperand(1).getReg();
2217 
2218   return {{
2219       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2220       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2221       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2222     }};
2223 }
2224 
2225 InstructionSelector::ComplexRendererFns
2226 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2227   return selectFlatOffsetImpl<false>(Root);
2228 }
2229 
2230 InstructionSelector::ComplexRendererFns
2231 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2232   return selectFlatOffsetImpl<true>(Root);
2233 }
2234 
2235 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2236   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2237   return PSV && PSV->isStack();
2238 }
2239 
2240 InstructionSelector::ComplexRendererFns
2241 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2242   MachineInstr *MI = Root.getParent();
2243   MachineBasicBlock *MBB = MI->getParent();
2244   MachineFunction *MF = MBB->getParent();
2245   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2246 
2247   int64_t Offset = 0;
2248   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2249     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2250 
2251     // TODO: Should this be inside the render function? The iterator seems to
2252     // move.
2253     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2254             HighBits)
2255       .addImm(Offset & ~4095);
2256 
2257     return {{[=](MachineInstrBuilder &MIB) { // rsrc
2258                MIB.addReg(Info->getScratchRSrcReg());
2259              },
2260              [=](MachineInstrBuilder &MIB) { // vaddr
2261                MIB.addReg(HighBits);
2262              },
2263              [=](MachineInstrBuilder &MIB) { // soffset
2264                const MachineMemOperand *MMO = *MI->memoperands_begin();
2265                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2266 
2267                Register SOffsetReg = isStackPtrRelative(PtrInfo)
2268                                          ? Info->getStackPtrOffsetReg()
2269                                          : Info->getScratchWaveOffsetReg();
2270                MIB.addReg(SOffsetReg);
2271              },
2272              [=](MachineInstrBuilder &MIB) { // offset
2273                MIB.addImm(Offset & 4095);
2274              }}};
2275   }
2276 
2277   assert(Offset == 0);
2278 
2279   // Try to fold a frame index directly into the MUBUF vaddr field, and any
2280   // offsets.
2281   Optional<int> FI;
2282   Register VAddr = Root.getReg();
2283   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2284     if (isBaseWithConstantOffset(Root, *MRI)) {
2285       const MachineOperand &LHS = RootDef->getOperand(1);
2286       const MachineOperand &RHS = RootDef->getOperand(2);
2287       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2288       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2289       if (LHSDef && RHSDef) {
2290         int64_t PossibleOffset =
2291             RHSDef->getOperand(1).getCImm()->getSExtValue();
2292         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2293             (!STI.privateMemoryResourceIsRangeChecked() ||
2294              KnownBits->signBitIsZero(LHS.getReg()))) {
2295           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
2296             FI = LHSDef->getOperand(1).getIndex();
2297           else
2298             VAddr = LHS.getReg();
2299           Offset = PossibleOffset;
2300         }
2301       }
2302     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
2303       FI = RootDef->getOperand(1).getIndex();
2304     }
2305   }
2306 
2307   // If we don't know this private access is a local stack object, it needs to
2308   // be relative to the entry point's scratch wave offset register.
2309   // TODO: Should split large offsets that don't fit like above.
2310   // TODO: Don't use scratch wave offset just because the offset didn't fit.
2311   Register SOffset = FI.hasValue() ? Info->getStackPtrOffsetReg()
2312                                    : Info->getScratchWaveOffsetReg();
2313 
2314   return {{[=](MachineInstrBuilder &MIB) { // rsrc
2315              MIB.addReg(Info->getScratchRSrcReg());
2316            },
2317            [=](MachineInstrBuilder &MIB) { // vaddr
2318              if (FI.hasValue())
2319                MIB.addFrameIndex(FI.getValue());
2320              else
2321                MIB.addReg(VAddr);
2322            },
2323            [=](MachineInstrBuilder &MIB) { // soffset
2324              MIB.addReg(SOffset);
2325            },
2326            [=](MachineInstrBuilder &MIB) { // offset
2327              MIB.addImm(Offset);
2328            }}};
2329 }
2330 
2331 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
2332                                                 int64_t Offset,
2333                                                 unsigned OffsetBits) const {
2334   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
2335       (OffsetBits == 8 && !isUInt<8>(Offset)))
2336     return false;
2337 
2338   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
2339     return true;
2340 
2341   // On Southern Islands instruction with a negative base value and an offset
2342   // don't seem to work.
2343   return KnownBits->signBitIsZero(Base);
2344 }
2345 
2346 InstructionSelector::ComplexRendererFns
2347 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
2348     MachineOperand &Root) const {
2349   MachineInstr *MI = Root.getParent();
2350   MachineBasicBlock *MBB = MI->getParent();
2351 
2352   int64_t Offset = 0;
2353   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
2354       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
2355     return {};
2356 
2357   const MachineFunction *MF = MBB->getParent();
2358   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2359   const MachineMemOperand *MMO = *MI->memoperands_begin();
2360   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2361 
2362   Register SOffsetReg = isStackPtrRelative(PtrInfo)
2363                             ? Info->getStackPtrOffsetReg()
2364                             : Info->getScratchWaveOffsetReg();
2365   return {{
2366       [=](MachineInstrBuilder &MIB) {
2367         MIB.addReg(Info->getScratchRSrcReg());
2368       },                                                         // rsrc
2369       [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffsetReg); }, // soffset
2370       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }      // offset
2371   }};
2372 }
2373 
2374 std::pair<Register, unsigned>
2375 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
2376   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2377   if (!RootDef)
2378     return std::make_pair(Root.getReg(), 0);
2379 
2380   int64_t ConstAddr = 0;
2381 
2382   Register PtrBase;
2383   int64_t Offset;
2384   std::tie(PtrBase, Offset) =
2385     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
2386 
2387   if (Offset) {
2388     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
2389       // (add n0, c0)
2390       return std::make_pair(PtrBase, Offset);
2391     }
2392   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2393     // TODO
2394 
2395 
2396   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2397     // TODO
2398 
2399   }
2400 
2401   return std::make_pair(Root.getReg(), 0);
2402 }
2403 
2404 InstructionSelector::ComplexRendererFns
2405 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
2406   Register Reg;
2407   unsigned Offset;
2408   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
2409   return {{
2410       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2411       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
2412     }};
2413 }
2414 
2415 InstructionSelector::ComplexRendererFns
2416 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
2417   Register Reg;
2418   unsigned Offset;
2419   std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
2420   return {{
2421       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2422       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
2423       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
2424     }};
2425 }
2426 
2427 std::pair<Register, unsigned>
2428 AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
2429   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
2430   if (!RootDef)
2431     return std::make_pair(Root.getReg(), 0);
2432 
2433   int64_t ConstAddr = 0;
2434 
2435   Register PtrBase;
2436   int64_t Offset;
2437   std::tie(PtrBase, Offset) =
2438     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
2439 
2440   if (Offset) {
2441     int64_t DWordOffset0 = Offset / 4;
2442     int64_t DWordOffset1 = DWordOffset0 + 1;
2443     if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
2444       // (add n0, c0)
2445       return std::make_pair(PtrBase, DWordOffset0);
2446     }
2447   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
2448     // TODO
2449 
2450   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
2451     // TODO
2452 
2453   }
2454 
2455   return std::make_pair(Root.getReg(), 0);
2456 }
2457 
2458 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
2459 /// the base value with the constant offset. There may be intervening copies
2460 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
2461 /// not match the pattern.
2462 std::pair<Register, int64_t>
2463 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
2464   Register Root, const MachineRegisterInfo &MRI) const {
2465   MachineInstr *RootI = MRI.getVRegDef(Root);
2466   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
2467     return {Root, 0};
2468 
2469   MachineOperand &RHS = RootI->getOperand(2);
2470   Optional<ValueAndVReg> MaybeOffset
2471     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
2472   if (!MaybeOffset)
2473     return {Root, 0};
2474   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
2475 }
2476 
2477 static void addZeroImm(MachineInstrBuilder &MIB) {
2478   MIB.addImm(0);
2479 }
2480 
2481 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
2482 /// BasePtr is not valid, a null base pointer will be used.
2483 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2484                           uint32_t FormatLo, uint32_t FormatHi,
2485                           Register BasePtr) {
2486   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2487   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
2488   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2489   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
2490 
2491   B.buildInstr(AMDGPU::S_MOV_B32)
2492     .addDef(RSrc2)
2493     .addImm(FormatLo);
2494   B.buildInstr(AMDGPU::S_MOV_B32)
2495     .addDef(RSrc3)
2496     .addImm(FormatHi);
2497 
2498   // Build the half of the subregister with the constants before building the
2499   // full 128-bit register. If we are building multiple resource descriptors,
2500   // this will allow CSEing of the 2-component register.
2501   B.buildInstr(AMDGPU::REG_SEQUENCE)
2502     .addDef(RSrcHi)
2503     .addReg(RSrc2)
2504     .addImm(AMDGPU::sub0)
2505     .addReg(RSrc3)
2506     .addImm(AMDGPU::sub1);
2507 
2508   Register RSrcLo = BasePtr;
2509   if (!BasePtr) {
2510     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
2511     B.buildInstr(AMDGPU::S_MOV_B64)
2512       .addDef(RSrcLo)
2513       .addImm(0);
2514   }
2515 
2516   B.buildInstr(AMDGPU::REG_SEQUENCE)
2517     .addDef(RSrc)
2518     .addReg(RSrcLo)
2519     .addImm(AMDGPU::sub0_sub1)
2520     .addReg(RSrcHi)
2521     .addImm(AMDGPU::sub2_sub3);
2522 
2523   return RSrc;
2524 }
2525 
2526 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2527                                 const SIInstrInfo &TII, Register BasePtr) {
2528   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
2529 
2530   // FIXME: Why are half the "default" bits ignored based on the addressing
2531   // mode?
2532   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
2533 }
2534 
2535 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
2536                                const SIInstrInfo &TII, Register BasePtr) {
2537   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
2538 
2539   // FIXME: Why are half the "default" bits ignored based on the addressing
2540   // mode?
2541   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
2542 }
2543 
2544 AMDGPUInstructionSelector::MUBUFAddressData
2545 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
2546   MUBUFAddressData Data;
2547   Data.N0 = Src;
2548 
2549   Register PtrBase;
2550   int64_t Offset;
2551 
2552   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
2553   if (isUInt<32>(Offset)) {
2554     Data.N0 = PtrBase;
2555     Data.Offset = Offset;
2556   }
2557 
2558   if (MachineInstr *InputAdd
2559       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
2560     Data.N2 = InputAdd->getOperand(1).getReg();
2561     Data.N3 = InputAdd->getOperand(2).getReg();
2562 
2563     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
2564     // FIXME: Don't know this was defined by operand 0
2565     //
2566     // TODO: Remove this when we have copy folding optimizations after
2567     // RegBankSelect.
2568     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
2569     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
2570   }
2571 
2572   return Data;
2573 }
2574 
2575 /// Return if the addr64 mubuf mode should be used for the given address.
2576 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
2577   // (ptr_add N2, N3) -> addr64, or
2578   // (ptr_add (ptr_add N2, N3), C1) -> addr64
2579   if (Addr.N2)
2580     return true;
2581 
2582   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
2583   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
2584 }
2585 
2586 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
2587 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
2588 /// component.
2589 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
2590   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
2591   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
2592     return;
2593 
2594   // Illegal offset, store it in soffset.
2595   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2596   B.buildInstr(AMDGPU::S_MOV_B32)
2597     .addDef(SOffset)
2598     .addImm(ImmOffset);
2599   ImmOffset = 0;
2600 }
2601 
2602 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
2603   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
2604   Register &SOffset, int64_t &Offset) const {
2605   // FIXME: Predicates should stop this from reaching here.
2606   // addr64 bit was removed for volcanic islands.
2607   if (!STI.hasAddr64() || STI.useFlatForGlobal())
2608     return false;
2609 
2610   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
2611   if (!shouldUseAddr64(AddrData))
2612     return false;
2613 
2614   Register N0 = AddrData.N0;
2615   Register N2 = AddrData.N2;
2616   Register N3 = AddrData.N3;
2617   Offset = AddrData.Offset;
2618 
2619   // Base pointer for the SRD.
2620   Register SRDPtr;
2621 
2622   if (N2) {
2623     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2624       assert(N3);
2625       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2626         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
2627         // addr64, and construct the default resource from a 0 address.
2628         VAddr = N0;
2629       } else {
2630         SRDPtr = N3;
2631         VAddr = N2;
2632       }
2633     } else {
2634       // N2 is not divergent.
2635       SRDPtr = N2;
2636       VAddr = N3;
2637     }
2638   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
2639     // Use the default null pointer in the resource
2640     VAddr = N0;
2641   } else {
2642     // N0 -> offset, or
2643     // (N0 + C1) -> offset
2644     SRDPtr = N0;
2645   }
2646 
2647   MachineIRBuilder B(*Root.getParent());
2648   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
2649   splitIllegalMUBUFOffset(B, SOffset, Offset);
2650   return true;
2651 }
2652 
2653 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
2654   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
2655   int64_t &Offset) const {
2656   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
2657   if (shouldUseAddr64(AddrData))
2658     return false;
2659 
2660   // N0 -> offset, or
2661   // (N0 + C1) -> offset
2662   Register SRDPtr = AddrData.N0;
2663   Offset = AddrData.Offset;
2664 
2665   // TODO: Look through extensions for 32-bit soffset.
2666   MachineIRBuilder B(*Root.getParent());
2667 
2668   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
2669   splitIllegalMUBUFOffset(B, SOffset, Offset);
2670   return true;
2671 }
2672 
2673 InstructionSelector::ComplexRendererFns
2674 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
2675   Register VAddr;
2676   Register RSrcReg;
2677   Register SOffset;
2678   int64_t Offset = 0;
2679 
2680   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
2681     return {};
2682 
2683   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
2684   // pattern.
2685   return {{
2686       [=](MachineInstrBuilder &MIB) {  // rsrc
2687         MIB.addReg(RSrcReg);
2688       },
2689       [=](MachineInstrBuilder &MIB) { // vaddr
2690         MIB.addReg(VAddr);
2691       },
2692       [=](MachineInstrBuilder &MIB) { // soffset
2693         if (SOffset)
2694           MIB.addReg(SOffset);
2695         else
2696           MIB.addImm(0);
2697       },
2698       [=](MachineInstrBuilder &MIB) { // offset
2699         MIB.addImm(Offset);
2700       },
2701       addZeroImm, //  glc
2702       addZeroImm, //  slc
2703       addZeroImm, //  tfe
2704       addZeroImm, //  dlc
2705       addZeroImm  //  swz
2706     }};
2707 }
2708 
2709 InstructionSelector::ComplexRendererFns
2710 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
2711   Register RSrcReg;
2712   Register SOffset;
2713   int64_t Offset = 0;
2714 
2715   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
2716     return {};
2717 
2718   return {{
2719       [=](MachineInstrBuilder &MIB) {  // rsrc
2720         MIB.addReg(RSrcReg);
2721       },
2722       [=](MachineInstrBuilder &MIB) { // soffset
2723         if (SOffset)
2724           MIB.addReg(SOffset);
2725         else
2726           MIB.addImm(0);
2727       },
2728       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
2729       addZeroImm, //  glc
2730       addZeroImm, //  slc
2731       addZeroImm, //  tfe
2732       addZeroImm, //  dlc
2733       addZeroImm  //  swz
2734     }};
2735 }
2736 
2737 InstructionSelector::ComplexRendererFns
2738 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
2739   Register VAddr;
2740   Register RSrcReg;
2741   Register SOffset;
2742   int64_t Offset = 0;
2743 
2744   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
2745     return {};
2746 
2747   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
2748   // pattern.
2749   return {{
2750       [=](MachineInstrBuilder &MIB) {  // rsrc
2751         MIB.addReg(RSrcReg);
2752       },
2753       [=](MachineInstrBuilder &MIB) { // vaddr
2754         MIB.addReg(VAddr);
2755       },
2756       [=](MachineInstrBuilder &MIB) { // soffset
2757         if (SOffset)
2758           MIB.addReg(SOffset);
2759         else
2760           MIB.addImm(0);
2761       },
2762       [=](MachineInstrBuilder &MIB) { // offset
2763         MIB.addImm(Offset);
2764       },
2765       addZeroImm //  slc
2766     }};
2767 }
2768 
2769 InstructionSelector::ComplexRendererFns
2770 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
2771   Register RSrcReg;
2772   Register SOffset;
2773   int64_t Offset = 0;
2774 
2775   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
2776     return {};
2777 
2778   return {{
2779       [=](MachineInstrBuilder &MIB) {  // rsrc
2780         MIB.addReg(RSrcReg);
2781       },
2782       [=](MachineInstrBuilder &MIB) { // soffset
2783         if (SOffset)
2784           MIB.addReg(SOffset);
2785         else
2786           MIB.addImm(0);
2787       },
2788       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
2789       addZeroImm //  slc
2790     }};
2791 }
2792 
2793 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
2794                                                  const MachineInstr &MI,
2795                                                  int OpIdx) const {
2796   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2797          "Expected G_CONSTANT");
2798   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
2799 }
2800 
2801 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
2802                                                 const MachineInstr &MI,
2803                                                 int OpIdx) const {
2804   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2805          "Expected G_CONSTANT");
2806   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
2807 }
2808 
2809 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
2810                                                  const MachineInstr &MI,
2811                                                  int OpIdx) const {
2812   assert(OpIdx == -1);
2813 
2814   const MachineOperand &Op = MI.getOperand(1);
2815   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
2816     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
2817   else {
2818     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
2819     MIB.addImm(Op.getCImm()->getSExtValue());
2820   }
2821 }
2822 
2823 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
2824                                                 const MachineInstr &MI,
2825                                                 int OpIdx) const {
2826   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2827          "Expected G_CONSTANT");
2828   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
2829 }
2830 
2831 /// This only really exists to satisfy DAG type checking machinery, so is a
2832 /// no-op here.
2833 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
2834                                                 const MachineInstr &MI,
2835                                                 int OpIdx) const {
2836   MIB.addImm(MI.getOperand(OpIdx).getImm());
2837 }
2838 
2839 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
2840                                                  const MachineInstr &MI,
2841                                                  int OpIdx) const {
2842   assert(OpIdx >= 0 && "expected to match an immediate operand");
2843   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
2844 }
2845 
2846 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
2847                                                  const MachineInstr &MI,
2848                                                  int OpIdx) const {
2849   assert(OpIdx >= 0 && "expected to match an immediate operand");
2850   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
2851 }
2852 
2853 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
2854                                                  const MachineInstr &MI,
2855                                                  int OpIdx) const {
2856   assert(OpIdx >= 0 && "expected to match an immediate operand");
2857   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
2858 }
2859 
2860 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
2861                                                  const MachineInstr &MI,
2862                                                  int OpIdx) const {
2863   assert(OpIdx >= 0 && "expected to match an immediate operand");
2864   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
2865 }
2866 
2867 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
2868   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
2869 }
2870 
2871 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
2872   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
2873 }
2874 
2875 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
2876   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
2877 }
2878 
2879 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
2880   return TII.isInlineConstant(Imm);
2881 }
2882