1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPUInstrInfo.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPURegisterBankInfo.h"
18 #include "AMDGPUSubtarget.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
21 #include "SIMachineFunctionInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
24 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
27 #include "llvm/CodeGen/GlobalISel/Utils.h"
28 #include "llvm/CodeGen/MachineBasicBlock.h"
29 #include "llvm/CodeGen/MachineFunction.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/IR/Type.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 
37 #define DEBUG_TYPE "amdgpu-isel"
38 
39 using namespace llvm;
40 using namespace MIPatternMatch;
41 
42 #define GET_GLOBALISEL_IMPL
43 #define AMDGPUSubtarget GCNSubtarget
44 #include "AMDGPUGenGlobalISel.inc"
45 #undef GET_GLOBALISEL_IMPL
46 #undef AMDGPUSubtarget
47 
48 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
49     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
50     const AMDGPUTargetMachine &TM)
51     : InstructionSelector(), TII(*STI.getInstrInfo()),
52       TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
53       STI(STI),
54       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
55 #define GET_GLOBALISEL_PREDICATES_INIT
56 #include "AMDGPUGenGlobalISel.inc"
57 #undef GET_GLOBALISEL_PREDICATES_INIT
58 #define GET_GLOBALISEL_TEMPORARIES_INIT
59 #include "AMDGPUGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_INIT
61 {
62 }
63 
64 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
65 
66 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits &KB,
67                                         CodeGenCoverage &CoverageInfo) {
68   MRI = &MF.getRegInfo();
69   InstructionSelector::setupMF(MF, KB, CoverageInfo);
70 }
71 
72 bool AMDGPUInstructionSelector::isVCC(Register Reg,
73                                       const MachineRegisterInfo &MRI) const {
74   if (Register::isPhysicalRegister(Reg))
75     return Reg == TRI.getVCC();
76 
77   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
78   const TargetRegisterClass *RC =
79       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
80   if (RC) {
81     const LLT Ty = MRI.getType(Reg);
82     return RC->hasSuperClassEq(TRI.getBoolRC()) &&
83            Ty.isValid() && Ty.getSizeInBits() == 1;
84   }
85 
86   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
87   return RB->getID() == AMDGPU::VCCRegBankID;
88 }
89 
90 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
91                                                         unsigned NewOpc) const {
92   MI.setDesc(TII.get(NewOpc));
93   MI.RemoveOperand(1); // Remove intrinsic ID.
94   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
95 
96   MachineOperand &Dst = MI.getOperand(0);
97   MachineOperand &Src = MI.getOperand(1);
98 
99   // TODO: This should be legalized to s32 if needed
100   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
101     return false;
102 
103   const TargetRegisterClass *DstRC
104     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
105   const TargetRegisterClass *SrcRC
106     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
107   if (!DstRC || DstRC != SrcRC)
108     return false;
109 
110   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
111          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
112 }
113 
114 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
115   const DebugLoc &DL = I.getDebugLoc();
116   MachineBasicBlock *BB = I.getParent();
117   I.setDesc(TII.get(TargetOpcode::COPY));
118 
119   const MachineOperand &Src = I.getOperand(1);
120   MachineOperand &Dst = I.getOperand(0);
121   Register DstReg = Dst.getReg();
122   Register SrcReg = Src.getReg();
123 
124   if (isVCC(DstReg, *MRI)) {
125     if (SrcReg == AMDGPU::SCC) {
126       const TargetRegisterClass *RC
127         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
128       if (!RC)
129         return true;
130       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
131     }
132 
133     if (!isVCC(SrcReg, *MRI)) {
134       // TODO: Should probably leave the copy and let copyPhysReg expand it.
135       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
136         return false;
137 
138       const TargetRegisterClass *SrcRC
139         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
140 
141       Register MaskedReg = MRI->createVirtualRegister(SrcRC);
142 
143       // We can't trust the high bits at this point, so clear them.
144 
145       // TODO: Skip masking high bits if def is known boolean.
146 
147       unsigned AndOpc = TRI.isSGPRClass(SrcRC) ?
148         AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
149       BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
150         .addImm(1)
151         .addReg(SrcReg);
152       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
153         .addImm(0)
154         .addReg(MaskedReg);
155 
156       if (!MRI->getRegClassOrNull(SrcReg))
157         MRI->setRegClass(SrcReg, SrcRC);
158       I.eraseFromParent();
159       return true;
160     }
161 
162     const TargetRegisterClass *RC =
163       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
164     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
165       return false;
166 
167     // Don't constrain the source register to a class so the def instruction
168     // handles it (unless it's undef).
169     //
170     // FIXME: This is a hack. When selecting the def, we neeed to know
171     // specifically know that the result is VCCRegBank, and not just an SGPR
172     // with size 1. An SReg_32 with size 1 is ambiguous with wave32.
173     if (Src.isUndef()) {
174       const TargetRegisterClass *SrcRC =
175         TRI.getConstrainedRegClassForOperand(Src, *MRI);
176       if (SrcRC && !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
177         return false;
178     }
179 
180     return true;
181   }
182 
183   for (const MachineOperand &MO : I.operands()) {
184     if (Register::isPhysicalRegister(MO.getReg()))
185       continue;
186 
187     const TargetRegisterClass *RC =
188             TRI.getConstrainedRegClassForOperand(MO, *MRI);
189     if (!RC)
190       continue;
191     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
192   }
193   return true;
194 }
195 
196 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
197   const Register DefReg = I.getOperand(0).getReg();
198   const LLT DefTy = MRI->getType(DefReg);
199 
200   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
201 
202   const RegClassOrRegBank &RegClassOrBank =
203     MRI->getRegClassOrRegBank(DefReg);
204 
205   const TargetRegisterClass *DefRC
206     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
207   if (!DefRC) {
208     if (!DefTy.isValid()) {
209       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
210       return false;
211     }
212 
213     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
214     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB, *MRI);
215     if (!DefRC) {
216       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
217       return false;
218     }
219   }
220 
221   // TODO: Verify that all registers have the same bank
222   I.setDesc(TII.get(TargetOpcode::PHI));
223   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
224 }
225 
226 MachineOperand
227 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
228                                            const TargetRegisterClass &SubRC,
229                                            unsigned SubIdx) const {
230 
231   MachineInstr *MI = MO.getParent();
232   MachineBasicBlock *BB = MO.getParent()->getParent();
233   Register DstReg = MRI->createVirtualRegister(&SubRC);
234 
235   if (MO.isReg()) {
236     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
237     Register Reg = MO.getReg();
238     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
239             .addReg(Reg, 0, ComposedSubIdx);
240 
241     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
242                                      MO.isKill(), MO.isDead(), MO.isUndef(),
243                                      MO.isEarlyClobber(), 0, MO.isDebug(),
244                                      MO.isInternalRead());
245   }
246 
247   assert(MO.isImm());
248 
249   APInt Imm(64, MO.getImm());
250 
251   switch (SubIdx) {
252   default:
253     llvm_unreachable("do not know to split immediate with this sub index.");
254   case AMDGPU::sub0:
255     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
256   case AMDGPU::sub1:
257     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
258   }
259 }
260 
261 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
262   switch (Opc) {
263   case AMDGPU::G_AND:
264     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
265   case AMDGPU::G_OR:
266     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
267   case AMDGPU::G_XOR:
268     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
269   default:
270     llvm_unreachable("not a bit op");
271   }
272 }
273 
274 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
275   MachineOperand &Dst = I.getOperand(0);
276   MachineOperand &Src0 = I.getOperand(1);
277   MachineOperand &Src1 = I.getOperand(2);
278   Register DstReg = Dst.getReg();
279   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
280 
281   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
282   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
283     const TargetRegisterClass *RC = TRI.getBoolRC();
284     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(),
285                                            RC == &AMDGPU::SReg_64RegClass);
286     I.setDesc(TII.get(InstOpc));
287     // Dead implicit-def of scc
288     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
289                                            true, // isImp
290                                            false, // isKill
291                                            true)); // isDead
292 
293     // FIXME: Hack to avoid turning the register bank into a register class.
294     // The selector for G_ICMP relies on seeing the register bank for the result
295     // is VCC. In wave32 if we constrain the registers to SReg_32 here, it will
296     // be ambiguous whether it's a scalar or vector bool.
297     if (Src0.isUndef() && !MRI->getRegClassOrNull(Src0.getReg()))
298       MRI->setRegClass(Src0.getReg(), RC);
299     if (Src1.isUndef() && !MRI->getRegClassOrNull(Src1.getReg()))
300       MRI->setRegClass(Src1.getReg(), RC);
301 
302     return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
303   }
304 
305   // TODO: Should this allow an SCC bank result, and produce a copy from SCC for
306   // the result?
307   if (DstRB->getID() == AMDGPU::SGPRRegBankID) {
308     unsigned InstOpc = getLogicalBitOpcode(I.getOpcode(), Size > 32);
309     I.setDesc(TII.get(InstOpc));
310     // Dead implicit-def of scc
311     I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
312                                            true, // isImp
313                                            false, // isKill
314                                            true)); // isDead
315     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
316   }
317 
318   return false;
319 }
320 
321 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
322   MachineBasicBlock *BB = I.getParent();
323   MachineFunction *MF = BB->getParent();
324   Register DstReg = I.getOperand(0).getReg();
325   const DebugLoc &DL = I.getDebugLoc();
326   LLT Ty = MRI->getType(DstReg);
327   if (Ty.isVector())
328     return false;
329 
330   unsigned Size = Ty.getSizeInBits();
331   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
332   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
333   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
334 
335   if (Size == 32) {
336     if (IsSALU) {
337       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
338       MachineInstr *Add =
339         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
340         .add(I.getOperand(1))
341         .add(I.getOperand(2));
342       I.eraseFromParent();
343       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
344     }
345 
346     if (STI.hasAddNoCarry()) {
347       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
348       I.setDesc(TII.get(Opc));
349       I.addOperand(*MF, MachineOperand::CreateImm(0));
350       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
351       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
352     }
353 
354     const unsigned Opc = Sub ? AMDGPU::V_SUB_I32_e64 : AMDGPU::V_ADD_I32_e64;
355 
356     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
357     MachineInstr *Add
358       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
359       .addDef(UnusedCarry, RegState::Dead)
360       .add(I.getOperand(1))
361       .add(I.getOperand(2))
362       .addImm(0);
363     I.eraseFromParent();
364     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
365   }
366 
367   assert(!Sub && "illegal sub should not reach here");
368 
369   const TargetRegisterClass &RC
370     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
371   const TargetRegisterClass &HalfRC
372     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
373 
374   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
375   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
376   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
377   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
378 
379   Register DstLo = MRI->createVirtualRegister(&HalfRC);
380   Register DstHi = MRI->createVirtualRegister(&HalfRC);
381 
382   if (IsSALU) {
383     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
384       .add(Lo1)
385       .add(Lo2);
386     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
387       .add(Hi1)
388       .add(Hi2);
389   } else {
390     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
391     Register CarryReg = MRI->createVirtualRegister(CarryRC);
392     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_I32_e64), DstLo)
393       .addDef(CarryReg)
394       .add(Lo1)
395       .add(Lo2)
396       .addImm(0);
397     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
398       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
399       .add(Hi1)
400       .add(Hi2)
401       .addReg(CarryReg, RegState::Kill)
402       .addImm(0);
403 
404     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
405       return false;
406   }
407 
408   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
409     .addReg(DstLo)
410     .addImm(AMDGPU::sub0)
411     .addReg(DstHi)
412     .addImm(AMDGPU::sub1);
413 
414 
415   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
416     return false;
417 
418   I.eraseFromParent();
419   return true;
420 }
421 
422 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
423   MachineInstr &I) const {
424   MachineBasicBlock *BB = I.getParent();
425   MachineFunction *MF = BB->getParent();
426   const DebugLoc &DL = I.getDebugLoc();
427   Register Dst0Reg = I.getOperand(0).getReg();
428   Register Dst1Reg = I.getOperand(1).getReg();
429   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
430                      I.getOpcode() == AMDGPU::G_UADDE;
431   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
432                           I.getOpcode() == AMDGPU::G_USUBE;
433 
434   if (isVCC(Dst1Reg, *MRI)) {
435       // The name of the opcodes are misleading. v_add_i32/v_sub_i32 have unsigned
436       // carry out despite the _i32 name. These were renamed in VI to _U32.
437       // FIXME: We should probably rename the opcodes here.
438     unsigned NoCarryOpc = IsAdd ? AMDGPU::V_ADD_I32_e64 : AMDGPU::V_SUB_I32_e64;
439     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
440     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
441     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
442     I.addOperand(*MF, MachineOperand::CreateImm(0));
443     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
444   }
445 
446   Register Src0Reg = I.getOperand(2).getReg();
447   Register Src1Reg = I.getOperand(3).getReg();
448 
449   if (HasCarryIn) {
450     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
451       .addReg(I.getOperand(4).getReg());
452   }
453 
454   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
455   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
456 
457   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
458     .add(I.getOperand(2))
459     .add(I.getOperand(3));
460   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
461     .addReg(AMDGPU::SCC);
462 
463   if (!MRI->getRegClassOrNull(Dst1Reg))
464     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
465 
466   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
467       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
468       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
469     return false;
470 
471   if (HasCarryIn &&
472       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
473                                     AMDGPU::SReg_32RegClass, *MRI))
474     return false;
475 
476   I.eraseFromParent();
477   return true;
478 }
479 
480 // TODO: We should probably legalize these to only using 32-bit results.
481 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
482   MachineBasicBlock *BB = I.getParent();
483   Register DstReg = I.getOperand(0).getReg();
484   Register SrcReg = I.getOperand(1).getReg();
485   LLT DstTy = MRI->getType(DstReg);
486   LLT SrcTy = MRI->getType(SrcReg);
487   const unsigned SrcSize = SrcTy.getSizeInBits();
488   const unsigned DstSize = DstTy.getSizeInBits();
489 
490   // TODO: Should handle any multiple of 32 offset.
491   unsigned Offset = I.getOperand(2).getImm();
492   if (Offset % 32 != 0 || DstSize > 128)
493     return false;
494 
495   const TargetRegisterClass *DstRC =
496     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
497   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
498     return false;
499 
500   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
501   const TargetRegisterClass *SrcRC =
502     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
503   if (!SrcRC)
504     return false;
505   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
506                                                          DstSize / 32);
507   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
508   if (!SrcRC)
509     return false;
510 
511   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
512                                     *SrcRC, I.getOperand(1));
513   const DebugLoc &DL = I.getDebugLoc();
514   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
515     .addReg(SrcReg, 0, SubReg);
516 
517   I.eraseFromParent();
518   return true;
519 }
520 
521 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
522   MachineBasicBlock *BB = MI.getParent();
523   Register DstReg = MI.getOperand(0).getReg();
524   LLT DstTy = MRI->getType(DstReg);
525   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
526 
527   const unsigned SrcSize = SrcTy.getSizeInBits();
528   if (SrcSize < 32)
529     return selectImpl(MI, *CoverageInfo);
530 
531   const DebugLoc &DL = MI.getDebugLoc();
532   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
533   const unsigned DstSize = DstTy.getSizeInBits();
534   const TargetRegisterClass *DstRC =
535     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
536   if (!DstRC)
537     return false;
538 
539   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
540   MachineInstrBuilder MIB =
541     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
542   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
543     MachineOperand &Src = MI.getOperand(I + 1);
544     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
545     MIB.addImm(SubRegs[I]);
546 
547     const TargetRegisterClass *SrcRC
548       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
549     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
550       return false;
551   }
552 
553   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
554     return false;
555 
556   MI.eraseFromParent();
557   return true;
558 }
559 
560 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
561   MachineBasicBlock *BB = MI.getParent();
562   const int NumDst = MI.getNumOperands() - 1;
563 
564   MachineOperand &Src = MI.getOperand(NumDst);
565 
566   Register SrcReg = Src.getReg();
567   Register DstReg0 = MI.getOperand(0).getReg();
568   LLT DstTy = MRI->getType(DstReg0);
569   LLT SrcTy = MRI->getType(SrcReg);
570 
571   const unsigned DstSize = DstTy.getSizeInBits();
572   const unsigned SrcSize = SrcTy.getSizeInBits();
573   const DebugLoc &DL = MI.getDebugLoc();
574   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
575 
576   const TargetRegisterClass *SrcRC =
577     TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank, *MRI);
578   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
579     return false;
580 
581   const unsigned SrcFlags = getUndefRegState(Src.isUndef());
582 
583   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
584   // source, and this relies on the fact that the same subregister indices are
585   // used for both.
586   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
587   for (int I = 0, E = NumDst; I != E; ++I) {
588     MachineOperand &Dst = MI.getOperand(I);
589     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
590       .addReg(SrcReg, SrcFlags, SubRegs[I]);
591 
592     const TargetRegisterClass *DstRC =
593       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
594     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
595       return false;
596   }
597 
598   MI.eraseFromParent();
599   return true;
600 }
601 
602 static bool isZero(Register Reg, const MachineRegisterInfo &MRI) {
603   int64_t Val;
604   return mi_match(Reg, MRI, m_ICst(Val)) && Val == 0;
605 }
606 
607 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
608   MachineInstr &MI) const {
609   if (selectImpl(MI, *CoverageInfo))
610     return true;
611 
612   const LLT S32 = LLT::scalar(32);
613   const LLT V2S16 = LLT::vector(2, 16);
614 
615   Register Dst = MI.getOperand(0).getReg();
616   if (MRI->getType(Dst) != V2S16)
617     return false;
618 
619   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
620   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
621     return false;
622 
623   Register Src0 = MI.getOperand(1).getReg();
624   Register Src1 = MI.getOperand(2).getReg();
625   if (MRI->getType(Src0) != S32)
626     return false;
627 
628   const DebugLoc &DL = MI.getDebugLoc();
629   MachineBasicBlock *BB = MI.getParent();
630 
631   // TODO: This should probably be a combine somewhere
632   // (build_vector_trunc $src0, undef -> copy $src0
633   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
634   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
635     MI.setDesc(TII.get(AMDGPU::COPY));
636     MI.RemoveOperand(2);
637     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
638            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
639   }
640 
641   Register ShiftSrc0;
642   Register ShiftSrc1;
643   int64_t ShiftAmt;
644 
645   // With multiple uses of the shift, this will duplicate the shift and
646   // increase register pressure.
647   //
648   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
649   //  => (S_PACK_HH_B32_B16 $src0, $src1)
650   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
651   //  => (S_PACK_LH_B32_B16 $src0, $src1)
652   // (build_vector_trunc $src0, $src1)
653   //  => (S_PACK_LL_B32_B16 $src0, $src1)
654 
655   // FIXME: This is an inconvenient way to check a specific value
656   bool Shift0 = mi_match(
657     Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_ICst(ShiftAmt)))) &&
658     ShiftAmt == 16;
659 
660   bool Shift1 = mi_match(
661     Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_ICst(ShiftAmt)))) &&
662     ShiftAmt == 16;
663 
664   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
665   if (Shift0 && Shift1) {
666     Opc = AMDGPU::S_PACK_HH_B32_B16;
667     MI.getOperand(1).setReg(ShiftSrc0);
668     MI.getOperand(2).setReg(ShiftSrc1);
669   } else if (Shift1) {
670     Opc = AMDGPU::S_PACK_LH_B32_B16;
671     MI.getOperand(2).setReg(ShiftSrc1);
672   } else if (Shift0 && isZero(Src1, *MRI)) {
673     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
674     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
675       .addReg(ShiftSrc0)
676       .addImm(16);
677 
678     MI.eraseFromParent();
679     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
680   }
681 
682   MI.setDesc(TII.get(Opc));
683   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
684 }
685 
686 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
687   return selectG_ADD_SUB(I);
688 }
689 
690 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
691   const MachineOperand &MO = I.getOperand(0);
692 
693   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
694   // regbank check here is to know why getConstrainedRegClassForOperand failed.
695   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
696   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
697       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
698     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
699     return true;
700   }
701 
702   return false;
703 }
704 
705 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
706   MachineBasicBlock *BB = I.getParent();
707 
708   Register DstReg = I.getOperand(0).getReg();
709   Register Src0Reg = I.getOperand(1).getReg();
710   Register Src1Reg = I.getOperand(2).getReg();
711   LLT Src1Ty = MRI->getType(Src1Reg);
712 
713   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
714   unsigned InsSize = Src1Ty.getSizeInBits();
715 
716   int64_t Offset = I.getOperand(3).getImm();
717   if (Offset % 32 != 0)
718     return false;
719 
720   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
721   if (SubReg == AMDGPU::NoSubRegister)
722     return false;
723 
724   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
725   const TargetRegisterClass *DstRC =
726     TRI.getRegClassForSizeOnBank(DstSize, *DstBank, *MRI);
727   if (!DstRC)
728     return false;
729 
730   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
731   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
732   const TargetRegisterClass *Src0RC =
733     TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank, *MRI);
734   const TargetRegisterClass *Src1RC =
735     TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank, *MRI);
736 
737   // Deal with weird cases where the class only partially supports the subreg
738   // index.
739   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
740   if (!Src0RC || !Src1RC)
741     return false;
742 
743   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
744       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
745       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
746     return false;
747 
748   const DebugLoc &DL = I.getDebugLoc();
749   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
750     .addReg(Src0Reg)
751     .addReg(Src1Reg)
752     .addImm(SubReg);
753 
754   I.eraseFromParent();
755   return true;
756 }
757 
758 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
759   if (STI.getLDSBankCount() != 16)
760     return selectImpl(MI, *CoverageInfo);
761 
762   Register Dst = MI.getOperand(0).getReg();
763   Register Src0 = MI.getOperand(2).getReg();
764   Register M0Val = MI.getOperand(6).getReg();
765   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
766       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
767       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
768     return false;
769 
770   // This requires 2 instructions. It is possible to write a pattern to support
771   // this, but the generated isel emitter doesn't correctly deal with multiple
772   // output instructions using the same physical register input. The copy to m0
773   // is incorrectly placed before the second instruction.
774   //
775   // TODO: Match source modifiers.
776 
777   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
778   const DebugLoc &DL = MI.getDebugLoc();
779   MachineBasicBlock *MBB = MI.getParent();
780 
781   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
782     .addReg(M0Val);
783   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
784     .addImm(2)
785     .addImm(MI.getOperand(4).getImm())  // $attr
786     .addImm(MI.getOperand(3).getImm()); // $attrchan
787 
788   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
789     .addImm(0)                          // $src0_modifiers
790     .addReg(Src0)                       // $src0
791     .addImm(MI.getOperand(4).getImm())  // $attr
792     .addImm(MI.getOperand(3).getImm())  // $attrchan
793     .addImm(0)                          // $src2_modifiers
794     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
795     .addImm(MI.getOperand(5).getImm())  // $high
796     .addImm(0)                          // $clamp
797     .addImm(0);                         // $omod
798 
799   MI.eraseFromParent();
800   return true;
801 }
802 
803 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
804   unsigned IntrinsicID = I.getIntrinsicID();
805   switch (IntrinsicID) {
806   case Intrinsic::amdgcn_if_break: {
807     MachineBasicBlock *BB = I.getParent();
808 
809     // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
810     // SelectionDAG uses for wave32 vs wave64.
811     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
812       .add(I.getOperand(0))
813       .add(I.getOperand(2))
814       .add(I.getOperand(3));
815 
816     Register DstReg = I.getOperand(0).getReg();
817     Register Src0Reg = I.getOperand(2).getReg();
818     Register Src1Reg = I.getOperand(3).getReg();
819 
820     I.eraseFromParent();
821 
822     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
823       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
824 
825     return true;
826   }
827   case Intrinsic::amdgcn_interp_p1_f16:
828     return selectInterpP1F16(I);
829   case Intrinsic::amdgcn_wqm:
830     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
831   case Intrinsic::amdgcn_softwqm:
832     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
833   case Intrinsic::amdgcn_wwm:
834     return constrainCopyLikeIntrin(I, AMDGPU::WWM);
835   default:
836     return selectImpl(I, *CoverageInfo);
837   }
838 }
839 
840 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
841   if (Size != 32 && Size != 64)
842     return -1;
843   switch (P) {
844   default:
845     llvm_unreachable("Unknown condition code!");
846   case CmpInst::ICMP_NE:
847     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
848   case CmpInst::ICMP_EQ:
849     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
850   case CmpInst::ICMP_SGT:
851     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
852   case CmpInst::ICMP_SGE:
853     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
854   case CmpInst::ICMP_SLT:
855     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
856   case CmpInst::ICMP_SLE:
857     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
858   case CmpInst::ICMP_UGT:
859     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
860   case CmpInst::ICMP_UGE:
861     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
862   case CmpInst::ICMP_ULT:
863     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
864   case CmpInst::ICMP_ULE:
865     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
866   }
867 }
868 
869 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
870                                               unsigned Size) const {
871   if (Size == 64) {
872     if (!STI.hasScalarCompareEq64())
873       return -1;
874 
875     switch (P) {
876     case CmpInst::ICMP_NE:
877       return AMDGPU::S_CMP_LG_U64;
878     case CmpInst::ICMP_EQ:
879       return AMDGPU::S_CMP_EQ_U64;
880     default:
881       return -1;
882     }
883   }
884 
885   if (Size != 32)
886     return -1;
887 
888   switch (P) {
889   case CmpInst::ICMP_NE:
890     return AMDGPU::S_CMP_LG_U32;
891   case CmpInst::ICMP_EQ:
892     return AMDGPU::S_CMP_EQ_U32;
893   case CmpInst::ICMP_SGT:
894     return AMDGPU::S_CMP_GT_I32;
895   case CmpInst::ICMP_SGE:
896     return AMDGPU::S_CMP_GE_I32;
897   case CmpInst::ICMP_SLT:
898     return AMDGPU::S_CMP_LT_I32;
899   case CmpInst::ICMP_SLE:
900     return AMDGPU::S_CMP_LE_I32;
901   case CmpInst::ICMP_UGT:
902     return AMDGPU::S_CMP_GT_U32;
903   case CmpInst::ICMP_UGE:
904     return AMDGPU::S_CMP_GE_U32;
905   case CmpInst::ICMP_ULT:
906     return AMDGPU::S_CMP_LT_U32;
907   case CmpInst::ICMP_ULE:
908     return AMDGPU::S_CMP_LE_U32;
909   default:
910     llvm_unreachable("Unknown condition code!");
911   }
912 }
913 
914 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
915   MachineBasicBlock *BB = I.getParent();
916   const DebugLoc &DL = I.getDebugLoc();
917 
918   Register SrcReg = I.getOperand(2).getReg();
919   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
920 
921   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
922 
923   Register CCReg = I.getOperand(0).getReg();
924   if (!isVCC(CCReg, *MRI)) {
925     int Opcode = getS_CMPOpcode(Pred, Size);
926     if (Opcode == -1)
927       return false;
928     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
929             .add(I.getOperand(2))
930             .add(I.getOperand(3));
931     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
932       .addReg(AMDGPU::SCC);
933     bool Ret =
934         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
935         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
936     I.eraseFromParent();
937     return Ret;
938   }
939 
940   int Opcode = getV_CMPOpcode(Pred, Size);
941   if (Opcode == -1)
942     return false;
943 
944   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
945             I.getOperand(0).getReg())
946             .add(I.getOperand(2))
947             .add(I.getOperand(3));
948   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
949                                *TRI.getBoolRC(), *MRI);
950   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
951   I.eraseFromParent();
952   return Ret;
953 }
954 
955 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
956   // FIXME: Manually selecting to avoid dealiing with the SReg_1 trick
957   // SelectionDAG uses for wave32 vs wave64.
958   MachineBasicBlock *BB = MI.getParent();
959   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
960       .add(MI.getOperand(1));
961 
962   Register Reg = MI.getOperand(1).getReg();
963   MI.eraseFromParent();
964 
965   if (!MRI->getRegClassOrNull(Reg))
966     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
967   return true;
968 }
969 
970 static unsigned getDSShaderTypeValue(const MachineFunction &MF) {
971   switch (MF.getFunction().getCallingConv()) {
972   case CallingConv::AMDGPU_PS:
973     return 1;
974   case CallingConv::AMDGPU_VS:
975     return 2;
976   case CallingConv::AMDGPU_GS:
977     return 3;
978   case CallingConv::AMDGPU_HS:
979   case CallingConv::AMDGPU_LS:
980   case CallingConv::AMDGPU_ES:
981     report_fatal_error("ds_ordered_count unsupported for this calling conv");
982   case CallingConv::AMDGPU_CS:
983   case CallingConv::AMDGPU_KERNEL:
984   case CallingConv::C:
985   case CallingConv::Fast:
986   default:
987     // Assume other calling conventions are various compute callable functions
988     return 0;
989   }
990 }
991 
992 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
993   MachineInstr &MI, Intrinsic::ID IntrID) const {
994   MachineBasicBlock *MBB = MI.getParent();
995   MachineFunction *MF = MBB->getParent();
996   const DebugLoc &DL = MI.getDebugLoc();
997 
998   unsigned IndexOperand = MI.getOperand(7).getImm();
999   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1000   bool WaveDone = MI.getOperand(9).getImm() != 0;
1001 
1002   if (WaveDone && !WaveRelease)
1003     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1004 
1005   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1006   IndexOperand &= ~0x3f;
1007   unsigned CountDw = 0;
1008 
1009   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1010     CountDw = (IndexOperand >> 24) & 0xf;
1011     IndexOperand &= ~(0xf << 24);
1012 
1013     if (CountDw < 1 || CountDw > 4) {
1014       report_fatal_error(
1015         "ds_ordered_count: dword count must be between 1 and 4");
1016     }
1017   }
1018 
1019   if (IndexOperand)
1020     report_fatal_error("ds_ordered_count: bad index operand");
1021 
1022   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1023   unsigned ShaderType = getDSShaderTypeValue(*MF);
1024 
1025   unsigned Offset0 = OrderedCountIndex << 2;
1026   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1027                      (Instruction << 4);
1028 
1029   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1030     Offset1 |= (CountDw - 1) << 6;
1031 
1032   unsigned Offset = Offset0 | (Offset1 << 8);
1033 
1034   Register M0Val = MI.getOperand(2).getReg();
1035   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1036     .addReg(M0Val);
1037 
1038   Register DstReg = MI.getOperand(0).getReg();
1039   Register ValReg = MI.getOperand(3).getReg();
1040   MachineInstrBuilder DS =
1041     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1042       .addReg(ValReg)
1043       .addImm(Offset)
1044       .cloneMemRefs(MI);
1045 
1046   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1047     return false;
1048 
1049   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1050   MI.eraseFromParent();
1051   return Ret;
1052 }
1053 
1054 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1055   switch (IntrID) {
1056   case Intrinsic::amdgcn_ds_gws_init:
1057     return AMDGPU::DS_GWS_INIT;
1058   case Intrinsic::amdgcn_ds_gws_barrier:
1059     return AMDGPU::DS_GWS_BARRIER;
1060   case Intrinsic::amdgcn_ds_gws_sema_v:
1061     return AMDGPU::DS_GWS_SEMA_V;
1062   case Intrinsic::amdgcn_ds_gws_sema_br:
1063     return AMDGPU::DS_GWS_SEMA_BR;
1064   case Intrinsic::amdgcn_ds_gws_sema_p:
1065     return AMDGPU::DS_GWS_SEMA_P;
1066   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1067     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1068   default:
1069     llvm_unreachable("not a gws intrinsic");
1070   }
1071 }
1072 
1073 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1074                                                      Intrinsic::ID IID) const {
1075   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1076       !STI.hasGWSSemaReleaseAll())
1077     return false;
1078 
1079   // intrinsic ID, vsrc, offset
1080   const bool HasVSrc = MI.getNumOperands() == 3;
1081   assert(HasVSrc || MI.getNumOperands() == 2);
1082 
1083   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1084   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1085   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1086     return false;
1087 
1088   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1089   assert(OffsetDef);
1090 
1091   unsigned ImmOffset;
1092 
1093   MachineBasicBlock *MBB = MI.getParent();
1094   const DebugLoc &DL = MI.getDebugLoc();
1095 
1096   MachineInstr *Readfirstlane = nullptr;
1097 
1098   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1099   // incoming offset, in case there's an add of a constant. We'll have to put it
1100   // back later.
1101   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1102     Readfirstlane = OffsetDef;
1103     BaseOffset = OffsetDef->getOperand(1).getReg();
1104     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1105   }
1106 
1107   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1108     // If we have a constant offset, try to use the 0 in m0 as the base.
1109     // TODO: Look into changing the default m0 initialization value. If the
1110     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1111     // the immediate offset.
1112 
1113     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1114     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1115       .addImm(0);
1116   } else {
1117     std::tie(BaseOffset, ImmOffset, OffsetDef)
1118       = AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1119 
1120     if (Readfirstlane) {
1121       // We have the constant offset now, so put the readfirstlane back on the
1122       // variable component.
1123       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1124         return false;
1125 
1126       Readfirstlane->getOperand(1).setReg(BaseOffset);
1127       BaseOffset = Readfirstlane->getOperand(0).getReg();
1128     } else {
1129       if (!RBI.constrainGenericRegister(BaseOffset,
1130                                         AMDGPU::SReg_32RegClass, *MRI))
1131         return false;
1132     }
1133 
1134     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1135     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1136       .addReg(BaseOffset)
1137       .addImm(16);
1138 
1139     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1140       .addReg(M0Base);
1141   }
1142 
1143   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1144   // offset field) % 64. Some versions of the programming guide omit the m0
1145   // part, or claim it's from offset 0.
1146   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1147 
1148   if (HasVSrc) {
1149     Register VSrc = MI.getOperand(1).getReg();
1150     MIB.addReg(VSrc);
1151     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1152       return false;
1153   }
1154 
1155   MIB.addImm(ImmOffset)
1156      .addImm(-1) // $gds
1157      .cloneMemRefs(MI);
1158 
1159   MI.eraseFromParent();
1160   return true;
1161 }
1162 
1163 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1164                                                       bool IsAppend) const {
1165   Register PtrBase = MI.getOperand(2).getReg();
1166   LLT PtrTy = MRI->getType(PtrBase);
1167   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1168 
1169   unsigned Offset;
1170   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1171 
1172   // TODO: Should this try to look through readfirstlane like GWS?
1173   if (!isDSOffsetLegal(PtrBase, Offset, 16)) {
1174     PtrBase = MI.getOperand(2).getReg();
1175     Offset = 0;
1176   }
1177 
1178   MachineBasicBlock *MBB = MI.getParent();
1179   const DebugLoc &DL = MI.getDebugLoc();
1180   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1181 
1182   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1183     .addReg(PtrBase);
1184   BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1185     .addImm(Offset)
1186     .addImm(IsGDS ? -1 : 0)
1187     .cloneMemRefs(MI);
1188   MI.eraseFromParent();
1189   return true;
1190 }
1191 
1192 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1193                          bool &IsTexFail) {
1194   if (TexFailCtrl)
1195     IsTexFail = true;
1196 
1197   TFE = (TexFailCtrl & 0x1) ? 1 : 0;
1198   TexFailCtrl &= ~(uint64_t)0x1;
1199   LWE = (TexFailCtrl & 0x2) ? 1 : 0;
1200   TexFailCtrl &= ~(uint64_t)0x2;
1201 
1202   return TexFailCtrl == 0;
1203 }
1204 
1205 static bool parseCachePolicy(uint64_t Value,
1206                              bool *GLC, bool *SLC, bool *DLC) {
1207   if (GLC) {
1208     *GLC = (Value & 0x1) ? 1 : 0;
1209     Value &= ~(uint64_t)0x1;
1210   }
1211   if (SLC) {
1212     *SLC = (Value & 0x2) ? 1 : 0;
1213     Value &= ~(uint64_t)0x2;
1214   }
1215   if (DLC) {
1216     *DLC = (Value & 0x4) ? 1 : 0;
1217     Value &= ~(uint64_t)0x4;
1218   }
1219 
1220   return Value == 0;
1221 }
1222 
1223 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1224   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1225   MachineBasicBlock *MBB = MI.getParent();
1226   const DebugLoc &DL = MI.getDebugLoc();
1227 
1228   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1229     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1230 
1231   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1232   const AMDGPU::MIMGLZMappingInfo *LZMappingInfo =
1233       AMDGPU::getMIMGLZMappingInfo(Intr->BaseOpcode);
1234   const AMDGPU::MIMGMIPMappingInfo *MIPMappingInfo =
1235       AMDGPU::getMIMGMIPMappingInfo(Intr->BaseOpcode);
1236   unsigned IntrOpcode = Intr->BaseOpcode;
1237   const bool IsGFX10 = STI.getGeneration() >= AMDGPUSubtarget::GFX10;
1238 
1239   const LLT S16 = LLT::scalar(16);
1240   const int VAddrIdx = getImageVAddrIdxBegin(BaseOpcode,
1241                                              MI.getNumExplicitDefs());
1242   int NumVAddr, NumGradients;
1243   std::tie(NumVAddr, NumGradients) = getImageNumVAddr(Intr, BaseOpcode);
1244 
1245   const LLT AddrTy = MRI->getType(MI.getOperand(VAddrIdx).getReg());
1246   const bool IsA16 = AddrTy.getScalarType() == S16;
1247 
1248   Register VDataIn, VDataOut;
1249   LLT VDataTy;
1250   int NumVDataDwords = -1;
1251   bool IsD16 = false;
1252 
1253   // XXX - Can we just get the second to last argument for ctrl?
1254   unsigned CtrlIdx; // Index of texfailctrl argument
1255   bool Unorm;
1256   if (!BaseOpcode->Sampler) {
1257     Unorm = true;
1258     CtrlIdx = VAddrIdx + NumVAddr + 1;
1259   } else {
1260     Unorm = MI.getOperand(VAddrIdx + NumVAddr + 2).getImm() != 0;
1261     CtrlIdx = VAddrIdx + NumVAddr + 3;
1262   }
1263 
1264   bool TFE;
1265   bool LWE;
1266   bool IsTexFail = false;
1267   if (!parseTexFail(MI.getOperand(CtrlIdx).getImm(), TFE, LWE, IsTexFail))
1268     return false;
1269 
1270   unsigned DMask = 0;
1271   unsigned DMaskLanes = 0;
1272 
1273   if (BaseOpcode->Atomic) {
1274     VDataOut = MI.getOperand(0).getReg();
1275     VDataIn = MI.getOperand(2).getReg();
1276     LLT Ty = MRI->getType(VDataIn);
1277 
1278     // Be careful to allow atomic swap on 16-bit element vectors.
1279     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1280       Ty.getSizeInBits() == 128 :
1281       Ty.getSizeInBits() == 64;
1282 
1283     if (BaseOpcode->AtomicX2) {
1284       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1285 
1286       DMask = Is64Bit ? 0xf : 0x3;
1287       NumVDataDwords = Is64Bit ? 4 : 2;
1288     } else {
1289       DMask = Is64Bit ? 0x3 : 0x1;
1290       NumVDataDwords = Is64Bit ? 2 : 1;
1291     }
1292   } else {
1293     const int DMaskIdx = 2; // Input/output + intrinsic ID.
1294 
1295     DMask = MI.getOperand(DMaskIdx).getImm();
1296     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1297 
1298     if (BaseOpcode->Store) {
1299       VDataIn = MI.getOperand(1).getReg();
1300       VDataTy = MRI->getType(VDataIn);
1301       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1302     } else {
1303       VDataOut = MI.getOperand(0).getReg();
1304       VDataTy = MRI->getType(VDataOut);
1305       NumVDataDwords = DMaskLanes;
1306 
1307       // One memoperand is mandatory, except for getresinfo.
1308       // FIXME: Check this in verifier.
1309       if (!MI.memoperands_empty()) {
1310         const MachineMemOperand *MMO = *MI.memoperands_begin();
1311 
1312         // Infer d16 from the memory size, as the register type will be mangled by
1313         // unpacked subtargets, or by TFE.
1314         IsD16 = ((8 * MMO->getSize()) / DMaskLanes) < 32;
1315 
1316         if (IsD16 && !STI.hasUnpackedD16VMem())
1317           NumVDataDwords = (DMaskLanes + 1) / 2;
1318       }
1319     }
1320   }
1321 
1322   // Optimize _L to _LZ when _L is zero
1323   if (LZMappingInfo) {
1324     // The legalizer replaced the register with an immediate 0 if we need to
1325     // change the opcode.
1326     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1327     if (Lod.isImm()) {
1328       assert(Lod.getImm() == 0);
1329       IntrOpcode = LZMappingInfo->LZ;  // set new opcode to _lz variant of _l
1330     }
1331   }
1332 
1333   // Optimize _mip away, when 'lod' is zero
1334   if (MIPMappingInfo) {
1335     const MachineOperand &Lod = MI.getOperand(VAddrIdx + NumVAddr - 1);
1336     if (Lod.isImm()) {
1337       assert(Lod.getImm() == 0);
1338       IntrOpcode = MIPMappingInfo->NONMIP;  // set new opcode to variant without _mip
1339     }
1340   }
1341 
1342   // TODO: Check this in verifier.
1343   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1344 
1345   bool GLC = false;
1346   bool SLC = false;
1347   bool DLC = false;
1348   if (BaseOpcode->Atomic) {
1349     GLC = true; // TODO no-return optimization
1350     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), nullptr, &SLC,
1351                           IsGFX10 ? &DLC : nullptr))
1352       return false;
1353   } else {
1354     if (!parseCachePolicy(MI.getOperand(CtrlIdx + 1).getImm(), &GLC, &SLC,
1355                           IsGFX10 ? &DLC : nullptr))
1356       return false;
1357   }
1358 
1359   int NumVAddrRegs = 0;
1360   int NumVAddrDwords = 0;
1361   for (int I = 0; I < NumVAddr; ++I) {
1362     // Skip the $noregs and 0s inserted during legalization.
1363     MachineOperand &AddrOp = MI.getOperand(VAddrIdx + I);
1364     if (!AddrOp.isReg())
1365       continue; // XXX - Break?
1366 
1367     Register Addr = AddrOp.getReg();
1368     if (!Addr)
1369       break;
1370 
1371     ++NumVAddrRegs;
1372     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1373   }
1374 
1375   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1376   // NSA, these should have beeen packed into a single value in the first
1377   // address register
1378   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1379   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1380     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1381     return false;
1382   }
1383 
1384   if (IsTexFail)
1385     ++NumVDataDwords;
1386 
1387   int Opcode = -1;
1388   if (IsGFX10) {
1389     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1390                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1391                                           : AMDGPU::MIMGEncGfx10Default,
1392                                    NumVDataDwords, NumVAddrDwords);
1393   } else {
1394     if (STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1395       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1396                                      NumVDataDwords, NumVAddrDwords);
1397     if (Opcode == -1)
1398       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1399                                      NumVDataDwords, NumVAddrDwords);
1400   }
1401   assert(Opcode != -1);
1402 
1403   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1404     .cloneMemRefs(MI);
1405 
1406   if (VDataOut) {
1407     if (BaseOpcode->AtomicX2) {
1408       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1409 
1410       Register TmpReg = MRI->createVirtualRegister(
1411         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1412       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1413 
1414       MIB.addDef(TmpReg);
1415       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1416         .addReg(TmpReg, RegState::Kill, SubReg);
1417 
1418     } else {
1419       MIB.addDef(VDataOut); // vdata output
1420     }
1421   }
1422 
1423   if (VDataIn)
1424     MIB.addReg(VDataIn); // vdata input
1425 
1426   for (int i = 0; i != NumVAddrRegs; ++i) {
1427     MachineOperand &SrcOp = MI.getOperand(VAddrIdx + i);
1428     if (SrcOp.isReg()) {
1429       assert(SrcOp.getReg() != 0);
1430       MIB.addReg(SrcOp.getReg());
1431     }
1432   }
1433 
1434   MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr).getReg()); // rsrc
1435   if (BaseOpcode->Sampler)
1436     MIB.addReg(MI.getOperand(VAddrIdx + NumVAddr + 1).getReg()); // sampler
1437 
1438   MIB.addImm(DMask); // dmask
1439 
1440   if (IsGFX10)
1441     MIB.addImm(DimInfo->Encoding);
1442   MIB.addImm(Unorm);
1443   if (IsGFX10)
1444     MIB.addImm(DLC);
1445 
1446   MIB.addImm(GLC);
1447   MIB.addImm(SLC);
1448   MIB.addImm(IsA16 &&  // a16 or r128
1449              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1450   if (IsGFX10)
1451     MIB.addImm(IsA16 ? -1 : 0);
1452 
1453   MIB.addImm(TFE); // tfe
1454   MIB.addImm(LWE); // lwe
1455   if (!IsGFX10)
1456     MIB.addImm(DimInfo->DA ? -1 : 0);
1457   if (BaseOpcode->HasD16)
1458     MIB.addImm(IsD16 ? -1 : 0);
1459 
1460   MI.eraseFromParent();
1461   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1462 }
1463 
1464 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1465     MachineInstr &I) const {
1466   unsigned IntrinsicID = I.getIntrinsicID();
1467   switch (IntrinsicID) {
1468   case Intrinsic::amdgcn_end_cf:
1469     return selectEndCfIntrinsic(I);
1470   case Intrinsic::amdgcn_ds_ordered_add:
1471   case Intrinsic::amdgcn_ds_ordered_swap:
1472     return selectDSOrderedIntrinsic(I, IntrinsicID);
1473   case Intrinsic::amdgcn_ds_gws_init:
1474   case Intrinsic::amdgcn_ds_gws_barrier:
1475   case Intrinsic::amdgcn_ds_gws_sema_v:
1476   case Intrinsic::amdgcn_ds_gws_sema_br:
1477   case Intrinsic::amdgcn_ds_gws_sema_p:
1478   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1479     return selectDSGWSIntrinsic(I, IntrinsicID);
1480   case Intrinsic::amdgcn_ds_append:
1481     return selectDSAppendConsume(I, true);
1482   case Intrinsic::amdgcn_ds_consume:
1483     return selectDSAppendConsume(I, false);
1484   default: {
1485     return selectImpl(I, *CoverageInfo);
1486   }
1487   }
1488 }
1489 
1490 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1491   if (selectImpl(I, *CoverageInfo))
1492     return true;
1493 
1494   MachineBasicBlock *BB = I.getParent();
1495   const DebugLoc &DL = I.getDebugLoc();
1496 
1497   Register DstReg = I.getOperand(0).getReg();
1498   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1499   assert(Size <= 32 || Size == 64);
1500   const MachineOperand &CCOp = I.getOperand(1);
1501   Register CCReg = CCOp.getReg();
1502   if (!isVCC(CCReg, *MRI)) {
1503     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1504                                          AMDGPU::S_CSELECT_B32;
1505     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1506             .addReg(CCReg);
1507 
1508     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1509     // bank, because it does not cover the register class that we used to represent
1510     // for it.  So we need to manually set the register class here.
1511     if (!MRI->getRegClassOrNull(CCReg))
1512         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1513     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1514             .add(I.getOperand(2))
1515             .add(I.getOperand(3));
1516 
1517     bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI) |
1518                constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1519     I.eraseFromParent();
1520     return Ret;
1521   }
1522 
1523   // Wide VGPR select should have been split in RegBankSelect.
1524   if (Size > 32)
1525     return false;
1526 
1527   MachineInstr *Select =
1528       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1529               .addImm(0)
1530               .add(I.getOperand(3))
1531               .addImm(0)
1532               .add(I.getOperand(2))
1533               .add(I.getOperand(1));
1534 
1535   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1536   I.eraseFromParent();
1537   return Ret;
1538 }
1539 
1540 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
1541   initM0(I);
1542   return selectImpl(I, *CoverageInfo);
1543 }
1544 
1545 static int sizeToSubRegIndex(unsigned Size) {
1546   switch (Size) {
1547   case 32:
1548     return AMDGPU::sub0;
1549   case 64:
1550     return AMDGPU::sub0_sub1;
1551   case 96:
1552     return AMDGPU::sub0_sub1_sub2;
1553   case 128:
1554     return AMDGPU::sub0_sub1_sub2_sub3;
1555   case 256:
1556     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1557   default:
1558     if (Size < 32)
1559       return AMDGPU::sub0;
1560     if (Size > 256)
1561       return -1;
1562     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1563   }
1564 }
1565 
1566 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1567   Register DstReg = I.getOperand(0).getReg();
1568   Register SrcReg = I.getOperand(1).getReg();
1569   const LLT DstTy = MRI->getType(DstReg);
1570   const LLT SrcTy = MRI->getType(SrcReg);
1571   const LLT S1 = LLT::scalar(1);
1572 
1573   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1574   const RegisterBank *DstRB;
1575   if (DstTy == S1) {
1576     // This is a special case. We don't treat s1 for legalization artifacts as
1577     // vcc booleans.
1578     DstRB = SrcRB;
1579   } else {
1580     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1581     if (SrcRB != DstRB)
1582       return false;
1583   }
1584 
1585   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1586 
1587   unsigned DstSize = DstTy.getSizeInBits();
1588   unsigned SrcSize = SrcTy.getSizeInBits();
1589 
1590   const TargetRegisterClass *SrcRC
1591     = TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB, *MRI);
1592   const TargetRegisterClass *DstRC
1593     = TRI.getRegClassForSizeOnBank(DstSize, *DstRB, *MRI);
1594 
1595   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1596       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1597     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1598     return false;
1599   }
1600 
1601   if (DstTy == LLT::vector(2, 16) && SrcTy == LLT::vector(2, 32)) {
1602     MachineBasicBlock *MBB = I.getParent();
1603     const DebugLoc &DL = I.getDebugLoc();
1604 
1605     Register LoReg = MRI->createVirtualRegister(DstRC);
1606     Register HiReg = MRI->createVirtualRegister(DstRC);
1607     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1608       .addReg(SrcReg, 0, AMDGPU::sub0);
1609     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1610       .addReg(SrcReg, 0, AMDGPU::sub1);
1611 
1612     if (IsVALU && STI.hasSDWA()) {
1613       // Write the low 16-bits of the high element into the high 16-bits of the
1614       // low element.
1615       MachineInstr *MovSDWA =
1616         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1617         .addImm(0)                             // $src0_modifiers
1618         .addReg(HiReg)                         // $src0
1619         .addImm(0)                             // $clamp
1620         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1621         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1622         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1623         .addReg(LoReg, RegState::Implicit);
1624       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1625     } else {
1626       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1627       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1628       Register ImmReg = MRI->createVirtualRegister(DstRC);
1629       if (IsVALU) {
1630         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1631           .addImm(16)
1632           .addReg(HiReg);
1633       } else {
1634         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1635           .addReg(HiReg)
1636           .addImm(16);
1637       }
1638 
1639       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1640       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1641       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1642 
1643       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1644         .addImm(0xffff);
1645       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1646         .addReg(LoReg)
1647         .addReg(ImmReg);
1648       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1649         .addReg(TmpReg0)
1650         .addReg(TmpReg1);
1651     }
1652 
1653     I.eraseFromParent();
1654     return true;
1655   }
1656 
1657   if (!DstTy.isScalar())
1658     return false;
1659 
1660   if (SrcSize > 32) {
1661     int SubRegIdx = sizeToSubRegIndex(DstSize);
1662     if (SubRegIdx == -1)
1663       return false;
1664 
1665     // Deal with weird cases where the class only partially supports the subreg
1666     // index.
1667     const TargetRegisterClass *SrcWithSubRC
1668       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1669     if (!SrcWithSubRC)
1670       return false;
1671 
1672     if (SrcWithSubRC != SrcRC) {
1673       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1674         return false;
1675     }
1676 
1677     I.getOperand(1).setSubReg(SubRegIdx);
1678   }
1679 
1680   I.setDesc(TII.get(TargetOpcode::COPY));
1681   return true;
1682 }
1683 
1684 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
1685 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
1686   Mask = maskTrailingOnes<unsigned>(Size);
1687   int SignedMask = static_cast<int>(Mask);
1688   return SignedMask >= -16 && SignedMask <= 64;
1689 }
1690 
1691 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
1692 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
1693   Register Reg, const MachineRegisterInfo &MRI,
1694   const TargetRegisterInfo &TRI) const {
1695   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
1696   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
1697     return RB;
1698 
1699   // Ignore the type, since we don't use vcc in artifacts.
1700   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
1701     return &RBI.getRegBankFromRegClass(*RC, LLT());
1702   return nullptr;
1703 }
1704 
1705 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
1706   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
1707   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
1708   const DebugLoc &DL = I.getDebugLoc();
1709   MachineBasicBlock &MBB = *I.getParent();
1710   const Register DstReg = I.getOperand(0).getReg();
1711   const Register SrcReg = I.getOperand(1).getReg();
1712 
1713   const LLT DstTy = MRI->getType(DstReg);
1714   const LLT SrcTy = MRI->getType(SrcReg);
1715   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
1716     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
1717   const unsigned DstSize = DstTy.getSizeInBits();
1718   if (!DstTy.isScalar())
1719     return false;
1720 
1721   if (I.getOpcode() == AMDGPU::G_ANYEXT)
1722     return selectCOPY(I);
1723 
1724   // Artifact casts should never use vcc.
1725   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
1726 
1727   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
1728     // 64-bit should have been split up in RegBankSelect
1729 
1730     // Try to use an and with a mask if it will save code size.
1731     unsigned Mask;
1732     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1733       MachineInstr *ExtI =
1734       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
1735         .addImm(Mask)
1736         .addReg(SrcReg);
1737       I.eraseFromParent();
1738       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1739     }
1740 
1741     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32 : AMDGPU::V_BFE_U32;
1742     MachineInstr *ExtI =
1743       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
1744       .addReg(SrcReg)
1745       .addImm(0) // Offset
1746       .addImm(SrcSize); // Width
1747     I.eraseFromParent();
1748     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
1749   }
1750 
1751   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
1752     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
1753       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
1754     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
1755       return false;
1756 
1757     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
1758       const unsigned SextOpc = SrcSize == 8 ?
1759         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
1760       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
1761         .addReg(SrcReg);
1762       I.eraseFromParent();
1763       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1764     }
1765 
1766     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
1767     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
1768 
1769     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
1770     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
1771       // We need a 64-bit register source, but the high bits don't matter.
1772       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
1773       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1774       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
1775 
1776       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
1777       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
1778         .addReg(SrcReg, 0, SubReg)
1779         .addImm(AMDGPU::sub0)
1780         .addReg(UndefReg)
1781         .addImm(AMDGPU::sub1);
1782 
1783       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
1784         .addReg(ExtReg)
1785         .addImm(SrcSize << 16);
1786 
1787       I.eraseFromParent();
1788       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
1789     }
1790 
1791     unsigned Mask;
1792     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
1793       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
1794         .addReg(SrcReg)
1795         .addImm(Mask);
1796     } else {
1797       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
1798         .addReg(SrcReg)
1799         .addImm(SrcSize << 16);
1800     }
1801 
1802     I.eraseFromParent();
1803     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
1804   }
1805 
1806   return false;
1807 }
1808 
1809 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
1810   MachineBasicBlock *BB = I.getParent();
1811   MachineOperand &ImmOp = I.getOperand(1);
1812 
1813   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
1814   if (ImmOp.isFPImm()) {
1815     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
1816     ImmOp.ChangeToImmediate(Imm.getZExtValue());
1817   } else if (ImmOp.isCImm()) {
1818     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
1819   }
1820 
1821   Register DstReg = I.getOperand(0).getReg();
1822   unsigned Size;
1823   bool IsSgpr;
1824   const RegisterBank *RB = MRI->getRegBankOrNull(I.getOperand(0).getReg());
1825   if (RB) {
1826     IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
1827     Size = MRI->getType(DstReg).getSizeInBits();
1828   } else {
1829     const TargetRegisterClass *RC = TRI.getRegClassForReg(*MRI, DstReg);
1830     IsSgpr = TRI.isSGPRClass(RC);
1831     Size = TRI.getRegSizeInBits(*RC);
1832   }
1833 
1834   if (Size != 32 && Size != 64)
1835     return false;
1836 
1837   unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1838   if (Size == 32) {
1839     I.setDesc(TII.get(Opcode));
1840     I.addImplicitDefUseOperands(*MF);
1841     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
1842   }
1843 
1844   const DebugLoc &DL = I.getDebugLoc();
1845 
1846   APInt Imm(Size, I.getOperand(1).getImm());
1847 
1848   MachineInstr *ResInst;
1849   if (IsSgpr && TII.isInlineConstant(Imm)) {
1850     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1851       .addImm(I.getOperand(1).getImm());
1852   } else {
1853     const TargetRegisterClass *RC = IsSgpr ?
1854       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
1855     Register LoReg = MRI->createVirtualRegister(RC);
1856     Register HiReg = MRI->createVirtualRegister(RC);
1857 
1858     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
1859       .addImm(Imm.trunc(32).getZExtValue());
1860 
1861     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
1862       .addImm(Imm.ashr(32).getZExtValue());
1863 
1864     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
1865       .addReg(LoReg)
1866       .addImm(AMDGPU::sub0)
1867       .addReg(HiReg)
1868       .addImm(AMDGPU::sub1);
1869   }
1870 
1871   // We can't call constrainSelectedInstRegOperands here, because it doesn't
1872   // work for target independent opcodes
1873   I.eraseFromParent();
1874   const TargetRegisterClass *DstRC =
1875     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
1876   if (!DstRC)
1877     return true;
1878   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
1879 }
1880 
1881 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
1882   // Only manually handle the f64 SGPR case.
1883   //
1884   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
1885   // the bit ops theoretically have a second result due to the implicit def of
1886   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
1887   // that is easy by disabling the check. The result works, but uses a
1888   // nonsensical sreg32orlds_and_sreg_1 regclass.
1889   //
1890   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
1891   // the variadic REG_SEQUENCE operands.
1892 
1893   Register Dst = MI.getOperand(0).getReg();
1894   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
1895   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
1896       MRI->getType(Dst) != LLT::scalar(64))
1897     return false;
1898 
1899   Register Src = MI.getOperand(1).getReg();
1900   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
1901   if (Fabs)
1902     Src = Fabs->getOperand(1).getReg();
1903 
1904   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
1905       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
1906     return false;
1907 
1908   MachineBasicBlock *BB = MI.getParent();
1909   const DebugLoc &DL = MI.getDebugLoc();
1910   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1911   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1912   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1913   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1914 
1915   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
1916     .addReg(Src, 0, AMDGPU::sub0);
1917   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
1918     .addReg(Src, 0, AMDGPU::sub1);
1919   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
1920     .addImm(0x80000000);
1921 
1922   // Set or toggle sign bit.
1923   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
1924   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
1925     .addReg(HiReg)
1926     .addReg(ConstReg);
1927   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
1928     .addReg(LoReg)
1929     .addImm(AMDGPU::sub0)
1930     .addReg(OpReg)
1931     .addImm(AMDGPU::sub1);
1932   MI.eraseFromParent();
1933   return true;
1934 }
1935 
1936 static bool isConstant(const MachineInstr &MI) {
1937   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
1938 }
1939 
1940 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
1941     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
1942 
1943   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
1944 
1945   assert(PtrMI);
1946 
1947   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
1948     return;
1949 
1950   GEPInfo GEPInfo(*PtrMI);
1951 
1952   for (unsigned i = 1; i != 3; ++i) {
1953     const MachineOperand &GEPOp = PtrMI->getOperand(i);
1954     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
1955     assert(OpDef);
1956     if (i == 2 && isConstant(*OpDef)) {
1957       // TODO: Could handle constant base + variable offset, but a combine
1958       // probably should have commuted it.
1959       assert(GEPInfo.Imm == 0);
1960       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
1961       continue;
1962     }
1963     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
1964     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
1965       GEPInfo.SgprParts.push_back(GEPOp.getReg());
1966     else
1967       GEPInfo.VgprParts.push_back(GEPOp.getReg());
1968   }
1969 
1970   AddrInfo.push_back(GEPInfo);
1971   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
1972 }
1973 
1974 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
1975   if (!MI.hasOneMemOperand())
1976     return false;
1977 
1978   const MachineMemOperand *MMO = *MI.memoperands_begin();
1979   const Value *Ptr = MMO->getValue();
1980 
1981   // UndefValue means this is a load of a kernel input.  These are uniform.
1982   // Sometimes LDS instructions have constant pointers.
1983   // If Ptr is null, then that means this mem operand contains a
1984   // PseudoSourceValue like GOT.
1985   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
1986       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
1987     return true;
1988 
1989   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
1990     return true;
1991 
1992   const Instruction *I = dyn_cast<Instruction>(Ptr);
1993   return I && I->getMetadata("amdgpu.uniform");
1994 }
1995 
1996 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
1997   for (const GEPInfo &GEPInfo : AddrInfo) {
1998     if (!GEPInfo.VgprParts.empty())
1999       return true;
2000   }
2001   return false;
2002 }
2003 
2004 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2005   MachineBasicBlock *BB = I.getParent();
2006 
2007   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2008   unsigned AS = PtrTy.getAddressSpace();
2009   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2010       STI.ldsRequiresM0Init()) {
2011     // If DS instructions require M0 initializtion, insert it before selecting.
2012     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2013       .addImm(-1);
2014   }
2015 }
2016 
2017 bool AMDGPUInstructionSelector::selectG_LOAD_ATOMICRMW(MachineInstr &I) const {
2018   initM0(I);
2019   return selectImpl(I, *CoverageInfo);
2020 }
2021 
2022 // TODO: No rtn optimization.
2023 bool AMDGPUInstructionSelector::selectG_AMDGPU_ATOMIC_CMPXCHG(
2024   MachineInstr &MI) const {
2025   Register PtrReg = MI.getOperand(1).getReg();
2026   const LLT PtrTy = MRI->getType(PtrReg);
2027   if (PtrTy.getAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
2028       STI.useFlatForGlobal())
2029     return selectImpl(MI, *CoverageInfo);
2030 
2031   Register DstReg = MI.getOperand(0).getReg();
2032   const LLT Ty = MRI->getType(DstReg);
2033   const bool Is64 = Ty.getSizeInBits() == 64;
2034   const unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
2035   Register TmpReg = MRI->createVirtualRegister(
2036     Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
2037 
2038   const DebugLoc &DL = MI.getDebugLoc();
2039   MachineBasicBlock *BB = MI.getParent();
2040 
2041   Register VAddr, RSrcReg, SOffset;
2042   int64_t Offset = 0;
2043 
2044   unsigned Opcode;
2045   if (selectMUBUFOffsetImpl(MI.getOperand(1), RSrcReg, SOffset, Offset)) {
2046     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_OFFSET_RTN :
2047                              AMDGPU::BUFFER_ATOMIC_CMPSWAP_OFFSET_RTN;
2048   } else if (selectMUBUFAddr64Impl(MI.getOperand(1), VAddr,
2049                                    RSrcReg, SOffset, Offset)) {
2050     Opcode = Is64 ? AMDGPU::BUFFER_ATOMIC_CMPSWAP_X2_ADDR64_RTN :
2051                     AMDGPU::BUFFER_ATOMIC_CMPSWAP_ADDR64_RTN;
2052   } else
2053     return selectImpl(MI, *CoverageInfo);
2054 
2055   auto MIB = BuildMI(*BB, &MI, DL, TII.get(Opcode), TmpReg)
2056     .addReg(MI.getOperand(2).getReg());
2057 
2058   if (VAddr)
2059     MIB.addReg(VAddr);
2060 
2061   MIB.addReg(RSrcReg);
2062   if (SOffset)
2063     MIB.addReg(SOffset);
2064   else
2065     MIB.addImm(0);
2066 
2067   MIB.addImm(Offset);
2068   MIB.addImm(0); // slc
2069   MIB.cloneMemRefs(MI);
2070 
2071   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), DstReg)
2072     .addReg(TmpReg, RegState::Kill, SubReg);
2073 
2074   MI.eraseFromParent();
2075 
2076   MRI->setRegClass(
2077     DstReg, Is64 ? &AMDGPU::VReg_64RegClass : &AMDGPU::VGPR_32RegClass);
2078   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2079 }
2080 
2081 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2082   MachineBasicBlock *BB = I.getParent();
2083   MachineOperand &CondOp = I.getOperand(0);
2084   Register CondReg = CondOp.getReg();
2085   const DebugLoc &DL = I.getDebugLoc();
2086 
2087   unsigned BrOpcode;
2088   Register CondPhysReg;
2089   const TargetRegisterClass *ConstrainRC;
2090 
2091   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2092   // whether the branch is uniform when selecting the instruction. In
2093   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2094   // RegBankSelect knows what it's doing if the branch condition is scc, even
2095   // though it currently does not.
2096   if (!isVCC(CondReg, *MRI)) {
2097     if (MRI->getType(CondReg) != LLT::scalar(32))
2098       return false;
2099 
2100     CondPhysReg = AMDGPU::SCC;
2101     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2102     // FIXME: Hack for isSCC tests
2103     ConstrainRC = &AMDGPU::SGPR_32RegClass;
2104   } else {
2105     // FIXME: Do we have to insert an and with exec here, like in SelectionDAG?
2106     // We sort of know that a VCC producer based on the register bank, that ands
2107     // inactive lanes with 0. What if there was a logical operation with vcc
2108     // producers in different blocks/with different exec masks?
2109     // FIXME: Should scc->vcc copies and with exec?
2110     CondPhysReg = TRI.getVCC();
2111     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2112     ConstrainRC = TRI.getBoolRC();
2113   }
2114 
2115   if (!MRI->getRegClassOrNull(CondReg))
2116     MRI->setRegClass(CondReg, ConstrainRC);
2117 
2118   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2119     .addReg(CondReg);
2120   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2121     .addMBB(I.getOperand(1).getMBB());
2122 
2123   I.eraseFromParent();
2124   return true;
2125 }
2126 
2127 bool AMDGPUInstructionSelector::selectG_FRAME_INDEX_GLOBAL_VALUE(
2128   MachineInstr &I) const {
2129   Register DstReg = I.getOperand(0).getReg();
2130   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2131   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2132   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2133   if (IsVGPR)
2134     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2135 
2136   return RBI.constrainGenericRegister(
2137     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2138 }
2139 
2140 bool AMDGPUInstructionSelector::selectG_PTR_MASK(MachineInstr &I) const {
2141   uint64_t Align = I.getOperand(2).getImm();
2142   const uint64_t Mask = ~((UINT64_C(1) << Align) - 1);
2143 
2144   MachineBasicBlock *BB = I.getParent();
2145 
2146   Register DstReg = I.getOperand(0).getReg();
2147   Register SrcReg = I.getOperand(1).getReg();
2148 
2149   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2150   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2151   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2152   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2153   unsigned MovOpc = IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
2154   const TargetRegisterClass &RegRC
2155     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2156 
2157   LLT Ty = MRI->getType(DstReg);
2158 
2159   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB,
2160                                                                   *MRI);
2161   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB,
2162                                                                   *MRI);
2163   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2164       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
2165     return false;
2166 
2167   const DebugLoc &DL = I.getDebugLoc();
2168   Register ImmReg = MRI->createVirtualRegister(&RegRC);
2169   BuildMI(*BB, &I, DL, TII.get(MovOpc), ImmReg)
2170     .addImm(Mask);
2171 
2172   if (Ty.getSizeInBits() == 32) {
2173     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2174       .addReg(SrcReg)
2175       .addReg(ImmReg);
2176     I.eraseFromParent();
2177     return true;
2178   }
2179 
2180   Register HiReg = MRI->createVirtualRegister(&RegRC);
2181   Register LoReg = MRI->createVirtualRegister(&RegRC);
2182   Register MaskLo = MRI->createVirtualRegister(&RegRC);
2183 
2184   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2185     .addReg(SrcReg, 0, AMDGPU::sub0);
2186   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2187     .addReg(SrcReg, 0, AMDGPU::sub1);
2188 
2189   BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskLo)
2190     .addReg(LoReg)
2191     .addReg(ImmReg);
2192   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2193     .addReg(MaskLo)
2194     .addImm(AMDGPU::sub0)
2195     .addReg(HiReg)
2196     .addImm(AMDGPU::sub1);
2197   I.eraseFromParent();
2198   return true;
2199 }
2200 
2201 /// Return the register to use for the index value, and the subregister to use
2202 /// for the indirectly accessed register.
2203 static std::pair<Register, unsigned>
2204 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2205                         const SIRegisterInfo &TRI,
2206                         const TargetRegisterClass *SuperRC,
2207                         Register IdxReg,
2208                         unsigned EltSize) {
2209   Register IdxBaseReg;
2210   int Offset;
2211   MachineInstr *Unused;
2212 
2213   std::tie(IdxBaseReg, Offset, Unused)
2214     = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2215   if (IdxBaseReg == AMDGPU::NoRegister) {
2216     // This will happen if the index is a known constant. This should ordinarily
2217     // be legalized out, but handle it as a register just in case.
2218     assert(Offset == 0);
2219     IdxBaseReg = IdxReg;
2220   }
2221 
2222   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2223 
2224   // Skip out of bounds offsets, or else we would end up using an undefined
2225   // register.
2226   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2227     return std::make_pair(IdxReg, SubRegs[0]);
2228   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2229 }
2230 
2231 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2232   MachineInstr &MI) const {
2233   Register DstReg = MI.getOperand(0).getReg();
2234   Register SrcReg = MI.getOperand(1).getReg();
2235   Register IdxReg = MI.getOperand(2).getReg();
2236 
2237   LLT DstTy = MRI->getType(DstReg);
2238   LLT SrcTy = MRI->getType(SrcReg);
2239 
2240   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2241   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2242   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2243 
2244   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2245   // into a waterfall loop.
2246   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2247     return false;
2248 
2249   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB,
2250                                                                   *MRI);
2251   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(DstTy, *DstRB,
2252                                                                   *MRI);
2253   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2254       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2255       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2256     return false;
2257 
2258   MachineBasicBlock *BB = MI.getParent();
2259   const DebugLoc &DL = MI.getDebugLoc();
2260   const bool Is64 = DstTy.getSizeInBits() == 64;
2261 
2262   unsigned SubReg;
2263   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2264                                                      DstTy.getSizeInBits() / 8);
2265 
2266   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2267     if (DstTy.getSizeInBits() != 32 && !Is64)
2268       return false;
2269 
2270     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2271       .addReg(IdxReg);
2272 
2273     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2274     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2275       .addReg(SrcReg, 0, SubReg)
2276       .addReg(SrcReg, RegState::Implicit);
2277     MI.eraseFromParent();
2278     return true;
2279   }
2280 
2281   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2282     return false;
2283 
2284   if (!STI.useVGPRIndexMode()) {
2285     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2286       .addReg(IdxReg);
2287     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2288       .addReg(SrcReg, RegState::Undef, SubReg)
2289       .addReg(SrcReg, RegState::Implicit);
2290     MI.eraseFromParent();
2291     return true;
2292   }
2293 
2294   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2295     .addReg(IdxReg)
2296     .addImm(AMDGPU::VGPRIndexMode::SRC0_ENABLE);
2297   BuildMI(*BB, MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), DstReg)
2298     .addReg(SrcReg, RegState::Undef, SubReg)
2299     .addReg(SrcReg, RegState::Implicit)
2300     .addReg(AMDGPU::M0, RegState::Implicit);
2301   BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2302 
2303   MI.eraseFromParent();
2304   return true;
2305 }
2306 
2307 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2308 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2309   MachineInstr &MI) const {
2310   Register DstReg = MI.getOperand(0).getReg();
2311   Register VecReg = MI.getOperand(1).getReg();
2312   Register ValReg = MI.getOperand(2).getReg();
2313   Register IdxReg = MI.getOperand(3).getReg();
2314 
2315   LLT VecTy = MRI->getType(DstReg);
2316   LLT ValTy = MRI->getType(ValReg);
2317   unsigned VecSize = VecTy.getSizeInBits();
2318   unsigned ValSize = ValTy.getSizeInBits();
2319 
2320   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2321   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2322   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2323 
2324   assert(VecTy.getElementType() == ValTy);
2325 
2326   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2327   // into a waterfall loop.
2328   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2329     return false;
2330 
2331   const TargetRegisterClass *VecRC = TRI.getRegClassForTypeOnBank(VecTy, *VecRB,
2332                                                                   *MRI);
2333   const TargetRegisterClass *ValRC = TRI.getRegClassForTypeOnBank(ValTy, *ValRB,
2334                                                                   *MRI);
2335 
2336   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2337       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2338       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2339       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2340     return false;
2341 
2342   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2343     return false;
2344 
2345   unsigned SubReg;
2346   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2347                                                      ValSize / 8);
2348 
2349   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2350                          STI.useVGPRIndexMode();
2351 
2352   MachineBasicBlock *BB = MI.getParent();
2353   const DebugLoc &DL = MI.getDebugLoc();
2354 
2355   if (IndexMode) {
2356     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_ON))
2357       .addReg(IdxReg)
2358       .addImm(AMDGPU::VGPRIndexMode::DST_ENABLE);
2359   } else {
2360     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2361       .addReg(IdxReg);
2362   }
2363 
2364   const MCInstrDesc &RegWriteOp
2365     = TII.getIndirectRegWritePseudo(VecSize, ValSize,
2366                                     VecRB->getID() == AMDGPU::SGPRRegBankID);
2367   BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2368     .addReg(VecReg)
2369     .addReg(ValReg)
2370     .addImm(SubReg);
2371 
2372   if (IndexMode)
2373     BuildMI(*BB, MI, DL, TII.get(AMDGPU::S_SET_GPR_IDX_OFF));
2374 
2375   MI.eraseFromParent();
2376   return true;
2377 }
2378 
2379 static bool isZeroOrUndef(int X) {
2380   return X == 0 || X == -1;
2381 }
2382 
2383 static bool isOneOrUndef(int X) {
2384   return X == 1 || X == -1;
2385 }
2386 
2387 static bool isZeroOrOneOrUndef(int X) {
2388   return X == 0 || X == 1 || X == -1;
2389 }
2390 
2391 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2392 // 32-bit register.
2393 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2394                                    ArrayRef<int> Mask) {
2395   NewMask[0] = Mask[0];
2396   NewMask[1] = Mask[1];
2397   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2398     return Src0;
2399 
2400   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2401   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2402 
2403   // Shift the mask inputs to be 0/1;
2404   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2405   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2406   return Src1;
2407 }
2408 
2409 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2410 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2411   MachineInstr &MI) const {
2412   Register DstReg = MI.getOperand(0).getReg();
2413   Register Src0Reg = MI.getOperand(1).getReg();
2414   Register Src1Reg = MI.getOperand(2).getReg();
2415   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2416 
2417   const LLT V2S16 = LLT::vector(2, 16);
2418   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2419     return false;
2420 
2421   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2422     return false;
2423 
2424   assert(ShufMask.size() == 2);
2425   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2426 
2427   MachineBasicBlock *MBB = MI.getParent();
2428   const DebugLoc &DL = MI.getDebugLoc();
2429 
2430   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2431   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2432   const TargetRegisterClass &RC = IsVALU ?
2433     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2434 
2435   // Handle the degenerate case which should have folded out.
2436   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2437     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2438 
2439     MI.eraseFromParent();
2440     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2441   }
2442 
2443   // A legal VOP3P mask only reads one of the sources.
2444   int Mask[2];
2445   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2446 
2447   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2448       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2449     return false;
2450 
2451   // TODO: This also should have been folded out
2452   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2453     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2454       .addReg(SrcVec);
2455 
2456     MI.eraseFromParent();
2457     return true;
2458   }
2459 
2460   if (Mask[0] == 1 && Mask[1] == -1) {
2461     if (IsVALU) {
2462       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2463         .addImm(16)
2464         .addReg(SrcVec);
2465     } else {
2466       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2467         .addReg(SrcVec)
2468         .addImm(16);
2469     }
2470   } else if (Mask[0] == -1 && Mask[1] == 0) {
2471     if (IsVALU) {
2472       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2473         .addImm(16)
2474         .addReg(SrcVec);
2475     } else {
2476       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2477         .addReg(SrcVec)
2478         .addImm(16);
2479     }
2480   } else if (Mask[0] == 0 && Mask[1] == 0) {
2481     if (IsVALU) {
2482       // Write low half of the register into the high half.
2483       MachineInstr *MovSDWA =
2484         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2485         .addImm(0)                             // $src0_modifiers
2486         .addReg(SrcVec)                        // $src0
2487         .addImm(0)                             // $clamp
2488         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2489         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2490         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2491         .addReg(SrcVec, RegState::Implicit);
2492       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2493     } else {
2494       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2495         .addReg(SrcVec)
2496         .addReg(SrcVec);
2497     }
2498   } else if (Mask[0] == 1 && Mask[1] == 1) {
2499     if (IsVALU) {
2500       // Write high half of the register into the low half.
2501       MachineInstr *MovSDWA =
2502         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2503         .addImm(0)                             // $src0_modifiers
2504         .addReg(SrcVec)                        // $src0
2505         .addImm(0)                             // $clamp
2506         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2507         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2508         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2509         .addReg(SrcVec, RegState::Implicit);
2510       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2511     } else {
2512       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2513         .addReg(SrcVec)
2514         .addReg(SrcVec);
2515     }
2516   } else if (Mask[0] == 1 && Mask[1] == 0) {
2517     if (IsVALU) {
2518       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32), DstReg)
2519         .addReg(SrcVec)
2520         .addReg(SrcVec)
2521         .addImm(16);
2522     } else {
2523       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2524       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2525         .addReg(SrcVec)
2526         .addImm(16);
2527       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2528         .addReg(TmpReg)
2529         .addReg(SrcVec);
2530     }
2531   } else
2532     llvm_unreachable("all shuffle masks should be handled");
2533 
2534   MI.eraseFromParent();
2535   return true;
2536 }
2537 
2538 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
2539   if (I.isPHI())
2540     return selectPHI(I);
2541 
2542   if (!I.isPreISelOpcode()) {
2543     if (I.isCopy())
2544       return selectCOPY(I);
2545     return true;
2546   }
2547 
2548   switch (I.getOpcode()) {
2549   case TargetOpcode::G_AND:
2550   case TargetOpcode::G_OR:
2551   case TargetOpcode::G_XOR:
2552     if (selectImpl(I, *CoverageInfo))
2553       return true;
2554     return selectG_AND_OR_XOR(I);
2555   case TargetOpcode::G_ADD:
2556   case TargetOpcode::G_SUB:
2557     if (selectImpl(I, *CoverageInfo))
2558       return true;
2559     return selectG_ADD_SUB(I);
2560   case TargetOpcode::G_UADDO:
2561   case TargetOpcode::G_USUBO:
2562   case TargetOpcode::G_UADDE:
2563   case TargetOpcode::G_USUBE:
2564     return selectG_UADDO_USUBO_UADDE_USUBE(I);
2565   case TargetOpcode::G_INTTOPTR:
2566   case TargetOpcode::G_BITCAST:
2567   case TargetOpcode::G_PTRTOINT:
2568     return selectCOPY(I);
2569   case TargetOpcode::G_CONSTANT:
2570   case TargetOpcode::G_FCONSTANT:
2571     return selectG_CONSTANT(I);
2572   case TargetOpcode::G_FNEG:
2573     if (selectImpl(I, *CoverageInfo))
2574       return true;
2575     return selectG_FNEG(I);
2576   case TargetOpcode::G_EXTRACT:
2577     return selectG_EXTRACT(I);
2578   case TargetOpcode::G_MERGE_VALUES:
2579   case TargetOpcode::G_BUILD_VECTOR:
2580   case TargetOpcode::G_CONCAT_VECTORS:
2581     return selectG_MERGE_VALUES(I);
2582   case TargetOpcode::G_UNMERGE_VALUES:
2583     return selectG_UNMERGE_VALUES(I);
2584   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
2585     return selectG_BUILD_VECTOR_TRUNC(I);
2586   case TargetOpcode::G_PTR_ADD:
2587     return selectG_PTR_ADD(I);
2588   case TargetOpcode::G_IMPLICIT_DEF:
2589     return selectG_IMPLICIT_DEF(I);
2590   case TargetOpcode::G_INSERT:
2591     return selectG_INSERT(I);
2592   case TargetOpcode::G_INTRINSIC:
2593     return selectG_INTRINSIC(I);
2594   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2595     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
2596   case TargetOpcode::G_ICMP:
2597     if (selectG_ICMP(I))
2598       return true;
2599     return selectImpl(I, *CoverageInfo);
2600   case TargetOpcode::G_LOAD:
2601   case TargetOpcode::G_ATOMIC_CMPXCHG:
2602   case TargetOpcode::G_ATOMICRMW_XCHG:
2603   case TargetOpcode::G_ATOMICRMW_ADD:
2604   case TargetOpcode::G_ATOMICRMW_SUB:
2605   case TargetOpcode::G_ATOMICRMW_AND:
2606   case TargetOpcode::G_ATOMICRMW_OR:
2607   case TargetOpcode::G_ATOMICRMW_XOR:
2608   case TargetOpcode::G_ATOMICRMW_MIN:
2609   case TargetOpcode::G_ATOMICRMW_MAX:
2610   case TargetOpcode::G_ATOMICRMW_UMIN:
2611   case TargetOpcode::G_ATOMICRMW_UMAX:
2612   case TargetOpcode::G_ATOMICRMW_FADD:
2613     return selectG_LOAD_ATOMICRMW(I);
2614   case AMDGPU::G_AMDGPU_ATOMIC_CMPXCHG:
2615     return selectG_AMDGPU_ATOMIC_CMPXCHG(I);
2616   case TargetOpcode::G_SELECT:
2617     return selectG_SELECT(I);
2618   case TargetOpcode::G_STORE:
2619     return selectG_STORE(I);
2620   case TargetOpcode::G_TRUNC:
2621     return selectG_TRUNC(I);
2622   case TargetOpcode::G_SEXT:
2623   case TargetOpcode::G_ZEXT:
2624   case TargetOpcode::G_ANYEXT:
2625   case TargetOpcode::G_SEXT_INREG:
2626     if (selectImpl(I, *CoverageInfo))
2627       return true;
2628     return selectG_SZA_EXT(I);
2629   case TargetOpcode::G_BRCOND:
2630     return selectG_BRCOND(I);
2631   case TargetOpcode::G_FRAME_INDEX:
2632   case TargetOpcode::G_GLOBAL_VALUE:
2633     return selectG_FRAME_INDEX_GLOBAL_VALUE(I);
2634   case TargetOpcode::G_PTR_MASK:
2635     return selectG_PTR_MASK(I);
2636   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
2637     return selectG_EXTRACT_VECTOR_ELT(I);
2638   case TargetOpcode::G_INSERT_VECTOR_ELT:
2639     return selectG_INSERT_VECTOR_ELT(I);
2640   case TargetOpcode::G_SHUFFLE_VECTOR:
2641     return selectG_SHUFFLE_VECTOR(I);
2642   case AMDGPU::G_AMDGPU_ATOMIC_INC:
2643   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
2644     initM0(I);
2645     return selectImpl(I, *CoverageInfo);
2646   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
2647   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE: {
2648     const AMDGPU::ImageDimIntrinsicInfo *Intr
2649       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
2650     assert(Intr && "not an image intrinsic with image pseudo");
2651     return selectImageIntrinsic(I, Intr);
2652   }
2653   default:
2654     return selectImpl(I, *CoverageInfo);
2655   }
2656   return false;
2657 }
2658 
2659 InstructionSelector::ComplexRendererFns
2660 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
2661   return {{
2662       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2663   }};
2664 
2665 }
2666 
2667 std::pair<Register, unsigned>
2668 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root) const {
2669   Register Src = Root.getReg();
2670   Register OrigSrc = Src;
2671   unsigned Mods = 0;
2672   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
2673 
2674   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
2675     Src = MI->getOperand(1).getReg();
2676     Mods |= SISrcMods::NEG;
2677     MI = getDefIgnoringCopies(Src, *MRI);
2678   }
2679 
2680   if (MI && MI->getOpcode() == AMDGPU::G_FABS) {
2681     Src = MI->getOperand(1).getReg();
2682     Mods |= SISrcMods::ABS;
2683   }
2684 
2685   if (Mods != 0 &&
2686       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
2687     MachineInstr *UseMI = Root.getParent();
2688 
2689     // If we looked through copies to find source modifiers on an SGPR operand,
2690     // we now have an SGPR register source. To avoid potentially violating the
2691     // constant bus restriction, we need to insert a copy to a VGPR.
2692     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
2693     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
2694             TII.get(AMDGPU::COPY), VGPRSrc)
2695       .addReg(Src);
2696     Src = VGPRSrc;
2697   }
2698 
2699   return std::make_pair(Src, Mods);
2700 }
2701 
2702 ///
2703 /// This will select either an SGPR or VGPR operand and will save us from
2704 /// having to write an extra tablegen pattern.
2705 InstructionSelector::ComplexRendererFns
2706 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
2707   return {{
2708       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
2709   }};
2710 }
2711 
2712 InstructionSelector::ComplexRendererFns
2713 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
2714   Register Src;
2715   unsigned Mods;
2716   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2717 
2718   return {{
2719       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2720       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
2721       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
2722       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
2723   }};
2724 }
2725 
2726 InstructionSelector::ComplexRendererFns
2727 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
2728   return {{
2729       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
2730       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
2731       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
2732   }};
2733 }
2734 
2735 InstructionSelector::ComplexRendererFns
2736 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
2737   Register Src;
2738   unsigned Mods;
2739   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2740 
2741   return {{
2742       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2743       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2744   }};
2745 }
2746 
2747 InstructionSelector::ComplexRendererFns
2748 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
2749   Register Reg = Root.getReg();
2750   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
2751   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
2752               Def->getOpcode() == AMDGPU::G_FABS))
2753     return {};
2754   return {{
2755       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
2756   }};
2757 }
2758 
2759 std::pair<Register, unsigned>
2760 AMDGPUInstructionSelector::selectVOP3PModsImpl(
2761   Register Src, const MachineRegisterInfo &MRI) const {
2762   unsigned Mods = 0;
2763   MachineInstr *MI = MRI.getVRegDef(Src);
2764 
2765   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
2766       // It's possible to see an f32 fneg here, but unlikely.
2767       // TODO: Treat f32 fneg as only high bit.
2768       MRI.getType(Src) == LLT::vector(2, 16)) {
2769     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
2770     Src = MI->getOperand(1).getReg();
2771     MI = MRI.getVRegDef(Src);
2772   }
2773 
2774   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
2775 
2776   // Packed instructions do not have abs modifiers.
2777   Mods |= SISrcMods::OP_SEL_1;
2778 
2779   return std::make_pair(Src, Mods);
2780 }
2781 
2782 InstructionSelector::ComplexRendererFns
2783 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
2784   MachineRegisterInfo &MRI
2785     = Root.getParent()->getParent()->getParent()->getRegInfo();
2786 
2787   Register Src;
2788   unsigned Mods;
2789   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
2790 
2791   return {{
2792       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2793       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2794   }};
2795 }
2796 
2797 InstructionSelector::ComplexRendererFns
2798 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
2799   Register Src;
2800   unsigned Mods;
2801   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
2802   if (!TM.Options.NoNaNsFPMath && !isKnownNeverNaN(Src, *MRI))
2803     return None;
2804 
2805   return {{
2806       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
2807       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
2808   }};
2809 }
2810 
2811 InstructionSelector::ComplexRendererFns
2812 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
2813   // FIXME: Handle op_sel
2814   return {{
2815       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2816       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
2817   }};
2818 }
2819 
2820 InstructionSelector::ComplexRendererFns
2821 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
2822   SmallVector<GEPInfo, 4> AddrInfo;
2823   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2824 
2825   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2826     return None;
2827 
2828   const GEPInfo &GEPInfo = AddrInfo[0];
2829   Optional<int64_t> EncodedImm =
2830       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
2831   if (!EncodedImm)
2832     return None;
2833 
2834   unsigned PtrReg = GEPInfo.SgprParts[0];
2835   return {{
2836     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2837     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2838   }};
2839 }
2840 
2841 InstructionSelector::ComplexRendererFns
2842 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
2843   SmallVector<GEPInfo, 4> AddrInfo;
2844   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
2845 
2846   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2847     return None;
2848 
2849   const GEPInfo &GEPInfo = AddrInfo[0];
2850   unsigned PtrReg = GEPInfo.SgprParts[0];
2851   Optional<int64_t> EncodedImm =
2852       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
2853   if (!EncodedImm)
2854     return None;
2855 
2856   return {{
2857     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2858     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
2859   }};
2860 }
2861 
2862 InstructionSelector::ComplexRendererFns
2863 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
2864   MachineInstr *MI = Root.getParent();
2865   MachineBasicBlock *MBB = MI->getParent();
2866 
2867   SmallVector<GEPInfo, 4> AddrInfo;
2868   getAddrModeInfo(*MI, *MRI, AddrInfo);
2869 
2870   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
2871   // then we can select all ptr + 32-bit offsets not just immediate offsets.
2872   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
2873     return None;
2874 
2875   const GEPInfo &GEPInfo = AddrInfo[0];
2876   // SGPR offset is unsigned.
2877   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
2878     return None;
2879 
2880   // If we make it this far we have a load with an 32-bit immediate offset.
2881   // It is OK to select this using a sgpr offset, because we have already
2882   // failed trying to select this load into one of the _IMM variants since
2883   // the _IMM Patterns are considered before the _SGPR patterns.
2884   unsigned PtrReg = GEPInfo.SgprParts[0];
2885   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2886   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
2887           .addImm(GEPInfo.Imm);
2888   return {{
2889     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
2890     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
2891   }};
2892 }
2893 
2894 template <bool Signed>
2895 InstructionSelector::ComplexRendererFns
2896 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root) const {
2897   MachineInstr *MI = Root.getParent();
2898 
2899   InstructionSelector::ComplexRendererFns Default = {{
2900       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
2901       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },  // offset
2902       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2903     }};
2904 
2905   if (!STI.hasFlatInstOffsets())
2906     return Default;
2907 
2908   const MachineInstr *OpDef = MRI->getVRegDef(Root.getReg());
2909   if (!OpDef || OpDef->getOpcode() != AMDGPU::G_PTR_ADD)
2910     return Default;
2911 
2912   Optional<int64_t> Offset =
2913     getConstantVRegVal(OpDef->getOperand(2).getReg(), *MRI);
2914   if (!Offset.hasValue())
2915     return Default;
2916 
2917   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
2918   if (!TII.isLegalFLATOffset(Offset.getValue(), AddrSpace, Signed))
2919     return Default;
2920 
2921   Register BasePtr = OpDef->getOperand(1).getReg();
2922 
2923   return {{
2924       [=](MachineInstrBuilder &MIB) { MIB.addReg(BasePtr); },
2925       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset.getValue()); },
2926       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // slc
2927     }};
2928 }
2929 
2930 InstructionSelector::ComplexRendererFns
2931 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
2932   return selectFlatOffsetImpl<false>(Root);
2933 }
2934 
2935 InstructionSelector::ComplexRendererFns
2936 AMDGPUInstructionSelector::selectFlatOffsetSigned(MachineOperand &Root) const {
2937   return selectFlatOffsetImpl<true>(Root);
2938 }
2939 
2940 static bool isStackPtrRelative(const MachinePointerInfo &PtrInfo) {
2941   auto PSV = PtrInfo.V.dyn_cast<const PseudoSourceValue *>();
2942   return PSV && PSV->isStack();
2943 }
2944 
2945 InstructionSelector::ComplexRendererFns
2946 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
2947   MachineInstr *MI = Root.getParent();
2948   MachineBasicBlock *MBB = MI->getParent();
2949   MachineFunction *MF = MBB->getParent();
2950   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
2951 
2952   int64_t Offset = 0;
2953   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {
2954     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2955 
2956     // TODO: Should this be inside the render function? The iterator seems to
2957     // move.
2958     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
2959             HighBits)
2960       .addImm(Offset & ~4095);
2961 
2962     return {{[=](MachineInstrBuilder &MIB) { // rsrc
2963                MIB.addReg(Info->getScratchRSrcReg());
2964              },
2965              [=](MachineInstrBuilder &MIB) { // vaddr
2966                MIB.addReg(HighBits);
2967              },
2968              [=](MachineInstrBuilder &MIB) { // soffset
2969                const MachineMemOperand *MMO = *MI->memoperands_begin();
2970                const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
2971 
2972                if (isStackPtrRelative(PtrInfo))
2973                  MIB.addReg(Info->getStackPtrOffsetReg());
2974                else
2975                  MIB.addImm(0);
2976              },
2977              [=](MachineInstrBuilder &MIB) { // offset
2978                MIB.addImm(Offset & 4095);
2979              }}};
2980   }
2981 
2982   assert(Offset == 0);
2983 
2984   // Try to fold a frame index directly into the MUBUF vaddr field, and any
2985   // offsets.
2986   Optional<int> FI;
2987   Register VAddr = Root.getReg();
2988   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
2989     if (isBaseWithConstantOffset(Root, *MRI)) {
2990       const MachineOperand &LHS = RootDef->getOperand(1);
2991       const MachineOperand &RHS = RootDef->getOperand(2);
2992       const MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
2993       const MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
2994       if (LHSDef && RHSDef) {
2995         int64_t PossibleOffset =
2996             RHSDef->getOperand(1).getCImm()->getSExtValue();
2997         if (SIInstrInfo::isLegalMUBUFImmOffset(PossibleOffset) &&
2998             (!STI.privateMemoryResourceIsRangeChecked() ||
2999              KnownBits->signBitIsZero(LHS.getReg()))) {
3000           if (LHSDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
3001             FI = LHSDef->getOperand(1).getIndex();
3002           else
3003             VAddr = LHS.getReg();
3004           Offset = PossibleOffset;
3005         }
3006       }
3007     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3008       FI = RootDef->getOperand(1).getIndex();
3009     }
3010   }
3011 
3012   return {{[=](MachineInstrBuilder &MIB) { // rsrc
3013              MIB.addReg(Info->getScratchRSrcReg());
3014            },
3015            [=](MachineInstrBuilder &MIB) { // vaddr
3016              if (FI.hasValue())
3017                MIB.addFrameIndex(FI.getValue());
3018              else
3019                MIB.addReg(VAddr);
3020            },
3021            [=](MachineInstrBuilder &MIB) { // soffset
3022              // If we don't know this private access is a local stack object, it
3023              // needs to be relative to the entry point's scratch wave offset.
3024              // TODO: Should split large offsets that don't fit like above.
3025              // TODO: Don't use scratch wave offset just because the offset
3026              // didn't fit.
3027              if (!Info->isEntryFunction() && FI.hasValue())
3028                MIB.addReg(Info->getStackPtrOffsetReg());
3029              else
3030                MIB.addImm(0);
3031            },
3032            [=](MachineInstrBuilder &MIB) { // offset
3033              MIB.addImm(Offset);
3034            }}};
3035 }
3036 
3037 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
3038                                                 int64_t Offset,
3039                                                 unsigned OffsetBits) const {
3040   if ((OffsetBits == 16 && !isUInt<16>(Offset)) ||
3041       (OffsetBits == 8 && !isUInt<8>(Offset)))
3042     return false;
3043 
3044   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
3045     return true;
3046 
3047   // On Southern Islands instruction with a negative base value and an offset
3048   // don't seem to work.
3049   return KnownBits->signBitIsZero(Base);
3050 }
3051 
3052 InstructionSelector::ComplexRendererFns
3053 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
3054     MachineOperand &Root) const {
3055   MachineInstr *MI = Root.getParent();
3056   MachineBasicBlock *MBB = MI->getParent();
3057 
3058   int64_t Offset = 0;
3059   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
3060       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
3061     return {};
3062 
3063   const MachineFunction *MF = MBB->getParent();
3064   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
3065   const MachineMemOperand *MMO = *MI->memoperands_begin();
3066   const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
3067 
3068   return {{
3069       [=](MachineInstrBuilder &MIB) { // rsrc
3070         MIB.addReg(Info->getScratchRSrcReg());
3071       },
3072       [=](MachineInstrBuilder &MIB) { // soffset
3073         if (isStackPtrRelative(PtrInfo))
3074           MIB.addReg(Info->getStackPtrOffsetReg());
3075         else
3076           MIB.addImm(0);
3077       },
3078       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
3079   }};
3080 }
3081 
3082 std::pair<Register, unsigned>
3083 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
3084   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3085   if (!RootDef)
3086     return std::make_pair(Root.getReg(), 0);
3087 
3088   int64_t ConstAddr = 0;
3089 
3090   Register PtrBase;
3091   int64_t Offset;
3092   std::tie(PtrBase, Offset) =
3093     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3094 
3095   if (Offset) {
3096     if (isDSOffsetLegal(PtrBase, Offset, 16)) {
3097       // (add n0, c0)
3098       return std::make_pair(PtrBase, Offset);
3099     }
3100   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3101     // TODO
3102 
3103 
3104   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3105     // TODO
3106 
3107   }
3108 
3109   return std::make_pair(Root.getReg(), 0);
3110 }
3111 
3112 InstructionSelector::ComplexRendererFns
3113 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
3114   Register Reg;
3115   unsigned Offset;
3116   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
3117   return {{
3118       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3119       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
3120     }};
3121 }
3122 
3123 InstructionSelector::ComplexRendererFns
3124 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
3125   Register Reg;
3126   unsigned Offset;
3127   std::tie(Reg, Offset) = selectDS64Bit4ByteAlignedImpl(Root);
3128   return {{
3129       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3130       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
3131       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
3132     }};
3133 }
3134 
3135 std::pair<Register, unsigned>
3136 AMDGPUInstructionSelector::selectDS64Bit4ByteAlignedImpl(MachineOperand &Root) const {
3137   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
3138   if (!RootDef)
3139     return std::make_pair(Root.getReg(), 0);
3140 
3141   int64_t ConstAddr = 0;
3142 
3143   Register PtrBase;
3144   int64_t Offset;
3145   std::tie(PtrBase, Offset) =
3146     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3147 
3148   if (Offset) {
3149     int64_t DWordOffset0 = Offset / 4;
3150     int64_t DWordOffset1 = DWordOffset0 + 1;
3151     if (isDSOffsetLegal(PtrBase, DWordOffset1, 8)) {
3152       // (add n0, c0)
3153       return std::make_pair(PtrBase, DWordOffset0);
3154     }
3155   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
3156     // TODO
3157 
3158   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
3159     // TODO
3160 
3161   }
3162 
3163   return std::make_pair(Root.getReg(), 0);
3164 }
3165 
3166 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
3167 /// the base value with the constant offset. There may be intervening copies
3168 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
3169 /// not match the pattern.
3170 std::pair<Register, int64_t>
3171 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
3172   Register Root, const MachineRegisterInfo &MRI) const {
3173   MachineInstr *RootI = MRI.getVRegDef(Root);
3174   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
3175     return {Root, 0};
3176 
3177   MachineOperand &RHS = RootI->getOperand(2);
3178   Optional<ValueAndVReg> MaybeOffset
3179     = getConstantVRegValWithLookThrough(RHS.getReg(), MRI, true);
3180   if (!MaybeOffset)
3181     return {Root, 0};
3182   return {RootI->getOperand(1).getReg(), MaybeOffset->Value};
3183 }
3184 
3185 static void addZeroImm(MachineInstrBuilder &MIB) {
3186   MIB.addImm(0);
3187 }
3188 
3189 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
3190 /// BasePtr is not valid, a null base pointer will be used.
3191 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3192                           uint32_t FormatLo, uint32_t FormatHi,
3193                           Register BasePtr) {
3194   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3195   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
3196   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3197   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
3198 
3199   B.buildInstr(AMDGPU::S_MOV_B32)
3200     .addDef(RSrc2)
3201     .addImm(FormatLo);
3202   B.buildInstr(AMDGPU::S_MOV_B32)
3203     .addDef(RSrc3)
3204     .addImm(FormatHi);
3205 
3206   // Build the half of the subregister with the constants before building the
3207   // full 128-bit register. If we are building multiple resource descriptors,
3208   // this will allow CSEing of the 2-component register.
3209   B.buildInstr(AMDGPU::REG_SEQUENCE)
3210     .addDef(RSrcHi)
3211     .addReg(RSrc2)
3212     .addImm(AMDGPU::sub0)
3213     .addReg(RSrc3)
3214     .addImm(AMDGPU::sub1);
3215 
3216   Register RSrcLo = BasePtr;
3217   if (!BasePtr) {
3218     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
3219     B.buildInstr(AMDGPU::S_MOV_B64)
3220       .addDef(RSrcLo)
3221       .addImm(0);
3222   }
3223 
3224   B.buildInstr(AMDGPU::REG_SEQUENCE)
3225     .addDef(RSrc)
3226     .addReg(RSrcLo)
3227     .addImm(AMDGPU::sub0_sub1)
3228     .addReg(RSrcHi)
3229     .addImm(AMDGPU::sub2_sub3);
3230 
3231   return RSrc;
3232 }
3233 
3234 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3235                                 const SIInstrInfo &TII, Register BasePtr) {
3236   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3237 
3238   // FIXME: Why are half the "default" bits ignored based on the addressing
3239   // mode?
3240   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
3241 }
3242 
3243 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
3244                                const SIInstrInfo &TII, Register BasePtr) {
3245   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
3246 
3247   // FIXME: Why are half the "default" bits ignored based on the addressing
3248   // mode?
3249   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
3250 }
3251 
3252 AMDGPUInstructionSelector::MUBUFAddressData
3253 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
3254   MUBUFAddressData Data;
3255   Data.N0 = Src;
3256 
3257   Register PtrBase;
3258   int64_t Offset;
3259 
3260   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
3261   if (isUInt<32>(Offset)) {
3262     Data.N0 = PtrBase;
3263     Data.Offset = Offset;
3264   }
3265 
3266   if (MachineInstr *InputAdd
3267       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
3268     Data.N2 = InputAdd->getOperand(1).getReg();
3269     Data.N3 = InputAdd->getOperand(2).getReg();
3270 
3271     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
3272     // FIXME: Don't know this was defined by operand 0
3273     //
3274     // TODO: Remove this when we have copy folding optimizations after
3275     // RegBankSelect.
3276     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
3277     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
3278   }
3279 
3280   return Data;
3281 }
3282 
3283 /// Return if the addr64 mubuf mode should be used for the given address.
3284 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
3285   // (ptr_add N2, N3) -> addr64, or
3286   // (ptr_add (ptr_add N2, N3), C1) -> addr64
3287   if (Addr.N2)
3288     return true;
3289 
3290   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
3291   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
3292 }
3293 
3294 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
3295 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
3296 /// component.
3297 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
3298   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
3299   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
3300     return;
3301 
3302   // Illegal offset, store it in soffset.
3303   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3304   B.buildInstr(AMDGPU::S_MOV_B32)
3305     .addDef(SOffset)
3306     .addImm(ImmOffset);
3307   ImmOffset = 0;
3308 }
3309 
3310 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
3311   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
3312   Register &SOffset, int64_t &Offset) const {
3313   // FIXME: Predicates should stop this from reaching here.
3314   // addr64 bit was removed for volcanic islands.
3315   if (!STI.hasAddr64() || STI.useFlatForGlobal())
3316     return false;
3317 
3318   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3319   if (!shouldUseAddr64(AddrData))
3320     return false;
3321 
3322   Register N0 = AddrData.N0;
3323   Register N2 = AddrData.N2;
3324   Register N3 = AddrData.N3;
3325   Offset = AddrData.Offset;
3326 
3327   // Base pointer for the SRD.
3328   Register SRDPtr;
3329 
3330   if (N2) {
3331     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3332       assert(N3);
3333       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3334         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
3335         // addr64, and construct the default resource from a 0 address.
3336         VAddr = N0;
3337       } else {
3338         SRDPtr = N3;
3339         VAddr = N2;
3340       }
3341     } else {
3342       // N2 is not divergent.
3343       SRDPtr = N2;
3344       VAddr = N3;
3345     }
3346   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
3347     // Use the default null pointer in the resource
3348     VAddr = N0;
3349   } else {
3350     // N0 -> offset, or
3351     // (N0 + C1) -> offset
3352     SRDPtr = N0;
3353   }
3354 
3355   MachineIRBuilder B(*Root.getParent());
3356   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
3357   splitIllegalMUBUFOffset(B, SOffset, Offset);
3358   return true;
3359 }
3360 
3361 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
3362   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
3363   int64_t &Offset) const {
3364   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
3365   if (shouldUseAddr64(AddrData))
3366     return false;
3367 
3368   // N0 -> offset, or
3369   // (N0 + C1) -> offset
3370   Register SRDPtr = AddrData.N0;
3371   Offset = AddrData.Offset;
3372 
3373   // TODO: Look through extensions for 32-bit soffset.
3374   MachineIRBuilder B(*Root.getParent());
3375 
3376   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
3377   splitIllegalMUBUFOffset(B, SOffset, Offset);
3378   return true;
3379 }
3380 
3381 InstructionSelector::ComplexRendererFns
3382 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
3383   Register VAddr;
3384   Register RSrcReg;
3385   Register SOffset;
3386   int64_t Offset = 0;
3387 
3388   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3389     return {};
3390 
3391   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3392   // pattern.
3393   return {{
3394       [=](MachineInstrBuilder &MIB) {  // rsrc
3395         MIB.addReg(RSrcReg);
3396       },
3397       [=](MachineInstrBuilder &MIB) { // vaddr
3398         MIB.addReg(VAddr);
3399       },
3400       [=](MachineInstrBuilder &MIB) { // soffset
3401         if (SOffset)
3402           MIB.addReg(SOffset);
3403         else
3404           MIB.addImm(0);
3405       },
3406       [=](MachineInstrBuilder &MIB) { // offset
3407         MIB.addImm(Offset);
3408       },
3409       addZeroImm, //  glc
3410       addZeroImm, //  slc
3411       addZeroImm, //  tfe
3412       addZeroImm, //  dlc
3413       addZeroImm  //  swz
3414     }};
3415 }
3416 
3417 InstructionSelector::ComplexRendererFns
3418 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
3419   Register RSrcReg;
3420   Register SOffset;
3421   int64_t Offset = 0;
3422 
3423   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3424     return {};
3425 
3426   return {{
3427       [=](MachineInstrBuilder &MIB) {  // rsrc
3428         MIB.addReg(RSrcReg);
3429       },
3430       [=](MachineInstrBuilder &MIB) { // soffset
3431         if (SOffset)
3432           MIB.addReg(SOffset);
3433         else
3434           MIB.addImm(0);
3435       },
3436       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3437       addZeroImm, //  glc
3438       addZeroImm, //  slc
3439       addZeroImm, //  tfe
3440       addZeroImm, //  dlc
3441       addZeroImm  //  swz
3442     }};
3443 }
3444 
3445 InstructionSelector::ComplexRendererFns
3446 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
3447   Register VAddr;
3448   Register RSrcReg;
3449   Register SOffset;
3450   int64_t Offset = 0;
3451 
3452   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
3453     return {};
3454 
3455   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
3456   // pattern.
3457   return {{
3458       [=](MachineInstrBuilder &MIB) {  // rsrc
3459         MIB.addReg(RSrcReg);
3460       },
3461       [=](MachineInstrBuilder &MIB) { // vaddr
3462         MIB.addReg(VAddr);
3463       },
3464       [=](MachineInstrBuilder &MIB) { // soffset
3465         if (SOffset)
3466           MIB.addReg(SOffset);
3467         else
3468           MIB.addImm(0);
3469       },
3470       [=](MachineInstrBuilder &MIB) { // offset
3471         MIB.addImm(Offset);
3472       },
3473       addZeroImm //  slc
3474     }};
3475 }
3476 
3477 InstructionSelector::ComplexRendererFns
3478 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
3479   Register RSrcReg;
3480   Register SOffset;
3481   int64_t Offset = 0;
3482 
3483   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
3484     return {};
3485 
3486   return {{
3487       [=](MachineInstrBuilder &MIB) {  // rsrc
3488         MIB.addReg(RSrcReg);
3489       },
3490       [=](MachineInstrBuilder &MIB) { // soffset
3491         if (SOffset)
3492           MIB.addReg(SOffset);
3493         else
3494           MIB.addImm(0);
3495       },
3496       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
3497       addZeroImm //  slc
3498     }};
3499 }
3500 
3501 /// Get an immediate that must be 32-bits, and treated as zero extended.
3502 static Optional<uint64_t> getConstantZext32Val(Register Reg,
3503                                                const MachineRegisterInfo &MRI) {
3504   // getConstantVRegVal sexts any values, so see if that matters.
3505   Optional<int64_t> OffsetVal = getConstantVRegVal(Reg, MRI);
3506   if (!OffsetVal || !isInt<32>(*OffsetVal))
3507     return None;
3508   return Lo_32(*OffsetVal);
3509 }
3510 
3511 InstructionSelector::ComplexRendererFns
3512 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
3513   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3514   if (!OffsetVal)
3515     return {};
3516 
3517   Optional<int64_t> EncodedImm =
3518       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
3519   if (!EncodedImm)
3520     return {};
3521 
3522   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3523 }
3524 
3525 InstructionSelector::ComplexRendererFns
3526 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
3527   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
3528 
3529   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
3530   if (!OffsetVal)
3531     return {};
3532 
3533   Optional<int64_t> EncodedImm
3534     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
3535   if (!EncodedImm)
3536     return {};
3537 
3538   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
3539 }
3540 
3541 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
3542                                                  const MachineInstr &MI,
3543                                                  int OpIdx) const {
3544   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3545          "Expected G_CONSTANT");
3546   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
3547 }
3548 
3549 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
3550                                                 const MachineInstr &MI,
3551                                                 int OpIdx) const {
3552   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3553          "Expected G_CONSTANT");
3554   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
3555 }
3556 
3557 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
3558                                                  const MachineInstr &MI,
3559                                                  int OpIdx) const {
3560   assert(OpIdx == -1);
3561 
3562   const MachineOperand &Op = MI.getOperand(1);
3563   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
3564     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
3565   else {
3566     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
3567     MIB.addImm(Op.getCImm()->getSExtValue());
3568   }
3569 }
3570 
3571 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
3572                                                 const MachineInstr &MI,
3573                                                 int OpIdx) const {
3574   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
3575          "Expected G_CONSTANT");
3576   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
3577 }
3578 
3579 /// This only really exists to satisfy DAG type checking machinery, so is a
3580 /// no-op here.
3581 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
3582                                                 const MachineInstr &MI,
3583                                                 int OpIdx) const {
3584   MIB.addImm(MI.getOperand(OpIdx).getImm());
3585 }
3586 
3587 void AMDGPUInstructionSelector::renderExtractGLC(MachineInstrBuilder &MIB,
3588                                                  const MachineInstr &MI,
3589                                                  int OpIdx) const {
3590   assert(OpIdx >= 0 && "expected to match an immediate operand");
3591   MIB.addImm(MI.getOperand(OpIdx).getImm() & 1);
3592 }
3593 
3594 void AMDGPUInstructionSelector::renderExtractSLC(MachineInstrBuilder &MIB,
3595                                                  const MachineInstr &MI,
3596                                                  int OpIdx) const {
3597   assert(OpIdx >= 0 && "expected to match an immediate operand");
3598   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 1) & 1);
3599 }
3600 
3601 void AMDGPUInstructionSelector::renderExtractDLC(MachineInstrBuilder &MIB,
3602                                                  const MachineInstr &MI,
3603                                                  int OpIdx) const {
3604   assert(OpIdx >= 0 && "expected to match an immediate operand");
3605   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 2) & 1);
3606 }
3607 
3608 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
3609                                                  const MachineInstr &MI,
3610                                                  int OpIdx) const {
3611   assert(OpIdx >= 0 && "expected to match an immediate operand");
3612   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
3613 }
3614 
3615 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
3616   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
3617 }
3618 
3619 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
3620   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
3621 }
3622 
3623 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
3624   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
3625 }
3626 
3627 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
3628   return TII.isInlineConstant(Imm);
3629 }
3630