1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
156         // We can't trust the high bits at this point, so clear them.
157 
158         // TODO: Skip masking high bits if def is known boolean.
159 
160         unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
209   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
259 
260   switch (SubIdx) {
261   default:
262     llvm_unreachable("do not know to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
337     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
380     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
395     .addImm(AMDGPU::sub1);
396 
397 
398   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   I.setDesc(TII.get(IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64
468                                : AMDGPU::V_MAD_I64_I32_e64));
469   I.addOperand(*MF, MachineOperand::CreateImm(0));
470   I.addImplicitDefUseOperands(*MF);
471   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
472 }
473 
474 // TODO: We should probably legalize these to only using 32-bit results.
475 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
476   MachineBasicBlock *BB = I.getParent();
477   Register DstReg = I.getOperand(0).getReg();
478   Register SrcReg = I.getOperand(1).getReg();
479   LLT DstTy = MRI->getType(DstReg);
480   LLT SrcTy = MRI->getType(SrcReg);
481   const unsigned SrcSize = SrcTy.getSizeInBits();
482   unsigned DstSize = DstTy.getSizeInBits();
483 
484   // TODO: Should handle any multiple of 32 offset.
485   unsigned Offset = I.getOperand(2).getImm();
486   if (Offset % 32 != 0 || DstSize > 128)
487     return false;
488 
489   // 16-bit operations really use 32-bit registers.
490   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
491   if (DstSize == 16)
492     DstSize = 32;
493 
494   const TargetRegisterClass *DstRC =
495     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
496   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
497     return false;
498 
499   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
500   const TargetRegisterClass *SrcRC =
501       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
502   if (!SrcRC)
503     return false;
504   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
505                                                          DstSize / 32);
506   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
507   if (!SrcRC)
508     return false;
509 
510   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
511                                     *SrcRC, I.getOperand(1));
512   const DebugLoc &DL = I.getDebugLoc();
513   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
514     .addReg(SrcReg, 0, SubReg);
515 
516   I.eraseFromParent();
517   return true;
518 }
519 
520 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
521   MachineBasicBlock *BB = MI.getParent();
522   Register DstReg = MI.getOperand(0).getReg();
523   LLT DstTy = MRI->getType(DstReg);
524   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
525 
526   const unsigned SrcSize = SrcTy.getSizeInBits();
527   if (SrcSize < 32)
528     return selectImpl(MI, *CoverageInfo);
529 
530   const DebugLoc &DL = MI.getDebugLoc();
531   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
532   const unsigned DstSize = DstTy.getSizeInBits();
533   const TargetRegisterClass *DstRC =
534       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
535   if (!DstRC)
536     return false;
537 
538   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
539   MachineInstrBuilder MIB =
540     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
541   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
542     MachineOperand &Src = MI.getOperand(I + 1);
543     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
544     MIB.addImm(SubRegs[I]);
545 
546     const TargetRegisterClass *SrcRC
547       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
548     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
549       return false;
550   }
551 
552   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
553     return false;
554 
555   MI.eraseFromParent();
556   return true;
557 }
558 
559 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
560   MachineBasicBlock *BB = MI.getParent();
561   const int NumDst = MI.getNumOperands() - 1;
562 
563   MachineOperand &Src = MI.getOperand(NumDst);
564 
565   Register SrcReg = Src.getReg();
566   Register DstReg0 = MI.getOperand(0).getReg();
567   LLT DstTy = MRI->getType(DstReg0);
568   LLT SrcTy = MRI->getType(SrcReg);
569 
570   const unsigned DstSize = DstTy.getSizeInBits();
571   const unsigned SrcSize = SrcTy.getSizeInBits();
572   const DebugLoc &DL = MI.getDebugLoc();
573   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
574 
575   const TargetRegisterClass *SrcRC =
576       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
577   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
578     return false;
579 
580   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
581   // source, and this relies on the fact that the same subregister indices are
582   // used for both.
583   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
584   for (int I = 0, E = NumDst; I != E; ++I) {
585     MachineOperand &Dst = MI.getOperand(I);
586     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
587       .addReg(SrcReg, 0, SubRegs[I]);
588 
589     // Make sure the subregister index is valid for the source register.
590     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
591     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
592       return false;
593 
594     const TargetRegisterClass *DstRC =
595       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
596     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
597       return false;
598   }
599 
600   MI.eraseFromParent();
601   return true;
602 }
603 
604 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
605   MachineInstr &MI) const {
606   if (selectImpl(MI, *CoverageInfo))
607     return true;
608 
609   const LLT S32 = LLT::scalar(32);
610   const LLT V2S16 = LLT::fixed_vector(2, 16);
611 
612   Register Dst = MI.getOperand(0).getReg();
613   if (MRI->getType(Dst) != V2S16)
614     return false;
615 
616   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
617   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
618     return false;
619 
620   Register Src0 = MI.getOperand(1).getReg();
621   Register Src1 = MI.getOperand(2).getReg();
622   if (MRI->getType(Src0) != S32)
623     return false;
624 
625   const DebugLoc &DL = MI.getDebugLoc();
626   MachineBasicBlock *BB = MI.getParent();
627 
628   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
629   if (ConstSrc1) {
630     auto ConstSrc0 =
631         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
632     if (ConstSrc0) {
633       const int64_t K0 = ConstSrc0->Value.getSExtValue();
634       const int64_t K1 = ConstSrc1->Value.getSExtValue();
635       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
636       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
637 
638       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
639         .addImm(Lo16 | (Hi16 << 16));
640       MI.eraseFromParent();
641       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
642     }
643   }
644 
645   // TODO: This should probably be a combine somewhere
646   // (build_vector_trunc $src0, undef -> copy $src0
647   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
648   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
649     MI.setDesc(TII.get(AMDGPU::COPY));
650     MI.removeOperand(2);
651     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
652            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
653   }
654 
655   Register ShiftSrc0;
656   Register ShiftSrc1;
657 
658   // With multiple uses of the shift, this will duplicate the shift and
659   // increase register pressure.
660   //
661   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
662   //  => (S_PACK_HH_B32_B16 $src0, $src1)
663   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
664   //  => (S_PACK_LH_B32_B16 $src0, $src1)
665   // (build_vector_trunc $src0, $src1)
666   //  => (S_PACK_LL_B32_B16 $src0, $src1)
667 
668   bool Shift0 = mi_match(
669       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
670 
671   bool Shift1 = mi_match(
672       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
673 
674   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
675   if (Shift0 && Shift1) {
676     Opc = AMDGPU::S_PACK_HH_B32_B16;
677     MI.getOperand(1).setReg(ShiftSrc0);
678     MI.getOperand(2).setReg(ShiftSrc1);
679   } else if (Shift1) {
680     Opc = AMDGPU::S_PACK_LH_B32_B16;
681     MI.getOperand(2).setReg(ShiftSrc1);
682   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
683     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
684     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
685       .addReg(ShiftSrc0)
686       .addImm(16);
687 
688     MI.eraseFromParent();
689     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
690   }
691 
692   MI.setDesc(TII.get(Opc));
693   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
694 }
695 
696 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
697   return selectG_ADD_SUB(I);
698 }
699 
700 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
701   const MachineOperand &MO = I.getOperand(0);
702 
703   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
704   // regbank check here is to know why getConstrainedRegClassForOperand failed.
705   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
706   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
707       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
708     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
709     return true;
710   }
711 
712   return false;
713 }
714 
715 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
716   MachineBasicBlock *BB = I.getParent();
717 
718   Register DstReg = I.getOperand(0).getReg();
719   Register Src0Reg = I.getOperand(1).getReg();
720   Register Src1Reg = I.getOperand(2).getReg();
721   LLT Src1Ty = MRI->getType(Src1Reg);
722 
723   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
724   unsigned InsSize = Src1Ty.getSizeInBits();
725 
726   int64_t Offset = I.getOperand(3).getImm();
727 
728   // FIXME: These cases should have been illegal and unnecessary to check here.
729   if (Offset % 32 != 0 || InsSize % 32 != 0)
730     return false;
731 
732   // Currently not handled by getSubRegFromChannel.
733   if (InsSize > 128)
734     return false;
735 
736   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
737   if (SubReg == AMDGPU::NoSubRegister)
738     return false;
739 
740   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
741   const TargetRegisterClass *DstRC =
742       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
743   if (!DstRC)
744     return false;
745 
746   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
747   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
748   const TargetRegisterClass *Src0RC =
749       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
750   const TargetRegisterClass *Src1RC =
751       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
752 
753   // Deal with weird cases where the class only partially supports the subreg
754   // index.
755   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
756   if (!Src0RC || !Src1RC)
757     return false;
758 
759   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
760       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
761       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
762     return false;
763 
764   const DebugLoc &DL = I.getDebugLoc();
765   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
766     .addReg(Src0Reg)
767     .addReg(Src1Reg)
768     .addImm(SubReg);
769 
770   I.eraseFromParent();
771   return true;
772 }
773 
774 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
775   Register DstReg = MI.getOperand(0).getReg();
776   Register SrcReg = MI.getOperand(1).getReg();
777   Register OffsetReg = MI.getOperand(2).getReg();
778   Register WidthReg = MI.getOperand(3).getReg();
779 
780   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
781          "scalar BFX instructions are expanded in regbankselect");
782   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
783          "64-bit vector BFX instructions are expanded in regbankselect");
784 
785   const DebugLoc &DL = MI.getDebugLoc();
786   MachineBasicBlock *MBB = MI.getParent();
787 
788   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
789   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
790   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
791                  .addReg(SrcReg)
792                  .addReg(OffsetReg)
793                  .addReg(WidthReg);
794   MI.eraseFromParent();
795   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
796 }
797 
798 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
799   if (STI.getLDSBankCount() != 16)
800     return selectImpl(MI, *CoverageInfo);
801 
802   Register Dst = MI.getOperand(0).getReg();
803   Register Src0 = MI.getOperand(2).getReg();
804   Register M0Val = MI.getOperand(6).getReg();
805   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
806       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
807       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
808     return false;
809 
810   // This requires 2 instructions. It is possible to write a pattern to support
811   // this, but the generated isel emitter doesn't correctly deal with multiple
812   // output instructions using the same physical register input. The copy to m0
813   // is incorrectly placed before the second instruction.
814   //
815   // TODO: Match source modifiers.
816 
817   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
818   const DebugLoc &DL = MI.getDebugLoc();
819   MachineBasicBlock *MBB = MI.getParent();
820 
821   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
822     .addReg(M0Val);
823   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
824     .addImm(2)
825     .addImm(MI.getOperand(4).getImm())  // $attr
826     .addImm(MI.getOperand(3).getImm()); // $attrchan
827 
828   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
829     .addImm(0)                          // $src0_modifiers
830     .addReg(Src0)                       // $src0
831     .addImm(MI.getOperand(4).getImm())  // $attr
832     .addImm(MI.getOperand(3).getImm())  // $attrchan
833     .addImm(0)                          // $src2_modifiers
834     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
835     .addImm(MI.getOperand(5).getImm())  // $high
836     .addImm(0)                          // $clamp
837     .addImm(0);                         // $omod
838 
839   MI.eraseFromParent();
840   return true;
841 }
842 
843 // Writelane is special in that it can use SGPR and M0 (which would normally
844 // count as using the constant bus twice - but in this case it is allowed since
845 // the lane selector doesn't count as a use of the constant bus). However, it is
846 // still required to abide by the 1 SGPR rule. Fix this up if we might have
847 // multiple SGPRs.
848 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
849   // With a constant bus limit of at least 2, there's no issue.
850   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
851     return selectImpl(MI, *CoverageInfo);
852 
853   MachineBasicBlock *MBB = MI.getParent();
854   const DebugLoc &DL = MI.getDebugLoc();
855   Register VDst = MI.getOperand(0).getReg();
856   Register Val = MI.getOperand(2).getReg();
857   Register LaneSelect = MI.getOperand(3).getReg();
858   Register VDstIn = MI.getOperand(4).getReg();
859 
860   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
861 
862   Optional<ValueAndVReg> ConstSelect =
863       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
864   if (ConstSelect) {
865     // The selector has to be an inline immediate, so we can use whatever for
866     // the other operands.
867     MIB.addReg(Val);
868     MIB.addImm(ConstSelect->Value.getSExtValue() &
869                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
870   } else {
871     Optional<ValueAndVReg> ConstVal =
872         getIConstantVRegValWithLookThrough(Val, *MRI);
873 
874     // If the value written is an inline immediate, we can get away without a
875     // copy to m0.
876     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
877                                                  STI.hasInv2PiInlineImm())) {
878       MIB.addImm(ConstVal->Value.getSExtValue());
879       MIB.addReg(LaneSelect);
880     } else {
881       MIB.addReg(Val);
882 
883       // If the lane selector was originally in a VGPR and copied with
884       // readfirstlane, there's a hazard to read the same SGPR from the
885       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
886       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
887 
888       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
889         .addReg(LaneSelect);
890       MIB.addReg(AMDGPU::M0);
891     }
892   }
893 
894   MIB.addReg(VDstIn);
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 // We need to handle this here because tablegen doesn't support matching
901 // instructions with multiple outputs.
902 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
903   Register Dst0 = MI.getOperand(0).getReg();
904   Register Dst1 = MI.getOperand(1).getReg();
905 
906   LLT Ty = MRI->getType(Dst0);
907   unsigned Opc;
908   if (Ty == LLT::scalar(32))
909     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
910   else if (Ty == LLT::scalar(64))
911     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
912   else
913     return false;
914 
915   // TODO: Match source modifiers.
916 
917   const DebugLoc &DL = MI.getDebugLoc();
918   MachineBasicBlock *MBB = MI.getParent();
919 
920   Register Numer = MI.getOperand(3).getReg();
921   Register Denom = MI.getOperand(4).getReg();
922   unsigned ChooseDenom = MI.getOperand(5).getImm();
923 
924   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
925 
926   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
927     .addDef(Dst1)
928     .addImm(0)     // $src0_modifiers
929     .addUse(Src0)  // $src0
930     .addImm(0)     // $src1_modifiers
931     .addUse(Denom) // $src1
932     .addImm(0)     // $src2_modifiers
933     .addUse(Numer) // $src2
934     .addImm(0)     // $clamp
935     .addImm(0);    // $omod
936 
937   MI.eraseFromParent();
938   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
939 }
940 
941 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
942   unsigned IntrinsicID = I.getIntrinsicID();
943   switch (IntrinsicID) {
944   case Intrinsic::amdgcn_if_break: {
945     MachineBasicBlock *BB = I.getParent();
946 
947     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
948     // SelectionDAG uses for wave32 vs wave64.
949     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
950       .add(I.getOperand(0))
951       .add(I.getOperand(2))
952       .add(I.getOperand(3));
953 
954     Register DstReg = I.getOperand(0).getReg();
955     Register Src0Reg = I.getOperand(2).getReg();
956     Register Src1Reg = I.getOperand(3).getReg();
957 
958     I.eraseFromParent();
959 
960     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
961       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
962 
963     return true;
964   }
965   case Intrinsic::amdgcn_interp_p1_f16:
966     return selectInterpP1F16(I);
967   case Intrinsic::amdgcn_wqm:
968     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
969   case Intrinsic::amdgcn_softwqm:
970     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
971   case Intrinsic::amdgcn_strict_wwm:
972   case Intrinsic::amdgcn_wwm:
973     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
974   case Intrinsic::amdgcn_strict_wqm:
975     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
976   case Intrinsic::amdgcn_writelane:
977     return selectWritelane(I);
978   case Intrinsic::amdgcn_div_scale:
979     return selectDivScale(I);
980   case Intrinsic::amdgcn_icmp:
981     return selectIntrinsicIcmp(I);
982   case Intrinsic::amdgcn_ballot:
983     return selectBallot(I);
984   case Intrinsic::amdgcn_reloc_constant:
985     return selectRelocConstant(I);
986   case Intrinsic::amdgcn_groupstaticsize:
987     return selectGroupStaticSize(I);
988   case Intrinsic::returnaddress:
989     return selectReturnAddress(I);
990   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
991   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
992   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
993   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
994   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
995   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
996     return selectSMFMACIntrin(I);
997   default:
998     return selectImpl(I, *CoverageInfo);
999   }
1000 }
1001 
1002 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1003   if (Size != 32 && Size != 64)
1004     return -1;
1005   switch (P) {
1006   default:
1007     llvm_unreachable("Unknown condition code!");
1008   case CmpInst::ICMP_NE:
1009     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1010   case CmpInst::ICMP_EQ:
1011     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1012   case CmpInst::ICMP_SGT:
1013     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1014   case CmpInst::ICMP_SGE:
1015     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1016   case CmpInst::ICMP_SLT:
1017     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1018   case CmpInst::ICMP_SLE:
1019     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1020   case CmpInst::ICMP_UGT:
1021     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1022   case CmpInst::ICMP_UGE:
1023     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1024   case CmpInst::ICMP_ULT:
1025     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1026   case CmpInst::ICMP_ULE:
1027     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1028   }
1029 }
1030 
1031 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1032                                               unsigned Size) const {
1033   if (Size == 64) {
1034     if (!STI.hasScalarCompareEq64())
1035       return -1;
1036 
1037     switch (P) {
1038     case CmpInst::ICMP_NE:
1039       return AMDGPU::S_CMP_LG_U64;
1040     case CmpInst::ICMP_EQ:
1041       return AMDGPU::S_CMP_EQ_U64;
1042     default:
1043       return -1;
1044     }
1045   }
1046 
1047   if (Size != 32)
1048     return -1;
1049 
1050   switch (P) {
1051   case CmpInst::ICMP_NE:
1052     return AMDGPU::S_CMP_LG_U32;
1053   case CmpInst::ICMP_EQ:
1054     return AMDGPU::S_CMP_EQ_U32;
1055   case CmpInst::ICMP_SGT:
1056     return AMDGPU::S_CMP_GT_I32;
1057   case CmpInst::ICMP_SGE:
1058     return AMDGPU::S_CMP_GE_I32;
1059   case CmpInst::ICMP_SLT:
1060     return AMDGPU::S_CMP_LT_I32;
1061   case CmpInst::ICMP_SLE:
1062     return AMDGPU::S_CMP_LE_I32;
1063   case CmpInst::ICMP_UGT:
1064     return AMDGPU::S_CMP_GT_U32;
1065   case CmpInst::ICMP_UGE:
1066     return AMDGPU::S_CMP_GE_U32;
1067   case CmpInst::ICMP_ULT:
1068     return AMDGPU::S_CMP_LT_U32;
1069   case CmpInst::ICMP_ULE:
1070     return AMDGPU::S_CMP_LE_U32;
1071   default:
1072     llvm_unreachable("Unknown condition code!");
1073   }
1074 }
1075 
1076 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1077   MachineBasicBlock *BB = I.getParent();
1078   const DebugLoc &DL = I.getDebugLoc();
1079 
1080   Register SrcReg = I.getOperand(2).getReg();
1081   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1082 
1083   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1084 
1085   Register CCReg = I.getOperand(0).getReg();
1086   if (!isVCC(CCReg, *MRI)) {
1087     int Opcode = getS_CMPOpcode(Pred, Size);
1088     if (Opcode == -1)
1089       return false;
1090     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1091             .add(I.getOperand(2))
1092             .add(I.getOperand(3));
1093     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1094       .addReg(AMDGPU::SCC);
1095     bool Ret =
1096         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1097         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1098     I.eraseFromParent();
1099     return Ret;
1100   }
1101 
1102   int Opcode = getV_CMPOpcode(Pred, Size);
1103   if (Opcode == -1)
1104     return false;
1105 
1106   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1107             I.getOperand(0).getReg())
1108             .add(I.getOperand(2))
1109             .add(I.getOperand(3));
1110   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1111                                *TRI.getBoolRC(), *MRI);
1112   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1113   I.eraseFromParent();
1114   return Ret;
1115 }
1116 
1117 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1118   Register Dst = I.getOperand(0).getReg();
1119   if (isVCC(Dst, *MRI))
1120     return false;
1121 
1122   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1123     return false;
1124 
1125   MachineBasicBlock *BB = I.getParent();
1126   const DebugLoc &DL = I.getDebugLoc();
1127   Register SrcReg = I.getOperand(2).getReg();
1128   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1129 
1130   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1131   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1132     MachineInstr *ICmp =
1133         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1134 
1135     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1136                                       *TRI.getBoolRC(), *MRI))
1137       return false;
1138     I.eraseFromParent();
1139     return true;
1140   }
1141 
1142   int Opcode = getV_CMPOpcode(Pred, Size);
1143   if (Opcode == -1)
1144     return false;
1145 
1146   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1147                            .add(I.getOperand(2))
1148                            .add(I.getOperand(3));
1149   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1150                                *MRI);
1151   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1152   I.eraseFromParent();
1153   return Ret;
1154 }
1155 
1156 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1157   MachineBasicBlock *BB = I.getParent();
1158   const DebugLoc &DL = I.getDebugLoc();
1159   Register DstReg = I.getOperand(0).getReg();
1160   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1161   const bool Is64 = Size == 64;
1162 
1163   if (Size != STI.getWavefrontSize())
1164     return false;
1165 
1166   Optional<ValueAndVReg> Arg =
1167       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1168 
1169   if (Arg.hasValue()) {
1170     const int64_t Value = Arg.getValue().Value.getSExtValue();
1171     if (Value == 0) {
1172       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1173       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1174     } else if (Value == -1) { // all ones
1175       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1176       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1177     } else
1178       return false;
1179   } else {
1180     Register SrcReg = I.getOperand(2).getReg();
1181     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1182   }
1183 
1184   I.eraseFromParent();
1185   return true;
1186 }
1187 
1188 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1189   Register DstReg = I.getOperand(0).getReg();
1190   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1191   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1192   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1193     return false;
1194 
1195   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1196 
1197   Module *M = MF->getFunction().getParent();
1198   const MDNode *Metadata = I.getOperand(2).getMetadata();
1199   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1200   auto RelocSymbol = cast<GlobalVariable>(
1201     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1202 
1203   MachineBasicBlock *BB = I.getParent();
1204   BuildMI(*BB, &I, I.getDebugLoc(),
1205           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1206     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1207 
1208   I.eraseFromParent();
1209   return true;
1210 }
1211 
1212 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1213   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1214 
1215   Register DstReg = I.getOperand(0).getReg();
1216   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1217   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1218     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1219 
1220   MachineBasicBlock *MBB = I.getParent();
1221   const DebugLoc &DL = I.getDebugLoc();
1222 
1223   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1224 
1225   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1226     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1227     MIB.addImm(MFI->getLDSSize());
1228   } else {
1229     Module *M = MF->getFunction().getParent();
1230     const GlobalValue *GV
1231       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1232     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1233   }
1234 
1235   I.eraseFromParent();
1236   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1237 }
1238 
1239 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1240   MachineBasicBlock *MBB = I.getParent();
1241   MachineFunction &MF = *MBB->getParent();
1242   const DebugLoc &DL = I.getDebugLoc();
1243 
1244   MachineOperand &Dst = I.getOperand(0);
1245   Register DstReg = Dst.getReg();
1246   unsigned Depth = I.getOperand(2).getImm();
1247 
1248   const TargetRegisterClass *RC
1249     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1250   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1251       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1252     return false;
1253 
1254   // Check for kernel and shader functions
1255   if (Depth != 0 ||
1256       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1257     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1258       .addImm(0);
1259     I.eraseFromParent();
1260     return true;
1261   }
1262 
1263   MachineFrameInfo &MFI = MF.getFrameInfo();
1264   // There is a call to @llvm.returnaddress in this function
1265   MFI.setReturnAddressIsTaken(true);
1266 
1267   // Get the return address reg and mark it as an implicit live-in
1268   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1269   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1270                                              AMDGPU::SReg_64RegClass, DL);
1271   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1272     .addReg(LiveIn);
1273   I.eraseFromParent();
1274   return true;
1275 }
1276 
1277 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1278   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1279   // SelectionDAG uses for wave32 vs wave64.
1280   MachineBasicBlock *BB = MI.getParent();
1281   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1282       .add(MI.getOperand(1));
1283 
1284   Register Reg = MI.getOperand(1).getReg();
1285   MI.eraseFromParent();
1286 
1287   if (!MRI->getRegClassOrNull(Reg))
1288     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1289   return true;
1290 }
1291 
1292 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1293   MachineInstr &MI, Intrinsic::ID IntrID) const {
1294   MachineBasicBlock *MBB = MI.getParent();
1295   MachineFunction *MF = MBB->getParent();
1296   const DebugLoc &DL = MI.getDebugLoc();
1297 
1298   unsigned IndexOperand = MI.getOperand(7).getImm();
1299   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1300   bool WaveDone = MI.getOperand(9).getImm() != 0;
1301 
1302   if (WaveDone && !WaveRelease)
1303     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1304 
1305   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1306   IndexOperand &= ~0x3f;
1307   unsigned CountDw = 0;
1308 
1309   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1310     CountDw = (IndexOperand >> 24) & 0xf;
1311     IndexOperand &= ~(0xf << 24);
1312 
1313     if (CountDw < 1 || CountDw > 4) {
1314       report_fatal_error(
1315         "ds_ordered_count: dword count must be between 1 and 4");
1316     }
1317   }
1318 
1319   if (IndexOperand)
1320     report_fatal_error("ds_ordered_count: bad index operand");
1321 
1322   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1323   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1324 
1325   unsigned Offset0 = OrderedCountIndex << 2;
1326   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1327                      (Instruction << 4);
1328 
1329   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1330     Offset1 |= (CountDw - 1) << 6;
1331 
1332   unsigned Offset = Offset0 | (Offset1 << 8);
1333 
1334   Register M0Val = MI.getOperand(2).getReg();
1335   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1336     .addReg(M0Val);
1337 
1338   Register DstReg = MI.getOperand(0).getReg();
1339   Register ValReg = MI.getOperand(3).getReg();
1340   MachineInstrBuilder DS =
1341     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1342       .addReg(ValReg)
1343       .addImm(Offset)
1344       .cloneMemRefs(MI);
1345 
1346   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1347     return false;
1348 
1349   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1350   MI.eraseFromParent();
1351   return Ret;
1352 }
1353 
1354 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1355   switch (IntrID) {
1356   case Intrinsic::amdgcn_ds_gws_init:
1357     return AMDGPU::DS_GWS_INIT;
1358   case Intrinsic::amdgcn_ds_gws_barrier:
1359     return AMDGPU::DS_GWS_BARRIER;
1360   case Intrinsic::amdgcn_ds_gws_sema_v:
1361     return AMDGPU::DS_GWS_SEMA_V;
1362   case Intrinsic::amdgcn_ds_gws_sema_br:
1363     return AMDGPU::DS_GWS_SEMA_BR;
1364   case Intrinsic::amdgcn_ds_gws_sema_p:
1365     return AMDGPU::DS_GWS_SEMA_P;
1366   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1367     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1368   default:
1369     llvm_unreachable("not a gws intrinsic");
1370   }
1371 }
1372 
1373 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1374                                                      Intrinsic::ID IID) const {
1375   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1376       !STI.hasGWSSemaReleaseAll())
1377     return false;
1378 
1379   // intrinsic ID, vsrc, offset
1380   const bool HasVSrc = MI.getNumOperands() == 3;
1381   assert(HasVSrc || MI.getNumOperands() == 2);
1382 
1383   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1384   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1385   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1386     return false;
1387 
1388   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1389   assert(OffsetDef);
1390 
1391   unsigned ImmOffset;
1392 
1393   MachineBasicBlock *MBB = MI.getParent();
1394   const DebugLoc &DL = MI.getDebugLoc();
1395 
1396   MachineInstr *Readfirstlane = nullptr;
1397 
1398   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1399   // incoming offset, in case there's an add of a constant. We'll have to put it
1400   // back later.
1401   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1402     Readfirstlane = OffsetDef;
1403     BaseOffset = OffsetDef->getOperand(1).getReg();
1404     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1405   }
1406 
1407   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1408     // If we have a constant offset, try to use the 0 in m0 as the base.
1409     // TODO: Look into changing the default m0 initialization value. If the
1410     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1411     // the immediate offset.
1412 
1413     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1414     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1415       .addImm(0);
1416   } else {
1417     std::tie(BaseOffset, ImmOffset) =
1418         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1419 
1420     if (Readfirstlane) {
1421       // We have the constant offset now, so put the readfirstlane back on the
1422       // variable component.
1423       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1424         return false;
1425 
1426       Readfirstlane->getOperand(1).setReg(BaseOffset);
1427       BaseOffset = Readfirstlane->getOperand(0).getReg();
1428     } else {
1429       if (!RBI.constrainGenericRegister(BaseOffset,
1430                                         AMDGPU::SReg_32RegClass, *MRI))
1431         return false;
1432     }
1433 
1434     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1435     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1436       .addReg(BaseOffset)
1437       .addImm(16);
1438 
1439     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1440       .addReg(M0Base);
1441   }
1442 
1443   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1444   // offset field) % 64. Some versions of the programming guide omit the m0
1445   // part, or claim it's from offset 0.
1446   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1447 
1448   if (HasVSrc) {
1449     Register VSrc = MI.getOperand(1).getReg();
1450     MIB.addReg(VSrc);
1451 
1452     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1453       return false;
1454   }
1455 
1456   MIB.addImm(ImmOffset)
1457      .cloneMemRefs(MI);
1458 
1459   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1460 
1461   MI.eraseFromParent();
1462   return true;
1463 }
1464 
1465 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1466                                                       bool IsAppend) const {
1467   Register PtrBase = MI.getOperand(2).getReg();
1468   LLT PtrTy = MRI->getType(PtrBase);
1469   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1470 
1471   unsigned Offset;
1472   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1473 
1474   // TODO: Should this try to look through readfirstlane like GWS?
1475   if (!isDSOffsetLegal(PtrBase, Offset)) {
1476     PtrBase = MI.getOperand(2).getReg();
1477     Offset = 0;
1478   }
1479 
1480   MachineBasicBlock *MBB = MI.getParent();
1481   const DebugLoc &DL = MI.getDebugLoc();
1482   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1483 
1484   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1485     .addReg(PtrBase);
1486   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1487     return false;
1488 
1489   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1490     .addImm(Offset)
1491     .addImm(IsGDS ? -1 : 0)
1492     .cloneMemRefs(MI);
1493   MI.eraseFromParent();
1494   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1495 }
1496 
1497 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1498   if (TM.getOptLevel() > CodeGenOpt::None) {
1499     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1500     if (WGSize <= STI.getWavefrontSize()) {
1501       MachineBasicBlock *MBB = MI.getParent();
1502       const DebugLoc &DL = MI.getDebugLoc();
1503       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1504       MI.eraseFromParent();
1505       return true;
1506     }
1507   }
1508   return selectImpl(MI, *CoverageInfo);
1509 }
1510 
1511 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1512                          bool &IsTexFail) {
1513   if (TexFailCtrl)
1514     IsTexFail = true;
1515 
1516   TFE = (TexFailCtrl & 0x1) ? true : false;
1517   TexFailCtrl &= ~(uint64_t)0x1;
1518   LWE = (TexFailCtrl & 0x2) ? true : false;
1519   TexFailCtrl &= ~(uint64_t)0x2;
1520 
1521   return TexFailCtrl == 0;
1522 }
1523 
1524 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1525   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1526   MachineBasicBlock *MBB = MI.getParent();
1527   const DebugLoc &DL = MI.getDebugLoc();
1528 
1529   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1530     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1531 
1532   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1533   unsigned IntrOpcode = Intr->BaseOpcode;
1534   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1535 
1536   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1537 
1538   Register VDataIn, VDataOut;
1539   LLT VDataTy;
1540   int NumVDataDwords = -1;
1541   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1542                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1543 
1544   bool Unorm;
1545   if (!BaseOpcode->Sampler)
1546     Unorm = true;
1547   else
1548     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1549 
1550   bool TFE;
1551   bool LWE;
1552   bool IsTexFail = false;
1553   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1554                     TFE, LWE, IsTexFail))
1555     return false;
1556 
1557   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1558   const bool IsA16 = (Flags & 1) != 0;
1559   const bool IsG16 = (Flags & 2) != 0;
1560 
1561   // A16 implies 16 bit gradients if subtarget doesn't support G16
1562   if (IsA16 && !STI.hasG16() && !IsG16)
1563     return false;
1564 
1565   unsigned DMask = 0;
1566   unsigned DMaskLanes = 0;
1567 
1568   if (BaseOpcode->Atomic) {
1569     VDataOut = MI.getOperand(0).getReg();
1570     VDataIn = MI.getOperand(2).getReg();
1571     LLT Ty = MRI->getType(VDataIn);
1572 
1573     // Be careful to allow atomic swap on 16-bit element vectors.
1574     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1575       Ty.getSizeInBits() == 128 :
1576       Ty.getSizeInBits() == 64;
1577 
1578     if (BaseOpcode->AtomicX2) {
1579       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1580 
1581       DMask = Is64Bit ? 0xf : 0x3;
1582       NumVDataDwords = Is64Bit ? 4 : 2;
1583     } else {
1584       DMask = Is64Bit ? 0x3 : 0x1;
1585       NumVDataDwords = Is64Bit ? 2 : 1;
1586     }
1587   } else {
1588     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1589     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1590 
1591     if (BaseOpcode->Store) {
1592       VDataIn = MI.getOperand(1).getReg();
1593       VDataTy = MRI->getType(VDataIn);
1594       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1595     } else {
1596       VDataOut = MI.getOperand(0).getReg();
1597       VDataTy = MRI->getType(VDataOut);
1598       NumVDataDwords = DMaskLanes;
1599 
1600       if (IsD16 && !STI.hasUnpackedD16VMem())
1601         NumVDataDwords = (DMaskLanes + 1) / 2;
1602     }
1603   }
1604 
1605   // Set G16 opcode
1606   if (IsG16 && !IsA16) {
1607     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1608         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1609     assert(G16MappingInfo);
1610     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1611   }
1612 
1613   // TODO: Check this in verifier.
1614   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1615 
1616   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1617   if (BaseOpcode->Atomic)
1618     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1619   if (CPol & ~AMDGPU::CPol::ALL)
1620     return false;
1621 
1622   int NumVAddrRegs = 0;
1623   int NumVAddrDwords = 0;
1624   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1625     // Skip the $noregs and 0s inserted during legalization.
1626     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1627     if (!AddrOp.isReg())
1628       continue; // XXX - Break?
1629 
1630     Register Addr = AddrOp.getReg();
1631     if (!Addr)
1632       break;
1633 
1634     ++NumVAddrRegs;
1635     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1636   }
1637 
1638   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1639   // NSA, these should have been packed into a single value in the first
1640   // address register
1641   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1642   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1643     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1644     return false;
1645   }
1646 
1647   if (IsTexFail)
1648     ++NumVDataDwords;
1649 
1650   int Opcode = -1;
1651   if (IsGFX10Plus) {
1652     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1653                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1654                                           : AMDGPU::MIMGEncGfx10Default,
1655                                    NumVDataDwords, NumVAddrDwords);
1656   } else {
1657     if (Subtarget->hasGFX90AInsts()) {
1658       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1659                                      NumVDataDwords, NumVAddrDwords);
1660       if (Opcode == -1) {
1661         LLVM_DEBUG(
1662             dbgs()
1663             << "requested image instruction is not supported on this GPU\n");
1664         return false;
1665       }
1666     }
1667     if (Opcode == -1 &&
1668         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1669       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1670                                      NumVDataDwords, NumVAddrDwords);
1671     if (Opcode == -1)
1672       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1673                                      NumVDataDwords, NumVAddrDwords);
1674   }
1675   assert(Opcode != -1);
1676 
1677   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1678     .cloneMemRefs(MI);
1679 
1680   if (VDataOut) {
1681     if (BaseOpcode->AtomicX2) {
1682       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1683 
1684       Register TmpReg = MRI->createVirtualRegister(
1685         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1686       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1687 
1688       MIB.addDef(TmpReg);
1689       if (!MRI->use_empty(VDataOut)) {
1690         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1691             .addReg(TmpReg, RegState::Kill, SubReg);
1692       }
1693 
1694     } else {
1695       MIB.addDef(VDataOut); // vdata output
1696     }
1697   }
1698 
1699   if (VDataIn)
1700     MIB.addReg(VDataIn); // vdata input
1701 
1702   for (int I = 0; I != NumVAddrRegs; ++I) {
1703     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1704     if (SrcOp.isReg()) {
1705       assert(SrcOp.getReg() != 0);
1706       MIB.addReg(SrcOp.getReg());
1707     }
1708   }
1709 
1710   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1711   if (BaseOpcode->Sampler)
1712     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1713 
1714   MIB.addImm(DMask); // dmask
1715 
1716   if (IsGFX10Plus)
1717     MIB.addImm(DimInfo->Encoding);
1718   MIB.addImm(Unorm);
1719 
1720   MIB.addImm(CPol);
1721   MIB.addImm(IsA16 &&  // a16 or r128
1722              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1723   if (IsGFX10Plus)
1724     MIB.addImm(IsA16 ? -1 : 0);
1725 
1726   if (!Subtarget->hasGFX90AInsts())
1727     MIB.addImm(TFE); // tfe
1728   else if (TFE)
1729     report_fatal_error("TFE is not supported on this GPU");
1730 
1731   MIB.addImm(LWE); // lwe
1732   if (!IsGFX10Plus)
1733     MIB.addImm(DimInfo->DA ? -1 : 0);
1734   if (BaseOpcode->HasD16)
1735     MIB.addImm(IsD16 ? -1 : 0);
1736 
1737   if (IsTexFail) {
1738     // An image load instruction with TFE/LWE only conditionally writes to its
1739     // result registers. Initialize them to zero so that we always get well
1740     // defined result values.
1741     assert(VDataOut && !VDataIn);
1742     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1743     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1744     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1745       .addImm(0);
1746     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1747     if (STI.usePRTStrictNull()) {
1748       // With enable-prt-strict-null enabled, initialize all result registers to
1749       // zero.
1750       auto RegSeq =
1751           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1752       for (auto Sub : Parts)
1753         RegSeq.addReg(Zero).addImm(Sub);
1754     } else {
1755       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1756       // result register.
1757       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1758       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1759       auto RegSeq =
1760           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1761       for (auto Sub : Parts.drop_back(1))
1762         RegSeq.addReg(Undef).addImm(Sub);
1763       RegSeq.addReg(Zero).addImm(Parts.back());
1764     }
1765     MIB.addReg(Tied, RegState::Implicit);
1766     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1767   }
1768 
1769   MI.eraseFromParent();
1770   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1771   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1772   return true;
1773 }
1774 
1775 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1776     MachineInstr &I) const {
1777   unsigned IntrinsicID = I.getIntrinsicID();
1778   switch (IntrinsicID) {
1779   case Intrinsic::amdgcn_end_cf:
1780     return selectEndCfIntrinsic(I);
1781   case Intrinsic::amdgcn_ds_ordered_add:
1782   case Intrinsic::amdgcn_ds_ordered_swap:
1783     return selectDSOrderedIntrinsic(I, IntrinsicID);
1784   case Intrinsic::amdgcn_ds_gws_init:
1785   case Intrinsic::amdgcn_ds_gws_barrier:
1786   case Intrinsic::amdgcn_ds_gws_sema_v:
1787   case Intrinsic::amdgcn_ds_gws_sema_br:
1788   case Intrinsic::amdgcn_ds_gws_sema_p:
1789   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1790     return selectDSGWSIntrinsic(I, IntrinsicID);
1791   case Intrinsic::amdgcn_ds_append:
1792     return selectDSAppendConsume(I, true);
1793   case Intrinsic::amdgcn_ds_consume:
1794     return selectDSAppendConsume(I, false);
1795   case Intrinsic::amdgcn_s_barrier:
1796     return selectSBarrier(I);
1797   case Intrinsic::amdgcn_global_atomic_fadd:
1798     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1799   case Intrinsic::amdgcn_raw_buffer_load_lds:
1800   case Intrinsic::amdgcn_struct_buffer_load_lds:
1801     return selectBufferLoadLds(I);
1802   case Intrinsic::amdgcn_global_load_lds:
1803     return selectGlobalLoadLds(I);
1804   default: {
1805     return selectImpl(I, *CoverageInfo);
1806   }
1807   }
1808 }
1809 
1810 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1811   if (selectImpl(I, *CoverageInfo))
1812     return true;
1813 
1814   MachineBasicBlock *BB = I.getParent();
1815   const DebugLoc &DL = I.getDebugLoc();
1816 
1817   Register DstReg = I.getOperand(0).getReg();
1818   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1819   assert(Size <= 32 || Size == 64);
1820   const MachineOperand &CCOp = I.getOperand(1);
1821   Register CCReg = CCOp.getReg();
1822   if (!isVCC(CCReg, *MRI)) {
1823     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1824                                          AMDGPU::S_CSELECT_B32;
1825     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1826             .addReg(CCReg);
1827 
1828     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1829     // bank, because it does not cover the register class that we used to represent
1830     // for it.  So we need to manually set the register class here.
1831     if (!MRI->getRegClassOrNull(CCReg))
1832         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1833     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1834             .add(I.getOperand(2))
1835             .add(I.getOperand(3));
1836 
1837     bool Ret = false;
1838     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1839     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1840     I.eraseFromParent();
1841     return Ret;
1842   }
1843 
1844   // Wide VGPR select should have been split in RegBankSelect.
1845   if (Size > 32)
1846     return false;
1847 
1848   MachineInstr *Select =
1849       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1850               .addImm(0)
1851               .add(I.getOperand(3))
1852               .addImm(0)
1853               .add(I.getOperand(2))
1854               .add(I.getOperand(1));
1855 
1856   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1857   I.eraseFromParent();
1858   return Ret;
1859 }
1860 
1861 static int sizeToSubRegIndex(unsigned Size) {
1862   switch (Size) {
1863   case 32:
1864     return AMDGPU::sub0;
1865   case 64:
1866     return AMDGPU::sub0_sub1;
1867   case 96:
1868     return AMDGPU::sub0_sub1_sub2;
1869   case 128:
1870     return AMDGPU::sub0_sub1_sub2_sub3;
1871   case 256:
1872     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1873   default:
1874     if (Size < 32)
1875       return AMDGPU::sub0;
1876     if (Size > 256)
1877       return -1;
1878     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1879   }
1880 }
1881 
1882 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1883   Register DstReg = I.getOperand(0).getReg();
1884   Register SrcReg = I.getOperand(1).getReg();
1885   const LLT DstTy = MRI->getType(DstReg);
1886   const LLT SrcTy = MRI->getType(SrcReg);
1887   const LLT S1 = LLT::scalar(1);
1888 
1889   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1890   const RegisterBank *DstRB;
1891   if (DstTy == S1) {
1892     // This is a special case. We don't treat s1 for legalization artifacts as
1893     // vcc booleans.
1894     DstRB = SrcRB;
1895   } else {
1896     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1897     if (SrcRB != DstRB)
1898       return false;
1899   }
1900 
1901   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1902 
1903   unsigned DstSize = DstTy.getSizeInBits();
1904   unsigned SrcSize = SrcTy.getSizeInBits();
1905 
1906   const TargetRegisterClass *SrcRC =
1907       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1908   const TargetRegisterClass *DstRC =
1909       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1910   if (!SrcRC || !DstRC)
1911     return false;
1912 
1913   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1914       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1915     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1916     return false;
1917   }
1918 
1919   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1920     MachineBasicBlock *MBB = I.getParent();
1921     const DebugLoc &DL = I.getDebugLoc();
1922 
1923     Register LoReg = MRI->createVirtualRegister(DstRC);
1924     Register HiReg = MRI->createVirtualRegister(DstRC);
1925     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1926       .addReg(SrcReg, 0, AMDGPU::sub0);
1927     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1928       .addReg(SrcReg, 0, AMDGPU::sub1);
1929 
1930     if (IsVALU && STI.hasSDWA()) {
1931       // Write the low 16-bits of the high element into the high 16-bits of the
1932       // low element.
1933       MachineInstr *MovSDWA =
1934         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1935         .addImm(0)                             // $src0_modifiers
1936         .addReg(HiReg)                         // $src0
1937         .addImm(0)                             // $clamp
1938         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1939         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1940         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1941         .addReg(LoReg, RegState::Implicit);
1942       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1943     } else {
1944       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1945       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1946       Register ImmReg = MRI->createVirtualRegister(DstRC);
1947       if (IsVALU) {
1948         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1949           .addImm(16)
1950           .addReg(HiReg);
1951       } else {
1952         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1953           .addReg(HiReg)
1954           .addImm(16);
1955       }
1956 
1957       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1958       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1959       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1960 
1961       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1962         .addImm(0xffff);
1963       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1964         .addReg(LoReg)
1965         .addReg(ImmReg);
1966       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1967         .addReg(TmpReg0)
1968         .addReg(TmpReg1);
1969     }
1970 
1971     I.eraseFromParent();
1972     return true;
1973   }
1974 
1975   if (!DstTy.isScalar())
1976     return false;
1977 
1978   if (SrcSize > 32) {
1979     int SubRegIdx = sizeToSubRegIndex(DstSize);
1980     if (SubRegIdx == -1)
1981       return false;
1982 
1983     // Deal with weird cases where the class only partially supports the subreg
1984     // index.
1985     const TargetRegisterClass *SrcWithSubRC
1986       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1987     if (!SrcWithSubRC)
1988       return false;
1989 
1990     if (SrcWithSubRC != SrcRC) {
1991       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1992         return false;
1993     }
1994 
1995     I.getOperand(1).setSubReg(SubRegIdx);
1996   }
1997 
1998   I.setDesc(TII.get(TargetOpcode::COPY));
1999   return true;
2000 }
2001 
2002 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2003 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2004   Mask = maskTrailingOnes<unsigned>(Size);
2005   int SignedMask = static_cast<int>(Mask);
2006   return SignedMask >= -16 && SignedMask <= 64;
2007 }
2008 
2009 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2010 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2011   Register Reg, const MachineRegisterInfo &MRI,
2012   const TargetRegisterInfo &TRI) const {
2013   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2014   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2015     return RB;
2016 
2017   // Ignore the type, since we don't use vcc in artifacts.
2018   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2019     return &RBI.getRegBankFromRegClass(*RC, LLT());
2020   return nullptr;
2021 }
2022 
2023 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2024   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2025   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2026   const DebugLoc &DL = I.getDebugLoc();
2027   MachineBasicBlock &MBB = *I.getParent();
2028   const Register DstReg = I.getOperand(0).getReg();
2029   const Register SrcReg = I.getOperand(1).getReg();
2030 
2031   const LLT DstTy = MRI->getType(DstReg);
2032   const LLT SrcTy = MRI->getType(SrcReg);
2033   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2034     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2035   const unsigned DstSize = DstTy.getSizeInBits();
2036   if (!DstTy.isScalar())
2037     return false;
2038 
2039   // Artifact casts should never use vcc.
2040   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2041 
2042   // FIXME: This should probably be illegal and split earlier.
2043   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2044     if (DstSize <= 32)
2045       return selectCOPY(I);
2046 
2047     const TargetRegisterClass *SrcRC =
2048         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2049     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2050     const TargetRegisterClass *DstRC =
2051         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2052 
2053     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2054     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2055     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2056       .addReg(SrcReg)
2057       .addImm(AMDGPU::sub0)
2058       .addReg(UndefReg)
2059       .addImm(AMDGPU::sub1);
2060     I.eraseFromParent();
2061 
2062     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2063            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2064   }
2065 
2066   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2067     // 64-bit should have been split up in RegBankSelect
2068 
2069     // Try to use an and with a mask if it will save code size.
2070     unsigned Mask;
2071     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2072       MachineInstr *ExtI =
2073       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2074         .addImm(Mask)
2075         .addReg(SrcReg);
2076       I.eraseFromParent();
2077       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2078     }
2079 
2080     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2081     MachineInstr *ExtI =
2082       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2083       .addReg(SrcReg)
2084       .addImm(0) // Offset
2085       .addImm(SrcSize); // Width
2086     I.eraseFromParent();
2087     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2088   }
2089 
2090   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2091     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2092       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2093     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2094       return false;
2095 
2096     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2097       const unsigned SextOpc = SrcSize == 8 ?
2098         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2099       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2100         .addReg(SrcReg);
2101       I.eraseFromParent();
2102       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2103     }
2104 
2105     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2106     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2107 
2108     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2109     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2110       // We need a 64-bit register source, but the high bits don't matter.
2111       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2112       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2113       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2114 
2115       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2116       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2117         .addReg(SrcReg, 0, SubReg)
2118         .addImm(AMDGPU::sub0)
2119         .addReg(UndefReg)
2120         .addImm(AMDGPU::sub1);
2121 
2122       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2123         .addReg(ExtReg)
2124         .addImm(SrcSize << 16);
2125 
2126       I.eraseFromParent();
2127       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2128     }
2129 
2130     unsigned Mask;
2131     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2132       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2133         .addReg(SrcReg)
2134         .addImm(Mask);
2135     } else {
2136       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2137         .addReg(SrcReg)
2138         .addImm(SrcSize << 16);
2139     }
2140 
2141     I.eraseFromParent();
2142     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2143   }
2144 
2145   return false;
2146 }
2147 
2148 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2149   MachineBasicBlock *BB = I.getParent();
2150   MachineOperand &ImmOp = I.getOperand(1);
2151   Register DstReg = I.getOperand(0).getReg();
2152   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2153 
2154   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2155   if (ImmOp.isFPImm()) {
2156     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2157     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2158   } else if (ImmOp.isCImm()) {
2159     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2160   } else {
2161     llvm_unreachable("Not supported by g_constants");
2162   }
2163 
2164   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2165   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2166 
2167   unsigned Opcode;
2168   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2169     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2170   } else {
2171     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2172 
2173     // We should never produce s1 values on banks other than VCC. If the user of
2174     // this already constrained the register, we may incorrectly think it's VCC
2175     // if it wasn't originally.
2176     if (Size == 1)
2177       return false;
2178   }
2179 
2180   if (Size != 64) {
2181     I.setDesc(TII.get(Opcode));
2182     I.addImplicitDefUseOperands(*MF);
2183     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2184   }
2185 
2186   const DebugLoc &DL = I.getDebugLoc();
2187 
2188   APInt Imm(Size, I.getOperand(1).getImm());
2189 
2190   MachineInstr *ResInst;
2191   if (IsSgpr && TII.isInlineConstant(Imm)) {
2192     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2193       .addImm(I.getOperand(1).getImm());
2194   } else {
2195     const TargetRegisterClass *RC = IsSgpr ?
2196       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2197     Register LoReg = MRI->createVirtualRegister(RC);
2198     Register HiReg = MRI->createVirtualRegister(RC);
2199 
2200     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2201       .addImm(Imm.trunc(32).getZExtValue());
2202 
2203     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2204       .addImm(Imm.ashr(32).getZExtValue());
2205 
2206     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2207       .addReg(LoReg)
2208       .addImm(AMDGPU::sub0)
2209       .addReg(HiReg)
2210       .addImm(AMDGPU::sub1);
2211   }
2212 
2213   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2214   // work for target independent opcodes
2215   I.eraseFromParent();
2216   const TargetRegisterClass *DstRC =
2217     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2218   if (!DstRC)
2219     return true;
2220   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2221 }
2222 
2223 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2224   // Only manually handle the f64 SGPR case.
2225   //
2226   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2227   // the bit ops theoretically have a second result due to the implicit def of
2228   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2229   // that is easy by disabling the check. The result works, but uses a
2230   // nonsensical sreg32orlds_and_sreg_1 regclass.
2231   //
2232   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2233   // the variadic REG_SEQUENCE operands.
2234 
2235   Register Dst = MI.getOperand(0).getReg();
2236   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2237   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2238       MRI->getType(Dst) != LLT::scalar(64))
2239     return false;
2240 
2241   Register Src = MI.getOperand(1).getReg();
2242   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2243   if (Fabs)
2244     Src = Fabs->getOperand(1).getReg();
2245 
2246   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2247       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2248     return false;
2249 
2250   MachineBasicBlock *BB = MI.getParent();
2251   const DebugLoc &DL = MI.getDebugLoc();
2252   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2253   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2254   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2255   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2256 
2257   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2258     .addReg(Src, 0, AMDGPU::sub0);
2259   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2260     .addReg(Src, 0, AMDGPU::sub1);
2261   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2262     .addImm(0x80000000);
2263 
2264   // Set or toggle sign bit.
2265   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2266   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2267     .addReg(HiReg)
2268     .addReg(ConstReg);
2269   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2270     .addReg(LoReg)
2271     .addImm(AMDGPU::sub0)
2272     .addReg(OpReg)
2273     .addImm(AMDGPU::sub1);
2274   MI.eraseFromParent();
2275   return true;
2276 }
2277 
2278 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2279 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2280   Register Dst = MI.getOperand(0).getReg();
2281   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2282   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2283       MRI->getType(Dst) != LLT::scalar(64))
2284     return false;
2285 
2286   Register Src = MI.getOperand(1).getReg();
2287   MachineBasicBlock *BB = MI.getParent();
2288   const DebugLoc &DL = MI.getDebugLoc();
2289   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2290   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2291   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2292   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2293 
2294   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2295       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2296     return false;
2297 
2298   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2299     .addReg(Src, 0, AMDGPU::sub0);
2300   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2301     .addReg(Src, 0, AMDGPU::sub1);
2302   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2303     .addImm(0x7fffffff);
2304 
2305   // Clear sign bit.
2306   // TODO: Should this used S_BITSET0_*?
2307   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2308     .addReg(HiReg)
2309     .addReg(ConstReg);
2310   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2311     .addReg(LoReg)
2312     .addImm(AMDGPU::sub0)
2313     .addReg(OpReg)
2314     .addImm(AMDGPU::sub1);
2315 
2316   MI.eraseFromParent();
2317   return true;
2318 }
2319 
2320 static bool isConstant(const MachineInstr &MI) {
2321   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2322 }
2323 
2324 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2325     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2326 
2327   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2328 
2329   assert(PtrMI);
2330 
2331   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2332     return;
2333 
2334   GEPInfo GEPInfo(*PtrMI);
2335 
2336   for (unsigned i = 1; i != 3; ++i) {
2337     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2338     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2339     assert(OpDef);
2340     if (i == 2 && isConstant(*OpDef)) {
2341       // TODO: Could handle constant base + variable offset, but a combine
2342       // probably should have commuted it.
2343       assert(GEPInfo.Imm == 0);
2344       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2345       continue;
2346     }
2347     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2348     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2349       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2350     else
2351       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2352   }
2353 
2354   AddrInfo.push_back(GEPInfo);
2355   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2356 }
2357 
2358 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2359   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2360 }
2361 
2362 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2363   if (!MI.hasOneMemOperand())
2364     return false;
2365 
2366   const MachineMemOperand *MMO = *MI.memoperands_begin();
2367   const Value *Ptr = MMO->getValue();
2368 
2369   // UndefValue means this is a load of a kernel input.  These are uniform.
2370   // Sometimes LDS instructions have constant pointers.
2371   // If Ptr is null, then that means this mem operand contains a
2372   // PseudoSourceValue like GOT.
2373   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2374       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2375     return true;
2376 
2377   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2378     return true;
2379 
2380   const Instruction *I = dyn_cast<Instruction>(Ptr);
2381   return I && I->getMetadata("amdgpu.uniform");
2382 }
2383 
2384 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2385   for (const GEPInfo &GEPInfo : AddrInfo) {
2386     if (!GEPInfo.VgprParts.empty())
2387       return true;
2388   }
2389   return false;
2390 }
2391 
2392 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2393   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2394   unsigned AS = PtrTy.getAddressSpace();
2395   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2396       STI.ldsRequiresM0Init()) {
2397     MachineBasicBlock *BB = I.getParent();
2398 
2399     // If DS instructions require M0 initialization, insert it before selecting.
2400     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2401       .addImm(-1);
2402   }
2403 }
2404 
2405 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2406   MachineInstr &I) const {
2407   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2408     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2409     unsigned AS = PtrTy.getAddressSpace();
2410     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2411       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2412   }
2413 
2414   initM0(I);
2415   return selectImpl(I, *CoverageInfo);
2416 }
2417 
2418 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2419   if (Reg.isPhysical())
2420     return false;
2421 
2422   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2423   const unsigned Opcode = MI.getOpcode();
2424 
2425   if (Opcode == AMDGPU::COPY)
2426     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2427 
2428   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2429       Opcode == AMDGPU::G_XOR)
2430     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2431            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2432 
2433   if (Opcode == TargetOpcode::G_INTRINSIC)
2434     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2435 
2436   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2437 }
2438 
2439 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2440   MachineBasicBlock *BB = I.getParent();
2441   MachineOperand &CondOp = I.getOperand(0);
2442   Register CondReg = CondOp.getReg();
2443   const DebugLoc &DL = I.getDebugLoc();
2444 
2445   unsigned BrOpcode;
2446   Register CondPhysReg;
2447   const TargetRegisterClass *ConstrainRC;
2448 
2449   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2450   // whether the branch is uniform when selecting the instruction. In
2451   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2452   // RegBankSelect knows what it's doing if the branch condition is scc, even
2453   // though it currently does not.
2454   if (!isVCC(CondReg, *MRI)) {
2455     if (MRI->getType(CondReg) != LLT::scalar(32))
2456       return false;
2457 
2458     CondPhysReg = AMDGPU::SCC;
2459     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2460     ConstrainRC = &AMDGPU::SReg_32RegClass;
2461   } else {
2462     // FIXME: Should scc->vcc copies and with exec?
2463 
2464     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2465     // need to insert an and with exec.
2466     if (!isVCmpResult(CondReg, *MRI)) {
2467       const bool Is64 = STI.isWave64();
2468       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2469       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2470 
2471       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2472       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2473           .addReg(CondReg)
2474           .addReg(Exec);
2475       CondReg = TmpReg;
2476     }
2477 
2478     CondPhysReg = TRI.getVCC();
2479     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2480     ConstrainRC = TRI.getBoolRC();
2481   }
2482 
2483   if (!MRI->getRegClassOrNull(CondReg))
2484     MRI->setRegClass(CondReg, ConstrainRC);
2485 
2486   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2487     .addReg(CondReg);
2488   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2489     .addMBB(I.getOperand(1).getMBB());
2490 
2491   I.eraseFromParent();
2492   return true;
2493 }
2494 
2495 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2496   MachineInstr &I) const {
2497   Register DstReg = I.getOperand(0).getReg();
2498   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2499   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2500   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2501   if (IsVGPR)
2502     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2503 
2504   return RBI.constrainGenericRegister(
2505     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2506 }
2507 
2508 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2509   Register DstReg = I.getOperand(0).getReg();
2510   Register SrcReg = I.getOperand(1).getReg();
2511   Register MaskReg = I.getOperand(2).getReg();
2512   LLT Ty = MRI->getType(DstReg);
2513   LLT MaskTy = MRI->getType(MaskReg);
2514   MachineBasicBlock *BB = I.getParent();
2515   const DebugLoc &DL = I.getDebugLoc();
2516 
2517   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2518   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2519   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2520   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2521   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2522     return false;
2523 
2524   // Try to avoid emitting a bit operation when we only need to touch half of
2525   // the 64-bit pointer.
2526   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2527   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2528   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2529 
2530   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2531   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2532 
2533   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2534       !CanCopyLow32 && !CanCopyHi32) {
2535     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2536       .addReg(SrcReg)
2537       .addReg(MaskReg);
2538     I.eraseFromParent();
2539     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2540   }
2541 
2542   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2543   const TargetRegisterClass &RegRC
2544     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2545 
2546   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2547   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2548   const TargetRegisterClass *MaskRC =
2549       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2550 
2551   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2552       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2553       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2554     return false;
2555 
2556   if (Ty.getSizeInBits() == 32) {
2557     assert(MaskTy.getSizeInBits() == 32 &&
2558            "ptrmask should have been narrowed during legalize");
2559 
2560     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2561       .addReg(SrcReg)
2562       .addReg(MaskReg);
2563     I.eraseFromParent();
2564     return true;
2565   }
2566 
2567   Register HiReg = MRI->createVirtualRegister(&RegRC);
2568   Register LoReg = MRI->createVirtualRegister(&RegRC);
2569 
2570   // Extract the subregisters from the source pointer.
2571   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2572     .addReg(SrcReg, 0, AMDGPU::sub0);
2573   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2574     .addReg(SrcReg, 0, AMDGPU::sub1);
2575 
2576   Register MaskedLo, MaskedHi;
2577 
2578   if (CanCopyLow32) {
2579     // If all the bits in the low half are 1, we only need a copy for it.
2580     MaskedLo = LoReg;
2581   } else {
2582     // Extract the mask subregister and apply the and.
2583     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2584     MaskedLo = MRI->createVirtualRegister(&RegRC);
2585 
2586     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2587       .addReg(MaskReg, 0, AMDGPU::sub0);
2588     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2589       .addReg(LoReg)
2590       .addReg(MaskLo);
2591   }
2592 
2593   if (CanCopyHi32) {
2594     // If all the bits in the high half are 1, we only need a copy for it.
2595     MaskedHi = HiReg;
2596   } else {
2597     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2598     MaskedHi = MRI->createVirtualRegister(&RegRC);
2599 
2600     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2601       .addReg(MaskReg, 0, AMDGPU::sub1);
2602     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2603       .addReg(HiReg)
2604       .addReg(MaskHi);
2605   }
2606 
2607   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2608     .addReg(MaskedLo)
2609     .addImm(AMDGPU::sub0)
2610     .addReg(MaskedHi)
2611     .addImm(AMDGPU::sub1);
2612   I.eraseFromParent();
2613   return true;
2614 }
2615 
2616 /// Return the register to use for the index value, and the subregister to use
2617 /// for the indirectly accessed register.
2618 static std::pair<Register, unsigned>
2619 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2620                         const SIRegisterInfo &TRI,
2621                         const TargetRegisterClass *SuperRC,
2622                         Register IdxReg,
2623                         unsigned EltSize) {
2624   Register IdxBaseReg;
2625   int Offset;
2626 
2627   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2628   if (IdxBaseReg == AMDGPU::NoRegister) {
2629     // This will happen if the index is a known constant. This should ordinarily
2630     // be legalized out, but handle it as a register just in case.
2631     assert(Offset == 0);
2632     IdxBaseReg = IdxReg;
2633   }
2634 
2635   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2636 
2637   // Skip out of bounds offsets, or else we would end up using an undefined
2638   // register.
2639   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2640     return std::make_pair(IdxReg, SubRegs[0]);
2641   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2642 }
2643 
2644 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2645   MachineInstr &MI) const {
2646   Register DstReg = MI.getOperand(0).getReg();
2647   Register SrcReg = MI.getOperand(1).getReg();
2648   Register IdxReg = MI.getOperand(2).getReg();
2649 
2650   LLT DstTy = MRI->getType(DstReg);
2651   LLT SrcTy = MRI->getType(SrcReg);
2652 
2653   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2654   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2655   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2656 
2657   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2658   // into a waterfall loop.
2659   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2660     return false;
2661 
2662   const TargetRegisterClass *SrcRC =
2663       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2664   const TargetRegisterClass *DstRC =
2665       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2666   if (!SrcRC || !DstRC)
2667     return false;
2668   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2669       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2670       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2671     return false;
2672 
2673   MachineBasicBlock *BB = MI.getParent();
2674   const DebugLoc &DL = MI.getDebugLoc();
2675   const bool Is64 = DstTy.getSizeInBits() == 64;
2676 
2677   unsigned SubReg;
2678   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2679                                                      DstTy.getSizeInBits() / 8);
2680 
2681   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2682     if (DstTy.getSizeInBits() != 32 && !Is64)
2683       return false;
2684 
2685     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2686       .addReg(IdxReg);
2687 
2688     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2689     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2690       .addReg(SrcReg, 0, SubReg)
2691       .addReg(SrcReg, RegState::Implicit);
2692     MI.eraseFromParent();
2693     return true;
2694   }
2695 
2696   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2697     return false;
2698 
2699   if (!STI.useVGPRIndexMode()) {
2700     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2701       .addReg(IdxReg);
2702     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2703       .addReg(SrcReg, 0, SubReg)
2704       .addReg(SrcReg, RegState::Implicit);
2705     MI.eraseFromParent();
2706     return true;
2707   }
2708 
2709   const MCInstrDesc &GPRIDXDesc =
2710       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2711   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2712       .addReg(SrcReg)
2713       .addReg(IdxReg)
2714       .addImm(SubReg);
2715 
2716   MI.eraseFromParent();
2717   return true;
2718 }
2719 
2720 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2721 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2722   MachineInstr &MI) const {
2723   Register DstReg = MI.getOperand(0).getReg();
2724   Register VecReg = MI.getOperand(1).getReg();
2725   Register ValReg = MI.getOperand(2).getReg();
2726   Register IdxReg = MI.getOperand(3).getReg();
2727 
2728   LLT VecTy = MRI->getType(DstReg);
2729   LLT ValTy = MRI->getType(ValReg);
2730   unsigned VecSize = VecTy.getSizeInBits();
2731   unsigned ValSize = ValTy.getSizeInBits();
2732 
2733   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2734   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2735   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2736 
2737   assert(VecTy.getElementType() == ValTy);
2738 
2739   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2740   // into a waterfall loop.
2741   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2742     return false;
2743 
2744   const TargetRegisterClass *VecRC =
2745       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2746   const TargetRegisterClass *ValRC =
2747       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2748 
2749   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2750       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2751       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2752       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2753     return false;
2754 
2755   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2756     return false;
2757 
2758   unsigned SubReg;
2759   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2760                                                      ValSize / 8);
2761 
2762   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2763                          STI.useVGPRIndexMode();
2764 
2765   MachineBasicBlock *BB = MI.getParent();
2766   const DebugLoc &DL = MI.getDebugLoc();
2767 
2768   if (!IndexMode) {
2769     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2770       .addReg(IdxReg);
2771 
2772     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2773         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2774     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2775         .addReg(VecReg)
2776         .addReg(ValReg)
2777         .addImm(SubReg);
2778     MI.eraseFromParent();
2779     return true;
2780   }
2781 
2782   const MCInstrDesc &GPRIDXDesc =
2783       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2784   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2785       .addReg(VecReg)
2786       .addReg(ValReg)
2787       .addReg(IdxReg)
2788       .addImm(SubReg);
2789 
2790   MI.eraseFromParent();
2791   return true;
2792 }
2793 
2794 static bool isZeroOrUndef(int X) {
2795   return X == 0 || X == -1;
2796 }
2797 
2798 static bool isOneOrUndef(int X) {
2799   return X == 1 || X == -1;
2800 }
2801 
2802 static bool isZeroOrOneOrUndef(int X) {
2803   return X == 0 || X == 1 || X == -1;
2804 }
2805 
2806 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2807 // 32-bit register.
2808 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2809                                    ArrayRef<int> Mask) {
2810   NewMask[0] = Mask[0];
2811   NewMask[1] = Mask[1];
2812   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2813     return Src0;
2814 
2815   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2816   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2817 
2818   // Shift the mask inputs to be 0/1;
2819   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2820   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2821   return Src1;
2822 }
2823 
2824 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2825 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2826   MachineInstr &MI) const {
2827   Register DstReg = MI.getOperand(0).getReg();
2828   Register Src0Reg = MI.getOperand(1).getReg();
2829   Register Src1Reg = MI.getOperand(2).getReg();
2830   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2831 
2832   const LLT V2S16 = LLT::fixed_vector(2, 16);
2833   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2834     return false;
2835 
2836   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2837     return false;
2838 
2839   assert(ShufMask.size() == 2);
2840   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2841 
2842   MachineBasicBlock *MBB = MI.getParent();
2843   const DebugLoc &DL = MI.getDebugLoc();
2844 
2845   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2846   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2847   const TargetRegisterClass &RC = IsVALU ?
2848     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2849 
2850   // Handle the degenerate case which should have folded out.
2851   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2852     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2853 
2854     MI.eraseFromParent();
2855     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2856   }
2857 
2858   // A legal VOP3P mask only reads one of the sources.
2859   int Mask[2];
2860   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2861 
2862   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2863       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2864     return false;
2865 
2866   // TODO: This also should have been folded out
2867   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2868     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2869       .addReg(SrcVec);
2870 
2871     MI.eraseFromParent();
2872     return true;
2873   }
2874 
2875   if (Mask[0] == 1 && Mask[1] == -1) {
2876     if (IsVALU) {
2877       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2878         .addImm(16)
2879         .addReg(SrcVec);
2880     } else {
2881       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2882         .addReg(SrcVec)
2883         .addImm(16);
2884     }
2885   } else if (Mask[0] == -1 && Mask[1] == 0) {
2886     if (IsVALU) {
2887       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2888         .addImm(16)
2889         .addReg(SrcVec);
2890     } else {
2891       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2892         .addReg(SrcVec)
2893         .addImm(16);
2894     }
2895   } else if (Mask[0] == 0 && Mask[1] == 0) {
2896     if (IsVALU) {
2897       // Write low half of the register into the high half.
2898       MachineInstr *MovSDWA =
2899         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2900         .addImm(0)                             // $src0_modifiers
2901         .addReg(SrcVec)                        // $src0
2902         .addImm(0)                             // $clamp
2903         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2904         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2905         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2906         .addReg(SrcVec, RegState::Implicit);
2907       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2908     } else {
2909       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2910         .addReg(SrcVec)
2911         .addReg(SrcVec);
2912     }
2913   } else if (Mask[0] == 1 && Mask[1] == 1) {
2914     if (IsVALU) {
2915       // Write high half of the register into the low half.
2916       MachineInstr *MovSDWA =
2917         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2918         .addImm(0)                             // $src0_modifiers
2919         .addReg(SrcVec)                        // $src0
2920         .addImm(0)                             // $clamp
2921         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2922         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2923         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2924         .addReg(SrcVec, RegState::Implicit);
2925       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2926     } else {
2927       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2928         .addReg(SrcVec)
2929         .addReg(SrcVec);
2930     }
2931   } else if (Mask[0] == 1 && Mask[1] == 0) {
2932     if (IsVALU) {
2933       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2934         .addReg(SrcVec)
2935         .addReg(SrcVec)
2936         .addImm(16);
2937     } else {
2938       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2939       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2940         .addReg(SrcVec)
2941         .addImm(16);
2942       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2943         .addReg(TmpReg)
2944         .addReg(SrcVec);
2945     }
2946   } else
2947     llvm_unreachable("all shuffle masks should be handled");
2948 
2949   MI.eraseFromParent();
2950   return true;
2951 }
2952 
2953 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2954   MachineInstr &MI) const {
2955   if (STI.hasGFX90AInsts())
2956     return selectImpl(MI, *CoverageInfo);
2957 
2958   MachineBasicBlock *MBB = MI.getParent();
2959   const DebugLoc &DL = MI.getDebugLoc();
2960 
2961   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2962     Function &F = MBB->getParent()->getFunction();
2963     DiagnosticInfoUnsupported
2964       NoFpRet(F, "return versions of fp atomics not supported",
2965               MI.getDebugLoc(), DS_Error);
2966     F.getContext().diagnose(NoFpRet);
2967     return false;
2968   }
2969 
2970   // FIXME: This is only needed because tablegen requires number of dst operands
2971   // in match and replace pattern to be the same. Otherwise patterns can be
2972   // exported from SDag path.
2973   MachineOperand &VDataIn = MI.getOperand(1);
2974   MachineOperand &VIndex = MI.getOperand(3);
2975   MachineOperand &VOffset = MI.getOperand(4);
2976   MachineOperand &SOffset = MI.getOperand(5);
2977   int16_t Offset = MI.getOperand(6).getImm();
2978 
2979   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2980   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2981 
2982   unsigned Opcode;
2983   if (HasVOffset) {
2984     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2985                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2986   } else {
2987     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2988                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2989   }
2990 
2991   if (MRI->getType(VDataIn.getReg()).isVector()) {
2992     switch (Opcode) {
2993     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2994       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2995       break;
2996     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2997       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
2998       break;
2999     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3000       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3001       break;
3002     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3003       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3004       break;
3005     }
3006   }
3007 
3008   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3009   I.add(VDataIn);
3010 
3011   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3012       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3013     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3014     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3015       .addReg(VIndex.getReg())
3016       .addImm(AMDGPU::sub0)
3017       .addReg(VOffset.getReg())
3018       .addImm(AMDGPU::sub1);
3019 
3020     I.addReg(IdxReg);
3021   } else if (HasVIndex) {
3022     I.add(VIndex);
3023   } else if (HasVOffset) {
3024     I.add(VOffset);
3025   }
3026 
3027   I.add(MI.getOperand(2)); // rsrc
3028   I.add(SOffset);
3029   I.addImm(Offset);
3030   I.addImm(MI.getOperand(7).getImm()); // cpol
3031   I.cloneMemRefs(MI);
3032 
3033   MI.eraseFromParent();
3034 
3035   return true;
3036 }
3037 
3038 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3039   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3040 
3041   if (STI.hasGFX90AInsts()) {
3042     // gfx90a adds return versions of the global atomic fadd instructions so no
3043     // special handling is required.
3044     return selectImpl(MI, *CoverageInfo);
3045   }
3046 
3047   MachineBasicBlock *MBB = MI.getParent();
3048   const DebugLoc &DL = MI.getDebugLoc();
3049 
3050   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3051     Function &F = MBB->getParent()->getFunction();
3052     DiagnosticInfoUnsupported
3053       NoFpRet(F, "return versions of fp atomics not supported",
3054               MI.getDebugLoc(), DS_Error);
3055     F.getContext().diagnose(NoFpRet);
3056     return false;
3057   }
3058 
3059   // FIXME: This is only needed because tablegen requires number of dst operands
3060   // in match and replace pattern to be the same. Otherwise patterns can be
3061   // exported from SDag path.
3062   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3063 
3064   Register Data = DataOp.getReg();
3065   const unsigned Opc = MRI->getType(Data).isVector() ?
3066     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3067   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3068     .addReg(Addr.first)
3069     .addReg(Data)
3070     .addImm(Addr.second)
3071     .addImm(0) // cpol
3072     .cloneMemRefs(MI);
3073 
3074   MI.eraseFromParent();
3075   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3076 }
3077 
3078 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3079   unsigned Opc;
3080   unsigned Size = MI.getOperand(3).getImm();
3081 
3082   // The struct intrinsic variants add one additional operand over raw.
3083   const bool HasVIndex = MI.getNumOperands() == 9;
3084   Register VIndex;
3085   int OpOffset = 0;
3086   if (HasVIndex) {
3087     VIndex = MI.getOperand(4).getReg();
3088     OpOffset = 1;
3089   }
3090 
3091   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3092   Optional<ValueAndVReg> MaybeVOffset =
3093       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3094   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3095 
3096   switch (Size) {
3097   default:
3098     return false;
3099   case 1:
3100     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3101                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3102                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3103                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3104     break;
3105   case 2:
3106     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3107                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3108                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3109                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3110     break;
3111   case 4:
3112     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3113                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3114                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3115                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3116     break;
3117   }
3118 
3119   MachineBasicBlock *MBB = MI.getParent();
3120   const DebugLoc &DL = MI.getDebugLoc();
3121   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3122     .add(MI.getOperand(2));
3123 
3124   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3125 
3126   if (HasVIndex && HasVOffset) {
3127     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3128     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3129       .addReg(VIndex)
3130       .addImm(AMDGPU::sub0)
3131       .addReg(VOffset)
3132       .addImm(AMDGPU::sub1);
3133 
3134     MIB.addReg(IdxReg);
3135   } else if (HasVIndex) {
3136     MIB.addReg(VIndex);
3137   } else if (HasVOffset) {
3138     MIB.addReg(VOffset);
3139   }
3140 
3141   MIB.add(MI.getOperand(1));            // rsrc
3142   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3143   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3144   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3145   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3146   MIB.addImm((Aux >> 3) & 1);           // swz
3147 
3148   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3149   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3150   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3151   MachinePointerInfo StorePtrI = LoadPtrI;
3152   StorePtrI.V = nullptr;
3153   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3154 
3155   auto F = LoadMMO->getFlags() &
3156            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3157   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3158                                      Size, LoadMMO->getBaseAlign());
3159 
3160   MachineMemOperand *StoreMMO =
3161       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3162                                sizeof(int32_t), LoadMMO->getBaseAlign());
3163 
3164   MIB.setMemRefs({LoadMMO, StoreMMO});
3165 
3166   MI.eraseFromParent();
3167   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3168 }
3169 
3170 /// Match a zero extend from a 32-bit value to 64-bits.
3171 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3172   Register ZExtSrc;
3173   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3174     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3175 
3176   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3177   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3178   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3179     return false;
3180 
3181   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3182     return Def->getOperand(1).getReg();
3183   }
3184 
3185   return Register();
3186 }
3187 
3188 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
3189   unsigned Opc;
3190   unsigned Size = MI.getOperand(3).getImm();
3191 
3192   switch (Size) {
3193   default:
3194     return false;
3195   case 1:
3196     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3197     break;
3198   case 2:
3199     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3200     break;
3201   case 4:
3202     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3203     break;
3204   }
3205 
3206   MachineBasicBlock *MBB = MI.getParent();
3207   const DebugLoc &DL = MI.getDebugLoc();
3208   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3209     .add(MI.getOperand(2));
3210 
3211   Register Addr = MI.getOperand(1).getReg();
3212   Register VOffset;
3213   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3214   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3215   if (!isSGPR(Addr)) {
3216     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3217     if (isSGPR(AddrDef->Reg)) {
3218       Addr = AddrDef->Reg;
3219     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3220       Register SAddr =
3221           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3222       if (SAddr && isSGPR(SAddr)) {
3223         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3224         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3225           Addr = SAddr;
3226           VOffset = Off;
3227         }
3228       }
3229     }
3230   }
3231 
3232   if (isSGPR(Addr)) {
3233     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3234     if (!VOffset) {
3235       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3236       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3237         .addImm(0);
3238     }
3239   }
3240 
3241   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3242     .addReg(Addr);
3243 
3244   if (isSGPR(Addr))
3245     MIB.addReg(VOffset);
3246 
3247   MIB.add(MI.getOperand(4))  // offset
3248      .add(MI.getOperand(5)); // cpol
3249 
3250   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3251   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3252   LoadPtrI.Offset = MI.getOperand(4).getImm();
3253   MachinePointerInfo StorePtrI = LoadPtrI;
3254   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3255   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3256   auto F = LoadMMO->getFlags() &
3257            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3258   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3259                                      Size, LoadMMO->getBaseAlign());
3260   MachineMemOperand *StoreMMO =
3261       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3262                                sizeof(int32_t), Align(4));
3263 
3264   MIB.setMemRefs({LoadMMO, StoreMMO});
3265 
3266   MI.eraseFromParent();
3267   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3268 }
3269 
3270 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3271   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3272   MI.removeOperand(1);
3273   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3274   return true;
3275 }
3276 
3277 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3278   unsigned Opc;
3279   switch (MI.getIntrinsicID()) {
3280   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3281     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3282     break;
3283   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3284     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3285     break;
3286   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3287     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3288     break;
3289   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3290     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3291     break;
3292   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3293     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3294     break;
3295   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3296     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3297     break;
3298   default:
3299     llvm_unreachable("unhandled smfmac intrinsic");
3300   }
3301 
3302   auto VDst_In = MI.getOperand(4);
3303 
3304   MI.setDesc(TII.get(Opc));
3305   MI.removeOperand(4); // VDst_In
3306   MI.removeOperand(1); // Intrinsic ID
3307   MI.addOperand(VDst_In); // Readd VDst_In to the end
3308   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3309   return true;
3310 }
3311 
3312 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3313   Register DstReg = MI.getOperand(0).getReg();
3314   Register SrcReg = MI.getOperand(1).getReg();
3315   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3316   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3317   MachineBasicBlock *MBB = MI.getParent();
3318   const DebugLoc &DL = MI.getDebugLoc();
3319 
3320   if (IsVALU) {
3321     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3322       .addImm(Subtarget->getWavefrontSizeLog2())
3323       .addReg(SrcReg);
3324   } else {
3325     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3326       .addReg(SrcReg)
3327       .addImm(Subtarget->getWavefrontSizeLog2());
3328   }
3329 
3330   const TargetRegisterClass &RC =
3331       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3332   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3333     return false;
3334 
3335   MI.eraseFromParent();
3336   return true;
3337 }
3338 
3339 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3340   if (I.isPHI())
3341     return selectPHI(I);
3342 
3343   if (!I.isPreISelOpcode()) {
3344     if (I.isCopy())
3345       return selectCOPY(I);
3346     return true;
3347   }
3348 
3349   switch (I.getOpcode()) {
3350   case TargetOpcode::G_AND:
3351   case TargetOpcode::G_OR:
3352   case TargetOpcode::G_XOR:
3353     if (selectImpl(I, *CoverageInfo))
3354       return true;
3355     return selectG_AND_OR_XOR(I);
3356   case TargetOpcode::G_ADD:
3357   case TargetOpcode::G_SUB:
3358     if (selectImpl(I, *CoverageInfo))
3359       return true;
3360     return selectG_ADD_SUB(I);
3361   case TargetOpcode::G_UADDO:
3362   case TargetOpcode::G_USUBO:
3363   case TargetOpcode::G_UADDE:
3364   case TargetOpcode::G_USUBE:
3365     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3366   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3367   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3368     return selectG_AMDGPU_MAD_64_32(I);
3369   case TargetOpcode::G_INTTOPTR:
3370   case TargetOpcode::G_BITCAST:
3371   case TargetOpcode::G_PTRTOINT:
3372     return selectCOPY(I);
3373   case TargetOpcode::G_CONSTANT:
3374   case TargetOpcode::G_FCONSTANT:
3375     return selectG_CONSTANT(I);
3376   case TargetOpcode::G_FNEG:
3377     if (selectImpl(I, *CoverageInfo))
3378       return true;
3379     return selectG_FNEG(I);
3380   case TargetOpcode::G_FABS:
3381     if (selectImpl(I, *CoverageInfo))
3382       return true;
3383     return selectG_FABS(I);
3384   case TargetOpcode::G_EXTRACT:
3385     return selectG_EXTRACT(I);
3386   case TargetOpcode::G_MERGE_VALUES:
3387   case TargetOpcode::G_BUILD_VECTOR:
3388   case TargetOpcode::G_CONCAT_VECTORS:
3389     return selectG_MERGE_VALUES(I);
3390   case TargetOpcode::G_UNMERGE_VALUES:
3391     return selectG_UNMERGE_VALUES(I);
3392   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3393     return selectG_BUILD_VECTOR_TRUNC(I);
3394   case TargetOpcode::G_PTR_ADD:
3395     return selectG_PTR_ADD(I);
3396   case TargetOpcode::G_IMPLICIT_DEF:
3397     return selectG_IMPLICIT_DEF(I);
3398   case TargetOpcode::G_FREEZE:
3399     return selectCOPY(I);
3400   case TargetOpcode::G_INSERT:
3401     return selectG_INSERT(I);
3402   case TargetOpcode::G_INTRINSIC:
3403     return selectG_INTRINSIC(I);
3404   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3405     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3406   case TargetOpcode::G_ICMP:
3407     if (selectG_ICMP(I))
3408       return true;
3409     return selectImpl(I, *CoverageInfo);
3410   case TargetOpcode::G_LOAD:
3411   case TargetOpcode::G_STORE:
3412   case TargetOpcode::G_ATOMIC_CMPXCHG:
3413   case TargetOpcode::G_ATOMICRMW_XCHG:
3414   case TargetOpcode::G_ATOMICRMW_ADD:
3415   case TargetOpcode::G_ATOMICRMW_SUB:
3416   case TargetOpcode::G_ATOMICRMW_AND:
3417   case TargetOpcode::G_ATOMICRMW_OR:
3418   case TargetOpcode::G_ATOMICRMW_XOR:
3419   case TargetOpcode::G_ATOMICRMW_MIN:
3420   case TargetOpcode::G_ATOMICRMW_MAX:
3421   case TargetOpcode::G_ATOMICRMW_UMIN:
3422   case TargetOpcode::G_ATOMICRMW_UMAX:
3423   case TargetOpcode::G_ATOMICRMW_FADD:
3424   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3425   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3426   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3427   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3428     return selectG_LOAD_STORE_ATOMICRMW(I);
3429   case TargetOpcode::G_SELECT:
3430     return selectG_SELECT(I);
3431   case TargetOpcode::G_TRUNC:
3432     return selectG_TRUNC(I);
3433   case TargetOpcode::G_SEXT:
3434   case TargetOpcode::G_ZEXT:
3435   case TargetOpcode::G_ANYEXT:
3436   case TargetOpcode::G_SEXT_INREG:
3437     if (selectImpl(I, *CoverageInfo))
3438       return true;
3439     return selectG_SZA_EXT(I);
3440   case TargetOpcode::G_BRCOND:
3441     return selectG_BRCOND(I);
3442   case TargetOpcode::G_GLOBAL_VALUE:
3443     return selectG_GLOBAL_VALUE(I);
3444   case TargetOpcode::G_PTRMASK:
3445     return selectG_PTRMASK(I);
3446   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3447     return selectG_EXTRACT_VECTOR_ELT(I);
3448   case TargetOpcode::G_INSERT_VECTOR_ELT:
3449     return selectG_INSERT_VECTOR_ELT(I);
3450   case TargetOpcode::G_SHUFFLE_VECTOR:
3451     return selectG_SHUFFLE_VECTOR(I);
3452   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3453   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3454   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3455   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3456     const AMDGPU::ImageDimIntrinsicInfo *Intr
3457       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3458     assert(Intr && "not an image intrinsic with image pseudo");
3459     return selectImageIntrinsic(I, Intr);
3460   }
3461   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3462     return selectBVHIntrinsic(I);
3463   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3464     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3465   case AMDGPU::G_SBFX:
3466   case AMDGPU::G_UBFX:
3467     return selectG_SBFX_UBFX(I);
3468   case AMDGPU::G_SI_CALL:
3469     I.setDesc(TII.get(AMDGPU::SI_CALL));
3470     return true;
3471   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3472     return selectWaveAddress(I);
3473   default:
3474     return selectImpl(I, *CoverageInfo);
3475   }
3476   return false;
3477 }
3478 
3479 InstructionSelector::ComplexRendererFns
3480 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3481   return {{
3482       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3483   }};
3484 
3485 }
3486 
3487 std::pair<Register, unsigned>
3488 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3489                                               bool AllowAbs) const {
3490   Register Src = Root.getReg();
3491   Register OrigSrc = Src;
3492   unsigned Mods = 0;
3493   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3494 
3495   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3496     Src = MI->getOperand(1).getReg();
3497     Mods |= SISrcMods::NEG;
3498     MI = getDefIgnoringCopies(Src, *MRI);
3499   }
3500 
3501   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3502     Src = MI->getOperand(1).getReg();
3503     Mods |= SISrcMods::ABS;
3504   }
3505 
3506   if (Mods != 0 &&
3507       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3508     MachineInstr *UseMI = Root.getParent();
3509 
3510     // If we looked through copies to find source modifiers on an SGPR operand,
3511     // we now have an SGPR register source. To avoid potentially violating the
3512     // constant bus restriction, we need to insert a copy to a VGPR.
3513     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3514     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3515             TII.get(AMDGPU::COPY), VGPRSrc)
3516       .addReg(Src);
3517     Src = VGPRSrc;
3518   }
3519 
3520   return std::make_pair(Src, Mods);
3521 }
3522 
3523 ///
3524 /// This will select either an SGPR or VGPR operand and will save us from
3525 /// having to write an extra tablegen pattern.
3526 InstructionSelector::ComplexRendererFns
3527 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3528   return {{
3529       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3530   }};
3531 }
3532 
3533 InstructionSelector::ComplexRendererFns
3534 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3535   Register Src;
3536   unsigned Mods;
3537   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3538 
3539   return {{
3540       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3541       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3542       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3543       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3544   }};
3545 }
3546 
3547 InstructionSelector::ComplexRendererFns
3548 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3549   Register Src;
3550   unsigned Mods;
3551   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3552 
3553   return {{
3554       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3555       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3556       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3557       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3558   }};
3559 }
3560 
3561 InstructionSelector::ComplexRendererFns
3562 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3563   return {{
3564       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3565       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3566       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3567   }};
3568 }
3569 
3570 InstructionSelector::ComplexRendererFns
3571 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3572   Register Src;
3573   unsigned Mods;
3574   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3575 
3576   return {{
3577       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3578       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3579   }};
3580 }
3581 
3582 InstructionSelector::ComplexRendererFns
3583 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3584   Register Src;
3585   unsigned Mods;
3586   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3587 
3588   return {{
3589       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3590       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3591   }};
3592 }
3593 
3594 InstructionSelector::ComplexRendererFns
3595 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3596   Register Reg = Root.getReg();
3597   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3598   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3599               Def->getOpcode() == AMDGPU::G_FABS))
3600     return {};
3601   return {{
3602       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3603   }};
3604 }
3605 
3606 std::pair<Register, unsigned>
3607 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3608   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3609   unsigned Mods = 0;
3610   MachineInstr *MI = MRI.getVRegDef(Src);
3611 
3612   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3613       // It's possible to see an f32 fneg here, but unlikely.
3614       // TODO: Treat f32 fneg as only high bit.
3615       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3616     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3617     Src = MI->getOperand(1).getReg();
3618     MI = MRI.getVRegDef(Src);
3619   }
3620 
3621   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3622   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3623 
3624   // Packed instructions do not have abs modifiers.
3625   Mods |= SISrcMods::OP_SEL_1;
3626 
3627   return std::make_pair(Src, Mods);
3628 }
3629 
3630 InstructionSelector::ComplexRendererFns
3631 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3632   MachineRegisterInfo &MRI
3633     = Root.getParent()->getParent()->getParent()->getRegInfo();
3634 
3635   Register Src;
3636   unsigned Mods;
3637   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3638 
3639   return {{
3640       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3641       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3642   }};
3643 }
3644 
3645 InstructionSelector::ComplexRendererFns
3646 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3647   MachineRegisterInfo &MRI
3648     = Root.getParent()->getParent()->getParent()->getRegInfo();
3649 
3650   Register Src;
3651   unsigned Mods;
3652   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3653 
3654   return {{
3655       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3656       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3657   }};
3658 }
3659 
3660 InstructionSelector::ComplexRendererFns
3661 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3662   Register Src;
3663   unsigned Mods;
3664   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3665   if (!isKnownNeverNaN(Src, *MRI))
3666     return None;
3667 
3668   return {{
3669       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3670       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3671   }};
3672 }
3673 
3674 InstructionSelector::ComplexRendererFns
3675 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3676   // FIXME: Handle op_sel
3677   return {{
3678       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3679       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3680   }};
3681 }
3682 
3683 InstructionSelector::ComplexRendererFns
3684 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3685   SmallVector<GEPInfo, 4> AddrInfo;
3686   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3687 
3688   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3689     return None;
3690 
3691   const GEPInfo &GEPInfo = AddrInfo[0];
3692   Optional<int64_t> EncodedImm =
3693       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3694   if (!EncodedImm)
3695     return None;
3696 
3697   unsigned PtrReg = GEPInfo.SgprParts[0];
3698   return {{
3699     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3700     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3701   }};
3702 }
3703 
3704 InstructionSelector::ComplexRendererFns
3705 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3706   SmallVector<GEPInfo, 4> AddrInfo;
3707   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3708 
3709   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3710     return None;
3711 
3712   const GEPInfo &GEPInfo = AddrInfo[0];
3713   Register PtrReg = GEPInfo.SgprParts[0];
3714   Optional<int64_t> EncodedImm =
3715       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3716   if (!EncodedImm)
3717     return None;
3718 
3719   return {{
3720     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3721     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3722   }};
3723 }
3724 
3725 InstructionSelector::ComplexRendererFns
3726 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3727   MachineInstr *MI = Root.getParent();
3728   MachineBasicBlock *MBB = MI->getParent();
3729 
3730   SmallVector<GEPInfo, 4> AddrInfo;
3731   getAddrModeInfo(*MI, *MRI, AddrInfo);
3732 
3733   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3734   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3735   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3736     return None;
3737 
3738   const GEPInfo &GEPInfo = AddrInfo[0];
3739   // SGPR offset is unsigned.
3740   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3741     return None;
3742 
3743   // If we make it this far we have a load with an 32-bit immediate offset.
3744   // It is OK to select this using a sgpr offset, because we have already
3745   // failed trying to select this load into one of the _IMM variants since
3746   // the _IMM Patterns are considered before the _SGPR patterns.
3747   Register PtrReg = GEPInfo.SgprParts[0];
3748   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3749   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3750           .addImm(GEPInfo.Imm);
3751   return {{
3752     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3753     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3754   }};
3755 }
3756 
3757 std::pair<Register, int>
3758 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3759                                                 uint64_t FlatVariant) const {
3760   MachineInstr *MI = Root.getParent();
3761 
3762   auto Default = std::make_pair(Root.getReg(), 0);
3763 
3764   if (!STI.hasFlatInstOffsets())
3765     return Default;
3766 
3767   Register PtrBase;
3768   int64_t ConstOffset;
3769   std::tie(PtrBase, ConstOffset) =
3770       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3771   if (ConstOffset == 0)
3772     return Default;
3773 
3774   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3775   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3776     return Default;
3777 
3778   return std::make_pair(PtrBase, ConstOffset);
3779 }
3780 
3781 InstructionSelector::ComplexRendererFns
3782 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3783   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3784 
3785   return {{
3786       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3787       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3788     }};
3789 }
3790 
3791 InstructionSelector::ComplexRendererFns
3792 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3793   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3794 
3795   return {{
3796       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3797       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3798   }};
3799 }
3800 
3801 InstructionSelector::ComplexRendererFns
3802 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3803   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3804 
3805   return {{
3806       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3807       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3808     }};
3809 }
3810 
3811 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3812 InstructionSelector::ComplexRendererFns
3813 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3814   Register Addr = Root.getReg();
3815   Register PtrBase;
3816   int64_t ConstOffset;
3817   int64_t ImmOffset = 0;
3818 
3819   // Match the immediate offset first, which canonically is moved as low as
3820   // possible.
3821   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3822 
3823   if (ConstOffset != 0) {
3824     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3825                               SIInstrFlags::FlatGlobal)) {
3826       Addr = PtrBase;
3827       ImmOffset = ConstOffset;
3828     } else {
3829       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3830       if (isSGPR(PtrBaseDef->Reg)) {
3831         if (ConstOffset > 0) {
3832           // Offset is too large.
3833           //
3834           // saddr + large_offset -> saddr +
3835           //                         (voffset = large_offset & ~MaxOffset) +
3836           //                         (large_offset & MaxOffset);
3837           int64_t SplitImmOffset, RemainderOffset;
3838           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3839               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3840 
3841           if (isUInt<32>(RemainderOffset)) {
3842             MachineInstr *MI = Root.getParent();
3843             MachineBasicBlock *MBB = MI->getParent();
3844             Register HighBits =
3845                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3846 
3847             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3848                     HighBits)
3849                 .addImm(RemainderOffset);
3850 
3851             return {{
3852                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3853                 [=](MachineInstrBuilder &MIB) {
3854                   MIB.addReg(HighBits);
3855                 }, // voffset
3856                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3857             }};
3858           }
3859         }
3860 
3861         // We are adding a 64 bit SGPR and a constant. If constant bus limit
3862         // is 1 we would need to perform 1 or 2 extra moves for each half of
3863         // the constant and it is better to do a scalar add and then issue a
3864         // single VALU instruction to materialize zero. Otherwise it is less
3865         // instructions to perform VALU adds with immediates or inline literals.
3866         unsigned NumLiterals =
3867             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3868             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3869         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3870           return None;
3871       }
3872     }
3873   }
3874 
3875   // Match the variable offset.
3876   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3877   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3878     // Look through the SGPR->VGPR copy.
3879     Register SAddr =
3880         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3881 
3882     if (SAddr && isSGPR(SAddr)) {
3883       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3884 
3885       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3886       // inserted later.
3887       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3888         return {{[=](MachineInstrBuilder &MIB) { // saddr
3889                    MIB.addReg(SAddr);
3890                  },
3891                  [=](MachineInstrBuilder &MIB) { // voffset
3892                    MIB.addReg(VOffset);
3893                  },
3894                  [=](MachineInstrBuilder &MIB) { // offset
3895                    MIB.addImm(ImmOffset);
3896                  }}};
3897       }
3898     }
3899   }
3900 
3901   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3902   // drop this.
3903   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3904       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3905     return None;
3906 
3907   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3908   // moves required to copy a 64-bit SGPR to VGPR.
3909   MachineInstr *MI = Root.getParent();
3910   MachineBasicBlock *MBB = MI->getParent();
3911   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3912 
3913   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3914       .addImm(0);
3915 
3916   return {{
3917       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3918       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3919       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3920   }};
3921 }
3922 
3923 InstructionSelector::ComplexRendererFns
3924 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3925   Register Addr = Root.getReg();
3926   Register PtrBase;
3927   int64_t ConstOffset;
3928   int64_t ImmOffset = 0;
3929 
3930   // Match the immediate offset first, which canonically is moved as low as
3931   // possible.
3932   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3933 
3934   if (ConstOffset != 0 &&
3935       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3936                             SIInstrFlags::FlatScratch)) {
3937     Addr = PtrBase;
3938     ImmOffset = ConstOffset;
3939   }
3940 
3941   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3942   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3943     int FI = AddrDef->MI->getOperand(1).getIndex();
3944     return {{
3945         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3946         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3947     }};
3948   }
3949 
3950   Register SAddr = AddrDef->Reg;
3951 
3952   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3953     Register LHS = AddrDef->MI->getOperand(1).getReg();
3954     Register RHS = AddrDef->MI->getOperand(2).getReg();
3955     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3956     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3957 
3958     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3959         isSGPR(RHSDef->Reg)) {
3960       int FI = LHSDef->MI->getOperand(1).getIndex();
3961       MachineInstr &I = *Root.getParent();
3962       MachineBasicBlock *BB = I.getParent();
3963       const DebugLoc &DL = I.getDebugLoc();
3964       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3965 
3966       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3967           .addFrameIndex(FI)
3968           .addReg(RHSDef->Reg);
3969     }
3970   }
3971 
3972   if (!isSGPR(SAddr))
3973     return None;
3974 
3975   return {{
3976       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3977       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3978   }};
3979 }
3980 
3981 InstructionSelector::ComplexRendererFns
3982 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
3983   Register Addr = Root.getReg();
3984   Register PtrBase;
3985   int64_t ConstOffset;
3986   int64_t ImmOffset = 0;
3987 
3988   // Match the immediate offset first, which canonically is moved as low as
3989   // possible.
3990   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3991 
3992   if (ConstOffset != 0 &&
3993       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3994     Addr = PtrBase;
3995     ImmOffset = ConstOffset;
3996   }
3997 
3998   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3999   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4000     return None;
4001 
4002   Register RHS = AddrDef->MI->getOperand(2).getReg();
4003   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4004     return None;
4005 
4006   Register LHS = AddrDef->MI->getOperand(1).getReg();
4007   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4008 
4009   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4010     int FI = LHSDef->MI->getOperand(1).getIndex();
4011     return {{
4012         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4013         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4014         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4015     }};
4016   }
4017 
4018   if (!isSGPR(LHS))
4019     return None;
4020 
4021   return {{
4022       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4023       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4024       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4025   }};
4026 }
4027 
4028 InstructionSelector::ComplexRendererFns
4029 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4030   MachineInstr *MI = Root.getParent();
4031   MachineBasicBlock *MBB = MI->getParent();
4032   MachineFunction *MF = MBB->getParent();
4033   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4034 
4035   int64_t Offset = 0;
4036   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4037       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4038     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4039 
4040     // TODO: Should this be inside the render function? The iterator seems to
4041     // move.
4042     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4043             HighBits)
4044       .addImm(Offset & ~4095);
4045 
4046     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4047                MIB.addReg(Info->getScratchRSrcReg());
4048              },
4049              [=](MachineInstrBuilder &MIB) { // vaddr
4050                MIB.addReg(HighBits);
4051              },
4052              [=](MachineInstrBuilder &MIB) { // soffset
4053                // Use constant zero for soffset and rely on eliminateFrameIndex
4054                // to choose the appropriate frame register if need be.
4055                MIB.addImm(0);
4056              },
4057              [=](MachineInstrBuilder &MIB) { // offset
4058                MIB.addImm(Offset & 4095);
4059              }}};
4060   }
4061 
4062   assert(Offset == 0 || Offset == -1);
4063 
4064   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4065   // offsets.
4066   Optional<int> FI;
4067   Register VAddr = Root.getReg();
4068   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4069     Register PtrBase;
4070     int64_t ConstOffset;
4071     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4072     if (ConstOffset != 0) {
4073       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4074           (!STI.privateMemoryResourceIsRangeChecked() ||
4075            KnownBits->signBitIsZero(PtrBase))) {
4076         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4077         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4078           FI = PtrBaseDef->getOperand(1).getIndex();
4079         else
4080           VAddr = PtrBase;
4081         Offset = ConstOffset;
4082       }
4083     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4084       FI = RootDef->getOperand(1).getIndex();
4085     }
4086   }
4087 
4088   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4089              MIB.addReg(Info->getScratchRSrcReg());
4090            },
4091            [=](MachineInstrBuilder &MIB) { // vaddr
4092              if (FI.hasValue())
4093                MIB.addFrameIndex(FI.getValue());
4094              else
4095                MIB.addReg(VAddr);
4096            },
4097            [=](MachineInstrBuilder &MIB) { // soffset
4098              // Use constant zero for soffset and rely on eliminateFrameIndex
4099              // to choose the appropriate frame register if need be.
4100              MIB.addImm(0);
4101            },
4102            [=](MachineInstrBuilder &MIB) { // offset
4103              MIB.addImm(Offset);
4104            }}};
4105 }
4106 
4107 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4108                                                 int64_t Offset) const {
4109   if (!isUInt<16>(Offset))
4110     return false;
4111 
4112   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4113     return true;
4114 
4115   // On Southern Islands instruction with a negative base value and an offset
4116   // don't seem to work.
4117   return KnownBits->signBitIsZero(Base);
4118 }
4119 
4120 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4121                                                  int64_t Offset1,
4122                                                  unsigned Size) const {
4123   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4124     return false;
4125   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4126     return false;
4127 
4128   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4129     return true;
4130 
4131   // On Southern Islands instruction with a negative base value and an offset
4132   // don't seem to work.
4133   return KnownBits->signBitIsZero(Base);
4134 }
4135 
4136 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4137                                                     unsigned ShAmtBits) const {
4138   assert(MI.getOpcode() == TargetOpcode::G_AND);
4139 
4140   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4141   if (!RHS)
4142     return false;
4143 
4144   if (RHS->countTrailingOnes() >= ShAmtBits)
4145     return true;
4146 
4147   const APInt &LHSKnownZeros =
4148       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4149   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4150 }
4151 
4152 // Return the wave level SGPR base address if this is a wave address.
4153 static Register getWaveAddress(const MachineInstr *Def) {
4154   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4155              ? Def->getOperand(1).getReg()
4156              : Register();
4157 }
4158 
4159 InstructionSelector::ComplexRendererFns
4160 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4161     MachineOperand &Root) const {
4162   Register Reg = Root.getReg();
4163   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4164 
4165   const MachineInstr *Def = MRI->getVRegDef(Reg);
4166   if (Register WaveBase = getWaveAddress(Def)) {
4167     return {{
4168         [=](MachineInstrBuilder &MIB) { // rsrc
4169           MIB.addReg(Info->getScratchRSrcReg());
4170         },
4171         [=](MachineInstrBuilder &MIB) { // soffset
4172           MIB.addReg(WaveBase);
4173         },
4174         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4175     }};
4176   }
4177 
4178   int64_t Offset = 0;
4179 
4180   // FIXME: Copy check is a hack
4181   Register BasePtr;
4182   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4183     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4184       return {};
4185     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4186     Register WaveBase = getWaveAddress(BasePtrDef);
4187     if (!WaveBase)
4188       return {};
4189 
4190     return {{
4191         [=](MachineInstrBuilder &MIB) { // rsrc
4192           MIB.addReg(Info->getScratchRSrcReg());
4193         },
4194         [=](MachineInstrBuilder &MIB) { // soffset
4195           MIB.addReg(WaveBase);
4196         },
4197         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4198     }};
4199   }
4200 
4201   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4202       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4203     return {};
4204 
4205   return {{
4206       [=](MachineInstrBuilder &MIB) { // rsrc
4207         MIB.addReg(Info->getScratchRSrcReg());
4208       },
4209       [=](MachineInstrBuilder &MIB) { // soffset
4210         MIB.addImm(0);
4211       },
4212       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4213   }};
4214 }
4215 
4216 std::pair<Register, unsigned>
4217 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4218   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4219   if (!RootDef)
4220     return std::make_pair(Root.getReg(), 0);
4221 
4222   int64_t ConstAddr = 0;
4223 
4224   Register PtrBase;
4225   int64_t Offset;
4226   std::tie(PtrBase, Offset) =
4227     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4228 
4229   if (Offset) {
4230     if (isDSOffsetLegal(PtrBase, Offset)) {
4231       // (add n0, c0)
4232       return std::make_pair(PtrBase, Offset);
4233     }
4234   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4235     // TODO
4236 
4237 
4238   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4239     // TODO
4240 
4241   }
4242 
4243   return std::make_pair(Root.getReg(), 0);
4244 }
4245 
4246 InstructionSelector::ComplexRendererFns
4247 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4248   Register Reg;
4249   unsigned Offset;
4250   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4251   return {{
4252       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4253       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4254     }};
4255 }
4256 
4257 InstructionSelector::ComplexRendererFns
4258 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4259   return selectDSReadWrite2(Root, 4);
4260 }
4261 
4262 InstructionSelector::ComplexRendererFns
4263 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4264   return selectDSReadWrite2(Root, 8);
4265 }
4266 
4267 InstructionSelector::ComplexRendererFns
4268 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4269                                               unsigned Size) const {
4270   Register Reg;
4271   unsigned Offset;
4272   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4273   return {{
4274       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4275       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4276       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4277     }};
4278 }
4279 
4280 std::pair<Register, unsigned>
4281 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4282                                                   unsigned Size) const {
4283   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4284   if (!RootDef)
4285     return std::make_pair(Root.getReg(), 0);
4286 
4287   int64_t ConstAddr = 0;
4288 
4289   Register PtrBase;
4290   int64_t Offset;
4291   std::tie(PtrBase, Offset) =
4292     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4293 
4294   if (Offset) {
4295     int64_t OffsetValue0 = Offset;
4296     int64_t OffsetValue1 = Offset + Size;
4297     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4298       // (add n0, c0)
4299       return std::make_pair(PtrBase, OffsetValue0 / Size);
4300     }
4301   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4302     // TODO
4303 
4304   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4305     // TODO
4306 
4307   }
4308 
4309   return std::make_pair(Root.getReg(), 0);
4310 }
4311 
4312 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4313 /// the base value with the constant offset. There may be intervening copies
4314 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4315 /// not match the pattern.
4316 std::pair<Register, int64_t>
4317 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4318   Register Root, const MachineRegisterInfo &MRI) const {
4319   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4320   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4321     return {Root, 0};
4322 
4323   MachineOperand &RHS = RootI->getOperand(2);
4324   Optional<ValueAndVReg> MaybeOffset =
4325       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4326   if (!MaybeOffset)
4327     return {Root, 0};
4328   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4329 }
4330 
4331 static void addZeroImm(MachineInstrBuilder &MIB) {
4332   MIB.addImm(0);
4333 }
4334 
4335 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4336 /// BasePtr is not valid, a null base pointer will be used.
4337 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4338                           uint32_t FormatLo, uint32_t FormatHi,
4339                           Register BasePtr) {
4340   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4341   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4342   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4343   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4344 
4345   B.buildInstr(AMDGPU::S_MOV_B32)
4346     .addDef(RSrc2)
4347     .addImm(FormatLo);
4348   B.buildInstr(AMDGPU::S_MOV_B32)
4349     .addDef(RSrc3)
4350     .addImm(FormatHi);
4351 
4352   // Build the half of the subregister with the constants before building the
4353   // full 128-bit register. If we are building multiple resource descriptors,
4354   // this will allow CSEing of the 2-component register.
4355   B.buildInstr(AMDGPU::REG_SEQUENCE)
4356     .addDef(RSrcHi)
4357     .addReg(RSrc2)
4358     .addImm(AMDGPU::sub0)
4359     .addReg(RSrc3)
4360     .addImm(AMDGPU::sub1);
4361 
4362   Register RSrcLo = BasePtr;
4363   if (!BasePtr) {
4364     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4365     B.buildInstr(AMDGPU::S_MOV_B64)
4366       .addDef(RSrcLo)
4367       .addImm(0);
4368   }
4369 
4370   B.buildInstr(AMDGPU::REG_SEQUENCE)
4371     .addDef(RSrc)
4372     .addReg(RSrcLo)
4373     .addImm(AMDGPU::sub0_sub1)
4374     .addReg(RSrcHi)
4375     .addImm(AMDGPU::sub2_sub3);
4376 
4377   return RSrc;
4378 }
4379 
4380 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4381                                 const SIInstrInfo &TII, Register BasePtr) {
4382   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4383 
4384   // FIXME: Why are half the "default" bits ignored based on the addressing
4385   // mode?
4386   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4387 }
4388 
4389 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4390                                const SIInstrInfo &TII, Register BasePtr) {
4391   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4392 
4393   // FIXME: Why are half the "default" bits ignored based on the addressing
4394   // mode?
4395   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4396 }
4397 
4398 AMDGPUInstructionSelector::MUBUFAddressData
4399 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4400   MUBUFAddressData Data;
4401   Data.N0 = Src;
4402 
4403   Register PtrBase;
4404   int64_t Offset;
4405 
4406   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4407   if (isUInt<32>(Offset)) {
4408     Data.N0 = PtrBase;
4409     Data.Offset = Offset;
4410   }
4411 
4412   if (MachineInstr *InputAdd
4413       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4414     Data.N2 = InputAdd->getOperand(1).getReg();
4415     Data.N3 = InputAdd->getOperand(2).getReg();
4416 
4417     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4418     // FIXME: Don't know this was defined by operand 0
4419     //
4420     // TODO: Remove this when we have copy folding optimizations after
4421     // RegBankSelect.
4422     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4423     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4424   }
4425 
4426   return Data;
4427 }
4428 
4429 /// Return if the addr64 mubuf mode should be used for the given address.
4430 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4431   // (ptr_add N2, N3) -> addr64, or
4432   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4433   if (Addr.N2)
4434     return true;
4435 
4436   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4437   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4438 }
4439 
4440 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4441 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4442 /// component.
4443 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4444   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4445   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4446     return;
4447 
4448   // Illegal offset, store it in soffset.
4449   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4450   B.buildInstr(AMDGPU::S_MOV_B32)
4451     .addDef(SOffset)
4452     .addImm(ImmOffset);
4453   ImmOffset = 0;
4454 }
4455 
4456 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4457   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4458   Register &SOffset, int64_t &Offset) const {
4459   // FIXME: Predicates should stop this from reaching here.
4460   // addr64 bit was removed for volcanic islands.
4461   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4462     return false;
4463 
4464   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4465   if (!shouldUseAddr64(AddrData))
4466     return false;
4467 
4468   Register N0 = AddrData.N0;
4469   Register N2 = AddrData.N2;
4470   Register N3 = AddrData.N3;
4471   Offset = AddrData.Offset;
4472 
4473   // Base pointer for the SRD.
4474   Register SRDPtr;
4475 
4476   if (N2) {
4477     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4478       assert(N3);
4479       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4480         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4481         // addr64, and construct the default resource from a 0 address.
4482         VAddr = N0;
4483       } else {
4484         SRDPtr = N3;
4485         VAddr = N2;
4486       }
4487     } else {
4488       // N2 is not divergent.
4489       SRDPtr = N2;
4490       VAddr = N3;
4491     }
4492   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4493     // Use the default null pointer in the resource
4494     VAddr = N0;
4495   } else {
4496     // N0 -> offset, or
4497     // (N0 + C1) -> offset
4498     SRDPtr = N0;
4499   }
4500 
4501   MachineIRBuilder B(*Root.getParent());
4502   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4503   splitIllegalMUBUFOffset(B, SOffset, Offset);
4504   return true;
4505 }
4506 
4507 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4508   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4509   int64_t &Offset) const {
4510 
4511   // FIXME: Pattern should not reach here.
4512   if (STI.useFlatForGlobal())
4513     return false;
4514 
4515   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4516   if (shouldUseAddr64(AddrData))
4517     return false;
4518 
4519   // N0 -> offset, or
4520   // (N0 + C1) -> offset
4521   Register SRDPtr = AddrData.N0;
4522   Offset = AddrData.Offset;
4523 
4524   // TODO: Look through extensions for 32-bit soffset.
4525   MachineIRBuilder B(*Root.getParent());
4526 
4527   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4528   splitIllegalMUBUFOffset(B, SOffset, Offset);
4529   return true;
4530 }
4531 
4532 InstructionSelector::ComplexRendererFns
4533 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4534   Register VAddr;
4535   Register RSrcReg;
4536   Register SOffset;
4537   int64_t Offset = 0;
4538 
4539   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4540     return {};
4541 
4542   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4543   // pattern.
4544   return {{
4545       [=](MachineInstrBuilder &MIB) {  // rsrc
4546         MIB.addReg(RSrcReg);
4547       },
4548       [=](MachineInstrBuilder &MIB) { // vaddr
4549         MIB.addReg(VAddr);
4550       },
4551       [=](MachineInstrBuilder &MIB) { // soffset
4552         if (SOffset)
4553           MIB.addReg(SOffset);
4554         else
4555           MIB.addImm(0);
4556       },
4557       [=](MachineInstrBuilder &MIB) { // offset
4558         MIB.addImm(Offset);
4559       },
4560       addZeroImm, //  cpol
4561       addZeroImm, //  tfe
4562       addZeroImm  //  swz
4563     }};
4564 }
4565 
4566 InstructionSelector::ComplexRendererFns
4567 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4568   Register RSrcReg;
4569   Register SOffset;
4570   int64_t Offset = 0;
4571 
4572   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4573     return {};
4574 
4575   return {{
4576       [=](MachineInstrBuilder &MIB) {  // rsrc
4577         MIB.addReg(RSrcReg);
4578       },
4579       [=](MachineInstrBuilder &MIB) { // soffset
4580         if (SOffset)
4581           MIB.addReg(SOffset);
4582         else
4583           MIB.addImm(0);
4584       },
4585       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4586       addZeroImm, //  cpol
4587       addZeroImm, //  tfe
4588       addZeroImm, //  swz
4589     }};
4590 }
4591 
4592 InstructionSelector::ComplexRendererFns
4593 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4594   Register VAddr;
4595   Register RSrcReg;
4596   Register SOffset;
4597   int64_t Offset = 0;
4598 
4599   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4600     return {};
4601 
4602   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4603   // pattern.
4604   return {{
4605       [=](MachineInstrBuilder &MIB) {  // rsrc
4606         MIB.addReg(RSrcReg);
4607       },
4608       [=](MachineInstrBuilder &MIB) { // vaddr
4609         MIB.addReg(VAddr);
4610       },
4611       [=](MachineInstrBuilder &MIB) { // soffset
4612         if (SOffset)
4613           MIB.addReg(SOffset);
4614         else
4615           MIB.addImm(0);
4616       },
4617       [=](MachineInstrBuilder &MIB) { // offset
4618         MIB.addImm(Offset);
4619       },
4620       [=](MachineInstrBuilder &MIB) {
4621         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4622       }
4623     }};
4624 }
4625 
4626 InstructionSelector::ComplexRendererFns
4627 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4628   Register RSrcReg;
4629   Register SOffset;
4630   int64_t Offset = 0;
4631 
4632   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4633     return {};
4634 
4635   return {{
4636       [=](MachineInstrBuilder &MIB) {  // rsrc
4637         MIB.addReg(RSrcReg);
4638       },
4639       [=](MachineInstrBuilder &MIB) { // soffset
4640         if (SOffset)
4641           MIB.addReg(SOffset);
4642         else
4643           MIB.addImm(0);
4644       },
4645       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4646       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4647     }};
4648 }
4649 
4650 /// Get an immediate that must be 32-bits, and treated as zero extended.
4651 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4652                                                const MachineRegisterInfo &MRI) {
4653   // getIConstantVRegVal sexts any values, so see if that matters.
4654   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4655   if (!OffsetVal || !isInt<32>(*OffsetVal))
4656     return None;
4657   return Lo_32(*OffsetVal);
4658 }
4659 
4660 InstructionSelector::ComplexRendererFns
4661 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4662   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4663   if (!OffsetVal)
4664     return {};
4665 
4666   Optional<int64_t> EncodedImm =
4667       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4668   if (!EncodedImm)
4669     return {};
4670 
4671   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4672 }
4673 
4674 InstructionSelector::ComplexRendererFns
4675 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4676   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4677 
4678   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4679   if (!OffsetVal)
4680     return {};
4681 
4682   Optional<int64_t> EncodedImm
4683     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4684   if (!EncodedImm)
4685     return {};
4686 
4687   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4688 }
4689 
4690 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4691                                                  const MachineInstr &MI,
4692                                                  int OpIdx) const {
4693   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4694          "Expected G_CONSTANT");
4695   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4696 }
4697 
4698 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4699                                                 const MachineInstr &MI,
4700                                                 int OpIdx) const {
4701   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4702          "Expected G_CONSTANT");
4703   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4704 }
4705 
4706 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4707                                                  const MachineInstr &MI,
4708                                                  int OpIdx) const {
4709   assert(OpIdx == -1);
4710 
4711   const MachineOperand &Op = MI.getOperand(1);
4712   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4713     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4714   else {
4715     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4716     MIB.addImm(Op.getCImm()->getSExtValue());
4717   }
4718 }
4719 
4720 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4721                                                 const MachineInstr &MI,
4722                                                 int OpIdx) const {
4723   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4724          "Expected G_CONSTANT");
4725   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4726 }
4727 
4728 /// This only really exists to satisfy DAG type checking machinery, so is a
4729 /// no-op here.
4730 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4731                                                 const MachineInstr &MI,
4732                                                 int OpIdx) const {
4733   MIB.addImm(MI.getOperand(OpIdx).getImm());
4734 }
4735 
4736 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4737                                                   const MachineInstr &MI,
4738                                                   int OpIdx) const {
4739   assert(OpIdx >= 0 && "expected to match an immediate operand");
4740   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4741 }
4742 
4743 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4744                                                  const MachineInstr &MI,
4745                                                  int OpIdx) const {
4746   assert(OpIdx >= 0 && "expected to match an immediate operand");
4747   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4748 }
4749 
4750 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4751                                              const MachineInstr &MI,
4752                                              int OpIdx) const {
4753   assert(OpIdx >= 0 && "expected to match an immediate operand");
4754   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4755 }
4756 
4757 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4758                                                  const MachineInstr &MI,
4759                                                  int OpIdx) const {
4760   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4761 }
4762 
4763 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4764   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4765 }
4766 
4767 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4768   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4769 }
4770 
4771 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4772   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4773 }
4774 
4775 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4776   return TII.isInlineConstant(Imm);
4777 }
4778