1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
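// Pull in the TableGen-generated selector implementation. The generated code
// refers to AMDGPUSubtarget, so alias it to GCNSubtarget for this include.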
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
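  // No register class has been assigned yet, so check the register bank.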
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
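// Lower a copy-like intrinsic (wqm, softwqm, strict_wwm, strict_wqm) to its
// pseudo instruction: drop the intrinsic ID operand, add an implicit use of
// EXEC, and constrain source and destination to a common register class.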
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
156         // We can't trust the high bits at this point, so clear them.
157 
158         // TODO: Skip masking high bits if def is known boolean.
159 
160         unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
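        // Compare the masked value against zero to materialize the wave-wide
        // boolean result.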
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
209   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
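// Return a 32-bit operand for the requested half (sub0/sub1) of a 64-bit
// operand: either a copy of the corresponding subregister into a register of
// SubRC, or the matching 32 bits of a split immediate.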
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
259 
260   switch (SubIdx) {
261   default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
337     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
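  // 64-bit add: add the 32-bit halves separately, chaining the carry, then
  // recombine the halves with a REG_SEQUENCE.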
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
380     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
    .addImm(AMDGPU::sub1);

398   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
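  // If the carry-out was assigned to VCC, use the VALU add/sub-with-carry
  // instructions; otherwise use the scalar forms that write and read SCC.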
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
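// Select the 64-bit multiply-add pseudos to V_MAD_U64_U32 / V_MAD_I64_I32,
// using the GFX11 encodings on GFX11 targets.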
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   unsigned Opc;
468   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
469     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
470                      : AMDGPU::V_MAD_I64_I32_gfx11_e64;
471   else
472     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
473   I.setDesc(TII.get(Opc));
474   I.addOperand(*MF, MachineOperand::CreateImm(0));
475   I.addImplicitDefUseOperands(*MF);
476   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
477 }
478 
479 // TODO: We should probably legalize these to only using 32-bit results.
480 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
481   MachineBasicBlock *BB = I.getParent();
482   Register DstReg = I.getOperand(0).getReg();
483   Register SrcReg = I.getOperand(1).getReg();
484   LLT DstTy = MRI->getType(DstReg);
485   LLT SrcTy = MRI->getType(SrcReg);
486   const unsigned SrcSize = SrcTy.getSizeInBits();
487   unsigned DstSize = DstTy.getSizeInBits();
488 
489   // TODO: Should handle any multiple of 32 offset.
490   unsigned Offset = I.getOperand(2).getImm();
491   if (Offset % 32 != 0 || DstSize > 128)
492     return false;
493 
494   // 16-bit operations really use 32-bit registers.
495   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
496   if (DstSize == 16)
497     DstSize = 32;
498 
499   const TargetRegisterClass *DstRC =
500     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
501   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
502     return false;
503 
504   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
505   const TargetRegisterClass *SrcRC =
506       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
507   if (!SrcRC)
508     return false;
509   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
510                                                          DstSize / 32);
511   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
512   if (!SrcRC)
513     return false;
514 
515   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
516                                     *SrcRC, I.getOperand(1));
517   const DebugLoc &DL = I.getDebugLoc();
518   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
519     .addReg(SrcReg, 0, SubReg);
520 
521   I.eraseFromParent();
522   return true;
523 }
524 
525 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
526   MachineBasicBlock *BB = MI.getParent();
527   Register DstReg = MI.getOperand(0).getReg();
528   LLT DstTy = MRI->getType(DstReg);
529   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
530 
531   const unsigned SrcSize = SrcTy.getSizeInBits();
532   if (SrcSize < 32)
533     return selectImpl(MI, *CoverageInfo);
534 
535   const DebugLoc &DL = MI.getDebugLoc();
536   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
537   const unsigned DstSize = DstTy.getSizeInBits();
538   const TargetRegisterClass *DstRC =
539       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
540   if (!DstRC)
541     return false;
542 
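  // Stitch the sources together with a REG_SEQUENCE, giving each source the
  // subregister index that corresponds to its position and size.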
543   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
544   MachineInstrBuilder MIB =
545     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
546   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
547     MachineOperand &Src = MI.getOperand(I + 1);
548     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
549     MIB.addImm(SubRegs[I]);
550 
551     const TargetRegisterClass *SrcRC
552       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
553     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
554       return false;
555   }
556 
557   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
558     return false;
559 
560   MI.eraseFromParent();
561   return true;
562 }
563 
564 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
565   MachineBasicBlock *BB = MI.getParent();
566   const int NumDst = MI.getNumOperands() - 1;
567 
568   MachineOperand &Src = MI.getOperand(NumDst);
569 
570   Register SrcReg = Src.getReg();
571   Register DstReg0 = MI.getOperand(0).getReg();
572   LLT DstTy = MRI->getType(DstReg0);
573   LLT SrcTy = MRI->getType(SrcReg);
574 
575   const unsigned DstSize = DstTy.getSizeInBits();
576   const unsigned SrcSize = SrcTy.getSizeInBits();
577   const DebugLoc &DL = MI.getDebugLoc();
578   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
579 
580   const TargetRegisterClass *SrcRC =
581       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
582   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
583     return false;
584 
585   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
586   // source, and this relies on the fact that the same subregister indices are
587   // used for both.
588   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
589   for (int I = 0, E = NumDst; I != E; ++I) {
590     MachineOperand &Dst = MI.getOperand(I);
591     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
592       .addReg(SrcReg, 0, SubRegs[I]);
593 
594     // Make sure the subregister index is valid for the source register.
595     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
596     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
597       return false;
598 
599     const TargetRegisterClass *DstRC =
600       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
601     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
602       return false;
603   }
604 
605   MI.eraseFromParent();
606   return true;
607 }
608 
609 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
610   MachineInstr &MI) const {
611   if (selectImpl(MI, *CoverageInfo))
612     return true;
613 
614   const LLT S32 = LLT::scalar(32);
615   const LLT V2S16 = LLT::fixed_vector(2, 16);
616 
617   Register Dst = MI.getOperand(0).getReg();
618   if (MRI->getType(Dst) != V2S16)
619     return false;
620 
621   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
622   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
623     return false;
624 
625   Register Src0 = MI.getOperand(1).getReg();
626   Register Src1 = MI.getOperand(2).getReg();
627   if (MRI->getType(Src0) != S32)
628     return false;
629 
630   const DebugLoc &DL = MI.getDebugLoc();
631   MachineBasicBlock *BB = MI.getParent();
632 
633   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
634   if (ConstSrc1) {
635     auto ConstSrc0 =
636         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
637     if (ConstSrc0) {
638       const int64_t K0 = ConstSrc0->Value.getSExtValue();
639       const int64_t K1 = ConstSrc1->Value.getSExtValue();
640       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
641       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
642 
643       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
644         .addImm(Lo16 | (Hi16 << 16));
645       MI.eraseFromParent();
646       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
647     }
648   }
649 
650   // TODO: This should probably be a combine somewhere
  // (build_vector_trunc $src0, undef) -> copy $src0
652   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
653   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
654     MI.setDesc(TII.get(AMDGPU::COPY));
655     MI.removeOperand(2);
656     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
657            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
658   }
659 
660   Register ShiftSrc0;
661   Register ShiftSrc1;
662 
663   // With multiple uses of the shift, this will duplicate the shift and
664   // increase register pressure.
665   //
  //  (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16))
667   //  => (S_PACK_HH_B32_B16 $src0, $src1)
668   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
669   //  => (S_PACK_LH_B32_B16 $src0, $src1)
670   // (build_vector_trunc $src0, $src1)
671   //  => (S_PACK_LL_B32_B16 $src0, $src1)
672 
673   bool Shift0 = mi_match(
674       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
675 
676   bool Shift1 = mi_match(
677       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
678 
679   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
680   if (Shift0 && Shift1) {
681     Opc = AMDGPU::S_PACK_HH_B32_B16;
682     MI.getOperand(1).setReg(ShiftSrc0);
683     MI.getOperand(2).setReg(ShiftSrc1);
684   } else if (Shift1) {
685     Opc = AMDGPU::S_PACK_LH_B32_B16;
686     MI.getOperand(2).setReg(ShiftSrc1);
687   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
688     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
689     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
690       .addReg(ShiftSrc0)
691       .addImm(16);
692 
693     MI.eraseFromParent();
694     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
695   }
696 
697   MI.setDesc(TII.get(Opc));
698   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
699 }
700 
701 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
702   return selectG_ADD_SUB(I);
703 }
704 
705 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
706   const MachineOperand &MO = I.getOperand(0);
707 
708   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
709   // regbank check here is to know why getConstrainedRegClassForOperand failed.
710   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
711   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
712       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
713     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
714     return true;
715   }
716 
717   return false;
718 }
719 
720 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
721   MachineBasicBlock *BB = I.getParent();
722 
723   Register DstReg = I.getOperand(0).getReg();
724   Register Src0Reg = I.getOperand(1).getReg();
725   Register Src1Reg = I.getOperand(2).getReg();
726   LLT Src1Ty = MRI->getType(Src1Reg);
727 
728   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
729   unsigned InsSize = Src1Ty.getSizeInBits();
730 
731   int64_t Offset = I.getOperand(3).getImm();
732 
733   // FIXME: These cases should have been illegal and unnecessary to check here.
734   if (Offset % 32 != 0 || InsSize % 32 != 0)
735     return false;
736 
737   // Currently not handled by getSubRegFromChannel.
738   if (InsSize > 128)
739     return false;
740 
741   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
742   if (SubReg == AMDGPU::NoSubRegister)
743     return false;
744 
745   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
746   const TargetRegisterClass *DstRC =
747       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
748   if (!DstRC)
749     return false;
750 
751   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
752   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
753   const TargetRegisterClass *Src0RC =
754       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
755   const TargetRegisterClass *Src1RC =
756       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
757 
758   // Deal with weird cases where the class only partially supports the subreg
759   // index.
760   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
761   if (!Src0RC || !Src1RC)
762     return false;
763 
764   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
765       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
766       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
767     return false;
768 
769   const DebugLoc &DL = I.getDebugLoc();
770   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
771     .addReg(Src0Reg)
772     .addReg(Src1Reg)
773     .addImm(SubReg);
774 
775   I.eraseFromParent();
776   return true;
777 }
778 
779 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
780   Register DstReg = MI.getOperand(0).getReg();
781   Register SrcReg = MI.getOperand(1).getReg();
782   Register OffsetReg = MI.getOperand(2).getReg();
783   Register WidthReg = MI.getOperand(3).getReg();
784 
785   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
786          "scalar BFX instructions are expanded in regbankselect");
787   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
788          "64-bit vector BFX instructions are expanded in regbankselect");
789 
790   const DebugLoc &DL = MI.getDebugLoc();
791   MachineBasicBlock *MBB = MI.getParent();
792 
793   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
794   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
795   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
796                  .addReg(SrcReg)
797                  .addReg(OffsetReg)
798                  .addReg(WidthReg);
799   MI.eraseFromParent();
800   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
801 }
802 
803 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
804   if (STI.getLDSBankCount() != 16)
805     return selectImpl(MI, *CoverageInfo);
806 
807   Register Dst = MI.getOperand(0).getReg();
808   Register Src0 = MI.getOperand(2).getReg();
809   Register M0Val = MI.getOperand(6).getReg();
810   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
811       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
812       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
813     return false;
814 
815   // This requires 2 instructions. It is possible to write a pattern to support
816   // this, but the generated isel emitter doesn't correctly deal with multiple
817   // output instructions using the same physical register input. The copy to m0
818   // is incorrectly placed before the second instruction.
819   //
820   // TODO: Match source modifiers.
821 
822   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
823   const DebugLoc &DL = MI.getDebugLoc();
824   MachineBasicBlock *MBB = MI.getParent();
825 
826   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
827     .addReg(M0Val);
828   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
829     .addImm(2)
830     .addImm(MI.getOperand(4).getImm())  // $attr
831     .addImm(MI.getOperand(3).getImm()); // $attrchan
832 
833   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
834     .addImm(0)                          // $src0_modifiers
835     .addReg(Src0)                       // $src0
836     .addImm(MI.getOperand(4).getImm())  // $attr
837     .addImm(MI.getOperand(3).getImm())  // $attrchan
838     .addImm(0)                          // $src2_modifiers
839     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
840     .addImm(MI.getOperand(5).getImm())  // $high
841     .addImm(0)                          // $clamp
842     .addImm(0);                         // $omod
843 
844   MI.eraseFromParent();
845   return true;
846 }
847 
848 // Writelane is special in that it can use SGPR and M0 (which would normally
849 // count as using the constant bus twice - but in this case it is allowed since
850 // the lane selector doesn't count as a use of the constant bus). However, it is
851 // still required to abide by the 1 SGPR rule. Fix this up if we might have
852 // multiple SGPRs.
853 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
854   // With a constant bus limit of at least 2, there's no issue.
855   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
856     return selectImpl(MI, *CoverageInfo);
857 
858   MachineBasicBlock *MBB = MI.getParent();
859   const DebugLoc &DL = MI.getDebugLoc();
860   Register VDst = MI.getOperand(0).getReg();
861   Register Val = MI.getOperand(2).getReg();
862   Register LaneSelect = MI.getOperand(3).getReg();
863   Register VDstIn = MI.getOperand(4).getReg();
864 
865   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
866 
867   Optional<ValueAndVReg> ConstSelect =
868       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
869   if (ConstSelect) {
870     // The selector has to be an inline immediate, so we can use whatever for
871     // the other operands.
872     MIB.addReg(Val);
873     MIB.addImm(ConstSelect->Value.getSExtValue() &
874                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
875   } else {
876     Optional<ValueAndVReg> ConstVal =
877         getIConstantVRegValWithLookThrough(Val, *MRI);
878 
879     // If the value written is an inline immediate, we can get away without a
880     // copy to m0.
881     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
882                                                  STI.hasInv2PiInlineImm())) {
883       MIB.addImm(ConstVal->Value.getSExtValue());
884       MIB.addReg(LaneSelect);
885     } else {
886       MIB.addReg(Val);
887 
888       // If the lane selector was originally in a VGPR and copied with
889       // readfirstlane, there's a hazard to read the same SGPR from the
890       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
891       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
892 
893       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
894         .addReg(LaneSelect);
895       MIB.addReg(AMDGPU::M0);
896     }
897   }
898 
899   MIB.addReg(VDstIn);
900 
901   MI.eraseFromParent();
902   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
903 }
904 
905 // We need to handle this here because tablegen doesn't support matching
906 // instructions with multiple outputs.
907 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
908   Register Dst0 = MI.getOperand(0).getReg();
909   Register Dst1 = MI.getOperand(1).getReg();
910 
911   LLT Ty = MRI->getType(Dst0);
912   unsigned Opc;
913   if (Ty == LLT::scalar(32))
914     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
915   else if (Ty == LLT::scalar(64))
916     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
917   else
918     return false;
919 
920   // TODO: Match source modifiers.
921 
922   const DebugLoc &DL = MI.getDebugLoc();
923   MachineBasicBlock *MBB = MI.getParent();
924 
925   Register Numer = MI.getOperand(3).getReg();
926   Register Denom = MI.getOperand(4).getReg();
927   unsigned ChooseDenom = MI.getOperand(5).getImm();
928 
929   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
930 
931   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
932     .addDef(Dst1)
933     .addImm(0)     // $src0_modifiers
934     .addUse(Src0)  // $src0
935     .addImm(0)     // $src1_modifiers
936     .addUse(Denom) // $src1
937     .addImm(0)     // $src2_modifiers
938     .addUse(Numer) // $src2
939     .addImm(0)     // $clamp
940     .addImm(0);    // $omod
941 
942   MI.eraseFromParent();
943   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
944 }
945 
946 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
947   unsigned IntrinsicID = I.getIntrinsicID();
948   switch (IntrinsicID) {
949   case Intrinsic::amdgcn_if_break: {
950     MachineBasicBlock *BB = I.getParent();
951 
952     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
953     // SelectionDAG uses for wave32 vs wave64.
954     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
955       .add(I.getOperand(0))
956       .add(I.getOperand(2))
957       .add(I.getOperand(3));
958 
959     Register DstReg = I.getOperand(0).getReg();
960     Register Src0Reg = I.getOperand(2).getReg();
961     Register Src1Reg = I.getOperand(3).getReg();
962 
963     I.eraseFromParent();
964 
965     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
966       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
967 
968     return true;
969   }
970   case Intrinsic::amdgcn_interp_p1_f16:
971     return selectInterpP1F16(I);
972   case Intrinsic::amdgcn_wqm:
973     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
974   case Intrinsic::amdgcn_softwqm:
975     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
976   case Intrinsic::amdgcn_strict_wwm:
977   case Intrinsic::amdgcn_wwm:
978     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
979   case Intrinsic::amdgcn_strict_wqm:
980     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
981   case Intrinsic::amdgcn_writelane:
982     return selectWritelane(I);
983   case Intrinsic::amdgcn_div_scale:
984     return selectDivScale(I);
985   case Intrinsic::amdgcn_icmp:
986     return selectIntrinsicIcmp(I);
987   case Intrinsic::amdgcn_ballot:
988     return selectBallot(I);
989   case Intrinsic::amdgcn_reloc_constant:
990     return selectRelocConstant(I);
991   case Intrinsic::amdgcn_groupstaticsize:
992     return selectGroupStaticSize(I);
993   case Intrinsic::returnaddress:
994     return selectReturnAddress(I);
995   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
996   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
997   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
998   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
999   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1000   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1001     return selectSMFMACIntrin(I);
1002   default:
1003     return selectImpl(I, *CoverageInfo);
1004   }
1005 }
1006 
1007 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1008   if (Size != 32 && Size != 64)
1009     return -1;
1010   switch (P) {
1011   default:
1012     llvm_unreachable("Unknown condition code!");
1013   case CmpInst::ICMP_NE:
1014     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1015   case CmpInst::ICMP_EQ:
1016     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1017   case CmpInst::ICMP_SGT:
1018     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1019   case CmpInst::ICMP_SGE:
1020     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1021   case CmpInst::ICMP_SLT:
1022     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1023   case CmpInst::ICMP_SLE:
1024     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1025   case CmpInst::ICMP_UGT:
1026     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1027   case CmpInst::ICMP_UGE:
1028     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1029   case CmpInst::ICMP_ULT:
1030     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1031   case CmpInst::ICMP_ULE:
1032     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1033   }
1034 }
1035 
1036 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1037                                               unsigned Size) const {
1038   if (Size == 64) {
1039     if (!STI.hasScalarCompareEq64())
1040       return -1;
1041 
1042     switch (P) {
1043     case CmpInst::ICMP_NE:
1044       return AMDGPU::S_CMP_LG_U64;
1045     case CmpInst::ICMP_EQ:
1046       return AMDGPU::S_CMP_EQ_U64;
1047     default:
1048       return -1;
1049     }
1050   }
1051 
1052   if (Size != 32)
1053     return -1;
1054 
1055   switch (P) {
1056   case CmpInst::ICMP_NE:
1057     return AMDGPU::S_CMP_LG_U32;
1058   case CmpInst::ICMP_EQ:
1059     return AMDGPU::S_CMP_EQ_U32;
1060   case CmpInst::ICMP_SGT:
1061     return AMDGPU::S_CMP_GT_I32;
1062   case CmpInst::ICMP_SGE:
1063     return AMDGPU::S_CMP_GE_I32;
1064   case CmpInst::ICMP_SLT:
1065     return AMDGPU::S_CMP_LT_I32;
1066   case CmpInst::ICMP_SLE:
1067     return AMDGPU::S_CMP_LE_I32;
1068   case CmpInst::ICMP_UGT:
1069     return AMDGPU::S_CMP_GT_U32;
1070   case CmpInst::ICMP_UGE:
1071     return AMDGPU::S_CMP_GE_U32;
1072   case CmpInst::ICMP_ULT:
1073     return AMDGPU::S_CMP_LT_U32;
1074   case CmpInst::ICMP_ULE:
1075     return AMDGPU::S_CMP_LE_U32;
1076   default:
1077     llvm_unreachable("Unknown condition code!");
1078   }
1079 }
1080 
1081 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1082   MachineBasicBlock *BB = I.getParent();
1083   const DebugLoc &DL = I.getDebugLoc();
1084 
1085   Register SrcReg = I.getOperand(2).getReg();
1086   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1087 
1088   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1089 
1090   Register CCReg = I.getOperand(0).getReg();
1091   if (!isVCC(CCReg, *MRI)) {
1092     int Opcode = getS_CMPOpcode(Pred, Size);
1093     if (Opcode == -1)
1094       return false;
1095     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1096             .add(I.getOperand(2))
1097             .add(I.getOperand(3));
1098     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1099       .addReg(AMDGPU::SCC);
1100     bool Ret =
1101         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1102         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1103     I.eraseFromParent();
1104     return Ret;
1105   }
1106 
1107   int Opcode = getV_CMPOpcode(Pred, Size);
1108   if (Opcode == -1)
1109     return false;
1110 
1111   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1112             I.getOperand(0).getReg())
1113             .add(I.getOperand(2))
1114             .add(I.getOperand(3));
1115   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1116                                *TRI.getBoolRC(), *MRI);
1117   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1118   I.eraseFromParent();
1119   return Ret;
1120 }
1121 
1122 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1123   Register Dst = I.getOperand(0).getReg();
1124   if (isVCC(Dst, *MRI))
1125     return false;
1126 
1127   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1128     return false;
1129 
1130   MachineBasicBlock *BB = I.getParent();
1131   const DebugLoc &DL = I.getDebugLoc();
1132   Register SrcReg = I.getOperand(2).getReg();
1133   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1134 
1135   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1136   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1137     MachineInstr *ICmp =
1138         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1139 
1140     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1141                                       *TRI.getBoolRC(), *MRI))
1142       return false;
1143     I.eraseFromParent();
1144     return true;
1145   }
1146 
1147   int Opcode = getV_CMPOpcode(Pred, Size);
1148   if (Opcode == -1)
1149     return false;
1150 
1151   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1152                            .add(I.getOperand(2))
1153                            .add(I.getOperand(3));
1154   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1155                                *MRI);
1156   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1157   I.eraseFromParent();
1158   return Ret;
1159 }
1160 
1161 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1162   MachineBasicBlock *BB = I.getParent();
1163   const DebugLoc &DL = I.getDebugLoc();
1164   Register DstReg = I.getOperand(0).getReg();
1165   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1166   const bool Is64 = Size == 64;
1167 
1168   if (Size != STI.getWavefrontSize())
1169     return false;
1170 
1171   Optional<ValueAndVReg> Arg =
1172       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1173 
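  // Ballots of constants fold away: ballot(0) is 0 and ballot(true) is a copy
  // of EXEC. Otherwise the condition is already a wave mask and is copied.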
  if (Arg) {
    const int64_t Value = Arg->Value.getSExtValue();
1176     if (Value == 0) {
1177       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1178       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1179     } else if (Value == -1) { // all ones
1180       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1181       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1182     } else
1183       return false;
1184   } else {
1185     Register SrcReg = I.getOperand(2).getReg();
1186     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1187   }
1188 
1189   I.eraseFromParent();
1190   return true;
1191 }
1192 
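// Materialize amdgcn.reloc.constant: create (or reuse) an i32 global named by
// the metadata operand and emit a mov of its absolute low 32 bits, which is
// later resolved by a relocation.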
1193 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1194   Register DstReg = I.getOperand(0).getReg();
1195   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1196   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1197   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1198     return false;
1199 
1200   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1201 
1202   Module *M = MF->getFunction().getParent();
1203   const MDNode *Metadata = I.getOperand(2).getMetadata();
1204   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1205   auto RelocSymbol = cast<GlobalVariable>(
1206     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1207 
1208   MachineBasicBlock *BB = I.getParent();
1209   BuildMI(*BB, &I, I.getDebugLoc(),
1210           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1211     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1212 
1213   I.eraseFromParent();
1214   return true;
1215 }
1216 
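// The LDS size is known at compile time for AMDHSA and AMDPAL; for other
// targets it is emitted as an absolute relocation against the groupstaticsize
// symbol and resolved later.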
1217 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1218   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1219 
1220   Register DstReg = I.getOperand(0).getReg();
1221   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1222   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1223     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1224 
1225   MachineBasicBlock *MBB = I.getParent();
1226   const DebugLoc &DL = I.getDebugLoc();
1227 
1228   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1229 
1230   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1231     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1232     MIB.addImm(MFI->getLDSSize());
1233   } else {
1234     Module *M = MF->getFunction().getParent();
1235     const GlobalValue *GV
1236       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1237     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1238   }
1239 
1240   I.eraseFromParent();
1241   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1242 }
1243 
1244 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1245   MachineBasicBlock *MBB = I.getParent();
1246   MachineFunction &MF = *MBB->getParent();
1247   const DebugLoc &DL = I.getDebugLoc();
1248 
1249   MachineOperand &Dst = I.getOperand(0);
1250   Register DstReg = Dst.getReg();
1251   unsigned Depth = I.getOperand(2).getImm();
1252 
1253   const TargetRegisterClass *RC
1254     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1255   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1256       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1257     return false;
1258 
1259   // Check for kernel and shader functions
1260   if (Depth != 0 ||
1261       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1262     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1263       .addImm(0);
1264     I.eraseFromParent();
1265     return true;
1266   }
1267 
1268   MachineFrameInfo &MFI = MF.getFrameInfo();
1269   // There is a call to @llvm.returnaddress in this function
1270   MFI.setReturnAddressIsTaken(true);
1271 
1272   // Get the return address reg and mark it as an implicit live-in
1273   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1274   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1275                                              AMDGPU::SReg_64RegClass, DL);
1276   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1277     .addReg(LiveIn);
1278   I.eraseFromParent();
1279   return true;
1280 }
1281 
1282 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1283   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1284   // SelectionDAG uses for wave32 vs wave64.
1285   MachineBasicBlock *BB = MI.getParent();
1286   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1287       .add(MI.getOperand(1));
1288 
1289   Register Reg = MI.getOperand(1).getReg();
1290   MI.eraseFromParent();
1291 
1292   if (!MRI->getRegClassOrNull(Reg))
1293     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1294   return true;
1295 }
1296 
1297 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1298   MachineInstr &MI, Intrinsic::ID IntrID) const {
1299   MachineBasicBlock *MBB = MI.getParent();
1300   MachineFunction *MF = MBB->getParent();
1301   const DebugLoc &DL = MI.getDebugLoc();
1302 
1303   unsigned IndexOperand = MI.getOperand(7).getImm();
1304   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1305   bool WaveDone = MI.getOperand(9).getImm() != 0;
1306 
1307   if (WaveDone && !WaveRelease)
1308     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1309 
1310   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1311   IndexOperand &= ~0x3f;
1312   unsigned CountDw = 0;
1313 
1314   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1315     CountDw = (IndexOperand >> 24) & 0xf;
1316     IndexOperand &= ~(0xf << 24);
1317 
1318     if (CountDw < 1 || CountDw > 4) {
1319       report_fatal_error(
1320         "ds_ordered_count: dword count must be between 1 and 4");
1321     }
1322   }
1323 
1324   if (IndexOperand)
1325     report_fatal_error("ds_ordered_count: bad index operand");
1326 
1327   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1328   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1329 
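  // Pack the fields into the DS immediate offset: the ordered count index in
  // the low byte and the control bits (wave_release, wave_done, shader type,
  // instruction, dword count) in the high byte.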
1330   unsigned Offset0 = OrderedCountIndex << 2;
1331   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1332 
1333   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1334     Offset1 |= (CountDw - 1) << 6;
1335 
1336   if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1337     Offset1 |= ShaderType << 2;
1338 
1339   unsigned Offset = Offset0 | (Offset1 << 8);
1340 
1341   Register M0Val = MI.getOperand(2).getReg();
1342   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1343     .addReg(M0Val);
1344 
1345   Register DstReg = MI.getOperand(0).getReg();
1346   Register ValReg = MI.getOperand(3).getReg();
1347   MachineInstrBuilder DS =
1348     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1349       .addReg(ValReg)
1350       .addImm(Offset)
1351       .cloneMemRefs(MI);
1352 
1353   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1354     return false;
1355 
1356   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1357   MI.eraseFromParent();
1358   return Ret;
1359 }
1360 
1361 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1362   switch (IntrID) {
1363   case Intrinsic::amdgcn_ds_gws_init:
1364     return AMDGPU::DS_GWS_INIT;
1365   case Intrinsic::amdgcn_ds_gws_barrier:
1366     return AMDGPU::DS_GWS_BARRIER;
1367   case Intrinsic::amdgcn_ds_gws_sema_v:
1368     return AMDGPU::DS_GWS_SEMA_V;
1369   case Intrinsic::amdgcn_ds_gws_sema_br:
1370     return AMDGPU::DS_GWS_SEMA_BR;
1371   case Intrinsic::amdgcn_ds_gws_sema_p:
1372     return AMDGPU::DS_GWS_SEMA_P;
1373   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1374     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1375   default:
1376     llvm_unreachable("not a gws intrinsic");
1377   }
1378 }
1379 
1380 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1381                                                      Intrinsic::ID IID) const {
1382   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1383       !STI.hasGWSSemaReleaseAll())
1384     return false;
1385 
1386   // intrinsic ID, vsrc, offset
1387   const bool HasVSrc = MI.getNumOperands() == 3;
1388   assert(HasVSrc || MI.getNumOperands() == 2);
1389 
1390   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1391   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1392   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1393     return false;
1394 
1395   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1396   assert(OffsetDef);
1397 
1398   unsigned ImmOffset;
1399 
1400   MachineBasicBlock *MBB = MI.getParent();
1401   const DebugLoc &DL = MI.getDebugLoc();
1402 
1403   MachineInstr *Readfirstlane = nullptr;
1404 
1405   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1406   // incoming offset, in case there's an add of a constant. We'll have to put it
1407   // back later.
1408   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1409     Readfirstlane = OffsetDef;
1410     BaseOffset = OffsetDef->getOperand(1).getReg();
1411     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1412   }
1413 
1414   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1415     // If we have a constant offset, try to use the 0 in m0 as the base.
1416     // TODO: Look into changing the default m0 initialization value. If the
1417     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1418     // the immediate offset.
1419 
1420     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1421     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1422       .addImm(0);
1423   } else {
1424     std::tie(BaseOffset, ImmOffset) =
1425         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1426 
1427     if (Readfirstlane) {
1428       // We have the constant offset now, so put the readfirstlane back on the
1429       // variable component.
1430       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1431         return false;
1432 
1433       Readfirstlane->getOperand(1).setReg(BaseOffset);
1434       BaseOffset = Readfirstlane->getOperand(0).getReg();
1435     } else {
1436       if (!RBI.constrainGenericRegister(BaseOffset,
1437                                         AMDGPU::SReg_32RegClass, *MRI))
1438         return false;
1439     }
1440 
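    // GWS instructions take their resource base from M0[21:16], so shift the
    // variable part of the offset into that field before moving it into m0.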
1441     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1442     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1443       .addReg(BaseOffset)
1444       .addImm(16);
1445 
1446     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1447       .addReg(M0Base);
1448   }
1449 
1450   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1451   // offset field) % 64. Some versions of the programming guide omit the m0
1452   // part, or claim it's from offset 0.
1453   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1454 
1455   if (HasVSrc) {
1456     Register VSrc = MI.getOperand(1).getReg();
1457     MIB.addReg(VSrc);
1458 
1459     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1460       return false;
1461   }
1462 
1463   MIB.addImm(ImmOffset)
1464      .cloneMemRefs(MI);
1465 
1466   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1467 
1468   MI.eraseFromParent();
1469   return true;
1470 }
1471 
1472 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1473                                                       bool IsAppend) const {
1474   Register PtrBase = MI.getOperand(2).getReg();
1475   LLT PtrTy = MRI->getType(PtrBase);
1476   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1477 
1478   unsigned Offset;
1479   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1480 
1481   // TODO: Should this try to look through readfirstlane like GWS?
1482   if (!isDSOffsetLegal(PtrBase, Offset)) {
1483     PtrBase = MI.getOperand(2).getReg();
1484     Offset = 0;
1485   }
1486 
1487   MachineBasicBlock *MBB = MI.getParent();
1488   const DebugLoc &DL = MI.getDebugLoc();
1489   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1490 
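  // DS_APPEND/DS_CONSUME take their base address from m0 and only encode an
  // immediate offset, so copy the pointer base into m0.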
1491   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1492     .addReg(PtrBase);
1493   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1494     return false;
1495 
1496   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1497     .addImm(Offset)
1498     .addImm(IsGDS ? -1 : 0)
1499     .cloneMemRefs(MI);
1500   MI.eraseFromParent();
1501   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1502 }
1503 
1504 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
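  // If the entire workgroup fits in one wave, the workitems already run in
  // lockstep, so the s_barrier can be dropped; a WAVE_BARRIER pseudo is left
  // in its place as a code-motion barrier.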
1505   if (TM.getOptLevel() > CodeGenOpt::None) {
1506     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1507     if (WGSize <= STI.getWavefrontSize()) {
1508       MachineBasicBlock *MBB = MI.getParent();
1509       const DebugLoc &DL = MI.getDebugLoc();
1510       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1511       MI.eraseFromParent();
1512       return true;
1513     }
1514   }
1515   return selectImpl(MI, *CoverageInfo);
1516 }
1517 
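// Split the texture-fail control value into its TFE and LWE bits. Returns
// false if any bit other than TFE/LWE was set, i.e. the value is not valid.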
1518 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1519                          bool &IsTexFail) {
1520   if (TexFailCtrl)
1521     IsTexFail = true;
1522 
1523   TFE = (TexFailCtrl & 0x1) ? true : false;
1524   TexFailCtrl &= ~(uint64_t)0x1;
1525   LWE = (TexFailCtrl & 0x2) ? true : false;
1526   TexFailCtrl &= ~(uint64_t)0x2;
1527 
1528   return TexFailCtrl == 0;
1529 }
1530 
1531 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1532   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1533   MachineBasicBlock *MBB = MI.getParent();
1534   const DebugLoc &DL = MI.getDebugLoc();
1535 
1536   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1537     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1538 
1539   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1540   unsigned IntrOpcode = Intr->BaseOpcode;
1541   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1542   const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1543 
1544   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1545 
1546   Register VDataIn, VDataOut;
1547   LLT VDataTy;
1548   int NumVDataDwords = -1;
1549   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1550                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1551 
1552   bool Unorm;
1553   if (!BaseOpcode->Sampler)
1554     Unorm = true;
1555   else
1556     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1557 
1558   bool TFE;
1559   bool LWE;
1560   bool IsTexFail = false;
1561   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1562                     TFE, LWE, IsTexFail))
1563     return false;
1564 
1565   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1566   const bool IsA16 = (Flags & 1) != 0;
1567   const bool IsG16 = (Flags & 2) != 0;
1568 
1569   // A16 implies 16-bit gradients if the subtarget doesn't support G16.
1570   if (IsA16 && !STI.hasG16() && !IsG16)
1571     return false;
1572 
1573   unsigned DMask = 0;
1574   unsigned DMaskLanes = 0;
1575 
1576   if (BaseOpcode->Atomic) {
1577     VDataOut = MI.getOperand(0).getReg();
1578     VDataIn = MI.getOperand(2).getReg();
1579     LLT Ty = MRI->getType(VDataIn);
1580 
1581     // Be careful to allow atomic swap on 16-bit element vectors.
1582     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1583       Ty.getSizeInBits() == 128 :
1584       Ty.getSizeInBits() == 64;
1585 
1586     if (BaseOpcode->AtomicX2) {
1587       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1588 
1589       DMask = Is64Bit ? 0xf : 0x3;
1590       NumVDataDwords = Is64Bit ? 4 : 2;
1591     } else {
1592       DMask = Is64Bit ? 0x3 : 0x1;
1593       NumVDataDwords = Is64Bit ? 2 : 1;
1594     }
1595   } else {
1596     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1597     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1598 
1599     if (BaseOpcode->Store) {
1600       VDataIn = MI.getOperand(1).getReg();
1601       VDataTy = MRI->getType(VDataIn);
1602       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1603     } else {
1604       VDataOut = MI.getOperand(0).getReg();
1605       VDataTy = MRI->getType(VDataOut);
1606       NumVDataDwords = DMaskLanes;
1607 
1608       if (IsD16 && !STI.hasUnpackedD16VMem())
1609         NumVDataDwords = (DMaskLanes + 1) / 2;
1610     }
1611   }
1612 
1613   // Set G16 opcode
1614   if (IsG16 && !IsA16) {
1615     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1616         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1617     assert(G16MappingInfo);
1618     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1619   }
1620 
1621   // TODO: Check this in verifier.
1622   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1623 
1624   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1625   if (BaseOpcode->Atomic)
1626     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1627   if (CPol & ~AMDGPU::CPol::ALL)
1628     return false;
1629 
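  // Count the VAddr operands and the dwords they occupy; this decides between
  // NSA and packed addressing and selects the opcode's address size.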
1630   int NumVAddrRegs = 0;
1631   int NumVAddrDwords = 0;
1632   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1633     // Skip the $noregs and 0s inserted during legalization.
1634     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1635     if (!AddrOp.isReg())
1636       continue; // XXX - Break?
1637 
1638     Register Addr = AddrOp.getReg();
1639     if (!Addr)
1640       break;
1641 
1642     ++NumVAddrRegs;
1643     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1644   }
1645 
1646   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1647   // NSA, these should have been packed into a single value in the first
1648   // address register.
1649   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1650   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1651     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1652     return false;
1653   }
1654 
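  // A TFE/LWE access writes an extra status dword after the data, so reserve
  // room for it in the result.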
1655   if (IsTexFail)
1656     ++NumVDataDwords;
1657 
1658   int Opcode = -1;
1659   if (IsGFX11Plus) {
1660     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1661                                    UseNSA ? AMDGPU::MIMGEncGfx11NSA
1662                                           : AMDGPU::MIMGEncGfx11Default,
1663                                    NumVDataDwords, NumVAddrDwords);
1664   } else if (IsGFX10Plus) {
1665     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1666                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1667                                           : AMDGPU::MIMGEncGfx10Default,
1668                                    NumVDataDwords, NumVAddrDwords);
1669   } else {
1670     if (Subtarget->hasGFX90AInsts()) {
1671       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1672                                      NumVDataDwords, NumVAddrDwords);
1673       if (Opcode == -1) {
1674         LLVM_DEBUG(
1675             dbgs()
1676             << "requested image instruction is not supported on this GPU\n");
1677         return false;
1678       }
1679     }
1680     if (Opcode == -1 &&
1681         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1682       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1683                                      NumVDataDwords, NumVAddrDwords);
1684     if (Opcode == -1)
1685       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1686                                      NumVDataDwords, NumVAddrDwords);
1687   }
1688   assert(Opcode != -1);
1689 
1690   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1691     .cloneMemRefs(MI);
1692 
1693   if (VDataOut) {
1694     if (BaseOpcode->AtomicX2) {
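      // X2 atomics (cmpswap) define a wider register than the intrinsic
      // returns, so write into a temporary and copy out the low half, which
      // holds the original memory value.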
1695       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1696 
1697       Register TmpReg = MRI->createVirtualRegister(
1698         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1699       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1700 
1701       MIB.addDef(TmpReg);
1702       if (!MRI->use_empty(VDataOut)) {
1703         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1704             .addReg(TmpReg, RegState::Kill, SubReg);
1705       }
1706 
1707     } else {
1708       MIB.addDef(VDataOut); // vdata output
1709     }
1710   }
1711 
1712   if (VDataIn)
1713     MIB.addReg(VDataIn); // vdata input
1714 
1715   for (int I = 0; I != NumVAddrRegs; ++I) {
1716     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1717     if (SrcOp.isReg()) {
1718       assert(SrcOp.getReg() != 0);
1719       MIB.addReg(SrcOp.getReg());
1720     }
1721   }
1722 
1723   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1724   if (BaseOpcode->Sampler)
1725     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1726 
1727   MIB.addImm(DMask); // dmask
1728 
1729   if (IsGFX10Plus)
1730     MIB.addImm(DimInfo->Encoding);
1731   MIB.addImm(Unorm);
1732 
1733   MIB.addImm(CPol);
1734   MIB.addImm(IsA16 &&  // a16 or r128
1735              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1736   if (IsGFX10Plus)
1737     MIB.addImm(IsA16 ? -1 : 0);
1738 
1739   if (!Subtarget->hasGFX90AInsts()) {
1740     MIB.addImm(TFE); // tfe
1741   } else if (TFE) {
1742     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1743     return false;
1744   }
1745 
1746   MIB.addImm(LWE); // lwe
1747   if (!IsGFX10Plus)
1748     MIB.addImm(DimInfo->DA ? -1 : 0);
1749   if (BaseOpcode->HasD16)
1750     MIB.addImm(IsD16 ? -1 : 0);
1751 
1752   if (IsTexFail) {
1753     // An image load instruction with TFE/LWE only conditionally writes to its
1754     // result registers. Initialize them to zero so that we always get well
1755     // defined result values.
1756     assert(VDataOut && !VDataIn);
1757     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1758     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1759     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1760       .addImm(0);
1761     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1762     if (STI.usePRTStrictNull()) {
1763       // With enable-prt-strict-null enabled, initialize all result registers to
1764       // zero.
1765       auto RegSeq =
1766           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1767       for (auto Sub : Parts)
1768         RegSeq.addReg(Zero).addImm(Sub);
1769     } else {
1770       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1771       // result register.
1772       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1773       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1774       auto RegSeq =
1775           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1776       for (auto Sub : Parts.drop_back(1))
1777         RegSeq.addReg(Undef).addImm(Sub);
1778       RegSeq.addReg(Zero).addImm(Parts.back());
1779     }
1780     MIB.addReg(Tied, RegState::Implicit);
1781     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1782   }
1783 
1784   MI.eraseFromParent();
1785   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1786   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1787   return true;
1788 }
1789 
1790 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1791     MachineInstr &I) const {
1792   unsigned IntrinsicID = I.getIntrinsicID();
1793   switch (IntrinsicID) {
1794   case Intrinsic::amdgcn_end_cf:
1795     return selectEndCfIntrinsic(I);
1796   case Intrinsic::amdgcn_ds_ordered_add:
1797   case Intrinsic::amdgcn_ds_ordered_swap:
1798     return selectDSOrderedIntrinsic(I, IntrinsicID);
1799   case Intrinsic::amdgcn_ds_gws_init:
1800   case Intrinsic::amdgcn_ds_gws_barrier:
1801   case Intrinsic::amdgcn_ds_gws_sema_v:
1802   case Intrinsic::amdgcn_ds_gws_sema_br:
1803   case Intrinsic::amdgcn_ds_gws_sema_p:
1804   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1805     return selectDSGWSIntrinsic(I, IntrinsicID);
1806   case Intrinsic::amdgcn_ds_append:
1807     return selectDSAppendConsume(I, true);
1808   case Intrinsic::amdgcn_ds_consume:
1809     return selectDSAppendConsume(I, false);
1810   case Intrinsic::amdgcn_s_barrier:
1811     return selectSBarrier(I);
1812   case Intrinsic::amdgcn_global_atomic_fadd:
1813     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1814   case Intrinsic::amdgcn_raw_buffer_load_lds:
1815   case Intrinsic::amdgcn_struct_buffer_load_lds:
1816     return selectBufferLoadLds(I);
1817   case Intrinsic::amdgcn_global_load_lds:
1818     return selectGlobalLoadLds(I);
1819   default: {
1820     return selectImpl(I, *CoverageInfo);
1821   }
1822   }
1823 }
1824 
1825 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1826   if (selectImpl(I, *CoverageInfo))
1827     return true;
1828 
1829   MachineBasicBlock *BB = I.getParent();
1830   const DebugLoc &DL = I.getDebugLoc();
1831 
1832   Register DstReg = I.getOperand(0).getReg();
1833   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1834   assert(Size <= 32 || Size == 64);
1835   const MachineOperand &CCOp = I.getOperand(1);
1836   Register CCReg = CCOp.getReg();
1837   if (!isVCC(CCReg, *MRI)) {
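    // Scalar condition: S_CSELECT reads SCC, so copy the condition bit into
    // SCC first.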
1838     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1839                                          AMDGPU::S_CSELECT_B32;
1840     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1841             .addReg(CCReg);
1842 
1843     // The generic constrainSelectedInstRegOperands doesn't work for the scc
1844     // register bank, because it does not cover the register class we use to
1845     // represent it, so manually set the register class here.
1846     if (!MRI->getRegClassOrNull(CCReg))
1847         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1848     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1849             .add(I.getOperand(2))
1850             .add(I.getOperand(3));
1851 
1852     bool Ret = false;
1853     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1854     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1855     I.eraseFromParent();
1856     return Ret;
1857   }
1858 
1859   // Wide VGPR select should have been split in RegBankSelect.
1860   if (Size > 32)
1861     return false;
1862 
1863   MachineInstr *Select =
1864       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1865               .addImm(0)
1866               .add(I.getOperand(3))
1867               .addImm(0)
1868               .add(I.getOperand(2))
1869               .add(I.getOperand(1));
1870 
1871   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1872   I.eraseFromParent();
1873   return Ret;
1874 }
1875 
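/// Map a value size in bits to the sub-register index covering that many low
/// bits of a wider register, or return -1 if no single index is big enough.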
1876 static int sizeToSubRegIndex(unsigned Size) {
1877   switch (Size) {
1878   case 32:
1879     return AMDGPU::sub0;
1880   case 64:
1881     return AMDGPU::sub0_sub1;
1882   case 96:
1883     return AMDGPU::sub0_sub1_sub2;
1884   case 128:
1885     return AMDGPU::sub0_sub1_sub2_sub3;
1886   case 256:
1887     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1888   default:
1889     if (Size < 32)
1890       return AMDGPU::sub0;
1891     if (Size > 256)
1892       return -1;
1893     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1894   }
1895 }
1896 
1897 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1898   Register DstReg = I.getOperand(0).getReg();
1899   Register SrcReg = I.getOperand(1).getReg();
1900   const LLT DstTy = MRI->getType(DstReg);
1901   const LLT SrcTy = MRI->getType(SrcReg);
1902   const LLT S1 = LLT::scalar(1);
1903 
1904   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1905   const RegisterBank *DstRB;
1906   if (DstTy == S1) {
1907     // This is a special case. We don't treat s1 for legalization artifacts as
1908     // vcc booleans.
1909     DstRB = SrcRB;
1910   } else {
1911     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1912     if (SrcRB != DstRB)
1913       return false;
1914   }
1915 
1916   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1917 
1918   unsigned DstSize = DstTy.getSizeInBits();
1919   unsigned SrcSize = SrcTy.getSizeInBits();
1920 
1921   const TargetRegisterClass *SrcRC =
1922       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1923   const TargetRegisterClass *DstRC =
1924       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1925   if (!SrcRC || !DstRC)
1926     return false;
1927 
1928   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1929       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1930     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1931     return false;
1932   }
1933 
1934   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1935     MachineBasicBlock *MBB = I.getParent();
1936     const DebugLoc &DL = I.getDebugLoc();
1937 
1938     Register LoReg = MRI->createVirtualRegister(DstRC);
1939     Register HiReg = MRI->createVirtualRegister(DstRC);
1940     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1941       .addReg(SrcReg, 0, AMDGPU::sub0);
1942     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1943       .addReg(SrcReg, 0, AMDGPU::sub1);
1944 
1945     if (IsVALU && STI.hasSDWA()) {
1946       // Write the low 16-bits of the high element into the high 16-bits of the
1947       // low element.
1948       MachineInstr *MovSDWA =
1949         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1950         .addImm(0)                             // $src0_modifiers
1951         .addReg(HiReg)                         // $src0
1952         .addImm(0)                             // $clamp
1953         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1954         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1955         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1956         .addReg(LoReg, RegState::Implicit);
1957       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1958     } else {
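      // No SDWA available: pack the halves manually as (Hi << 16) | (Lo & 0xffff).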
1959       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1960       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1961       Register ImmReg = MRI->createVirtualRegister(DstRC);
1962       if (IsVALU) {
1963         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1964           .addImm(16)
1965           .addReg(HiReg);
1966       } else {
1967         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1968           .addReg(HiReg)
1969           .addImm(16);
1970       }
1971 
1972       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1973       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1974       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1975 
1976       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1977         .addImm(0xffff);
1978       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1979         .addReg(LoReg)
1980         .addReg(ImmReg);
1981       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1982         .addReg(TmpReg0)
1983         .addReg(TmpReg1);
1984     }
1985 
1986     I.eraseFromParent();
1987     return true;
1988   }
1989 
1990   if (!DstTy.isScalar())
1991     return false;
1992 
1993   if (SrcSize > 32) {
1994     int SubRegIdx = sizeToSubRegIndex(DstSize);
1995     if (SubRegIdx == -1)
1996       return false;
1997 
1998     // Deal with weird cases where the class only partially supports the subreg
1999     // index.
2000     const TargetRegisterClass *SrcWithSubRC
2001       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2002     if (!SrcWithSubRC)
2003       return false;
2004 
2005     if (SrcWithSubRC != SrcRC) {
2006       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2007         return false;
2008     }
2009 
2010     I.getOperand(1).setSubReg(SubRegIdx);
2011   }
2012 
2013   I.setDesc(TII.get(TargetOpcode::COPY));
2014   return true;
2015 }
2016 
2017 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2018 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2019   Mask = maskTrailingOnes<unsigned>(Size);
2020   int SignedMask = static_cast<int>(Mask);
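  // Integer inline constants on AMDGPU cover [-16, 64], so a mask in that
  // range can be encoded directly into the AND.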
2021   return SignedMask >= -16 && SignedMask <= 64;
2022 }
2023 
2024 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2025 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2026   Register Reg, const MachineRegisterInfo &MRI,
2027   const TargetRegisterInfo &TRI) const {
2028   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2029   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2030     return RB;
2031 
2032   // Ignore the type, since we don't use vcc in artifacts.
2033   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2034     return &RBI.getRegBankFromRegClass(*RC, LLT());
2035   return nullptr;
2036 }
2037 
2038 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2039   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2040   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2041   const DebugLoc &DL = I.getDebugLoc();
2042   MachineBasicBlock &MBB = *I.getParent();
2043   const Register DstReg = I.getOperand(0).getReg();
2044   const Register SrcReg = I.getOperand(1).getReg();
2045 
2046   const LLT DstTy = MRI->getType(DstReg);
2047   const LLT SrcTy = MRI->getType(SrcReg);
2048   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2049     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2050   const unsigned DstSize = DstTy.getSizeInBits();
2051   if (!DstTy.isScalar())
2052     return false;
2053 
2054   // Artifact casts should never use vcc.
2055   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2056 
2057   // FIXME: This should probably be illegal and split earlier.
2058   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2059     if (DstSize <= 32)
2060       return selectCOPY(I);
2061 
2062     const TargetRegisterClass *SrcRC =
2063         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2064     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2065     const TargetRegisterClass *DstRC =
2066         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2067 
2068     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2069     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2070     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2071       .addReg(SrcReg)
2072       .addImm(AMDGPU::sub0)
2073       .addReg(UndefReg)
2074       .addImm(AMDGPU::sub1);
2075     I.eraseFromParent();
2076 
2077     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2078            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2079   }
2080 
2081   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2082     // 64-bit should have been split up in RegBankSelect
2083 
2084     // Try to use an and with a mask if it will save code size.
2085     unsigned Mask;
2086     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2087       MachineInstr *ExtI =
2088       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2089         .addImm(Mask)
2090         .addReg(SrcReg);
2091       I.eraseFromParent();
2092       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2093     }
2094 
2095     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2096     MachineInstr *ExtI =
2097       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2098       .addReg(SrcReg)
2099       .addImm(0) // Offset
2100       .addImm(SrcSize); // Width
2101     I.eraseFromParent();
2102     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2103   }
2104 
2105   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2106     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2107       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2108     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2109       return false;
2110 
2111     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2112       const unsigned SextOpc = SrcSize == 8 ?
2113         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2114       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2115         .addReg(SrcReg);
2116       I.eraseFromParent();
2117       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2118     }
2119 
2120     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2121     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2122 
2123     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16] = width.
2124     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2125       // We need a 64-bit register source, but the high bits don't matter.
2126       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2127       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2128       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
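      // For G_SEXT_INREG the source is already 64 bits wide, so only its low
      // half feeds the REG_SEQUENCE; otherwise the whole 32-bit source is used.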
2129 
2130       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2131       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2132         .addReg(SrcReg, 0, SubReg)
2133         .addImm(AMDGPU::sub0)
2134         .addReg(UndefReg)
2135         .addImm(AMDGPU::sub1);
2136 
2137       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2138         .addReg(ExtReg)
2139         .addImm(SrcSize << 16);
2140 
2141       I.eraseFromParent();
2142       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2143     }
2144 
2145     unsigned Mask;
2146     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2147       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2148         .addReg(SrcReg)
2149         .addImm(Mask);
2150     } else {
2151       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2152         .addReg(SrcReg)
2153         .addImm(SrcSize << 16);
2154     }
2155 
2156     I.eraseFromParent();
2157     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2158   }
2159 
2160   return false;
2161 }
2162 
2163 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2164   MachineBasicBlock *BB = I.getParent();
2165   MachineOperand &ImmOp = I.getOperand(1);
2166   Register DstReg = I.getOperand(0).getReg();
2167   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2168 
2169   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2170   if (ImmOp.isFPImm()) {
2171     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2172     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2173   } else if (ImmOp.isCImm()) {
2174     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2175   } else {
2176     llvm_unreachable("Not supported by g_constants");
2177   }
2178 
2179   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2180   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2181 
2182   unsigned Opcode;
2183   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2184     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2185   } else {
2186     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2187 
2188     // We should never produce s1 values on banks other than VCC. If the user of
2189     // this already constrained the register, we may incorrectly think it's VCC
2190     // if it wasn't originally.
2191     if (Size == 1)
2192       return false;
2193   }
2194 
2195   if (Size != 64) {
2196     I.setDesc(TII.get(Opcode));
2197     I.addImplicitDefUseOperands(*MF);
2198     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2199   }
2200 
2201   const DebugLoc &DL = I.getDebugLoc();
2202 
2203   APInt Imm(Size, I.getOperand(1).getImm());
2204 
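  // 64-bit case: either a single S_MOV_B64 when the value is an SGPR inline
  // constant, or two 32-bit moves combined with a REG_SEQUENCE.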
2205   MachineInstr *ResInst;
2206   if (IsSgpr && TII.isInlineConstant(Imm)) {
2207     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2208       .addImm(I.getOperand(1).getImm());
2209   } else {
2210     const TargetRegisterClass *RC = IsSgpr ?
2211       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2212     Register LoReg = MRI->createVirtualRegister(RC);
2213     Register HiReg = MRI->createVirtualRegister(RC);
2214 
2215     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2216       .addImm(Imm.trunc(32).getZExtValue());
2217 
2218     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2219       .addImm(Imm.ashr(32).getZExtValue());
2220 
2221     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2222       .addReg(LoReg)
2223       .addImm(AMDGPU::sub0)
2224       .addReg(HiReg)
2225       .addImm(AMDGPU::sub1);
2226   }
2227 
2228   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2229   // work for target-independent opcodes.
2230   I.eraseFromParent();
2231   const TargetRegisterClass *DstRC =
2232     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2233   if (!DstRC)
2234     return true;
2235   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2236 }
2237 
2238 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2239   // Only manually handle the f64 SGPR case.
2240   //
2241   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2242   // the bit ops theoretically have a second result due to the implicit def of
2243   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2244   // that is easy by disabling the check. The result works, but uses a
2245   // nonsensical sreg32orlds_and_sreg_1 regclass.
2246   //
2247   // The DAG emitter is more problematic, and incorrectly adds both results of
2248   // the S_XOR_B32 to the variadic REG_SEQUENCE operands.
2249 
2250   Register Dst = MI.getOperand(0).getReg();
2251   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2252   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2253       MRI->getType(Dst) != LLT::scalar(64))
2254     return false;
2255 
2256   Register Src = MI.getOperand(1).getReg();
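  // Look through a G_FABS feeding the fneg; fneg(fabs(x)) then becomes a
  // single OR that sets the sign bit.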
2257   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2258   if (Fabs)
2259     Src = Fabs->getOperand(1).getReg();
2260 
2261   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2262       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2263     return false;
2264 
2265   MachineBasicBlock *BB = MI.getParent();
2266   const DebugLoc &DL = MI.getDebugLoc();
2267   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2268   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2269   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2270   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2271 
2272   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2273     .addReg(Src, 0, AMDGPU::sub0);
2274   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2275     .addReg(Src, 0, AMDGPU::sub1);
2276   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2277     .addImm(0x80000000);
2278 
2279   // Set or toggle sign bit.
2280   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2281   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2282     .addReg(HiReg)
2283     .addReg(ConstReg);
2284   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2285     .addReg(LoReg)
2286     .addImm(AMDGPU::sub0)
2287     .addReg(OpReg)
2288     .addImm(AMDGPU::sub1);
2289   MI.eraseFromParent();
2290   return true;
2291 }
2292 
2293 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2294 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2295   Register Dst = MI.getOperand(0).getReg();
2296   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2297   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2298       MRI->getType(Dst) != LLT::scalar(64))
2299     return false;
2300 
2301   Register Src = MI.getOperand(1).getReg();
2302   MachineBasicBlock *BB = MI.getParent();
2303   const DebugLoc &DL = MI.getDebugLoc();
2304   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2305   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2306   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2307   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2308 
2309   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2310       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2311     return false;
2312 
2313   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2314     .addReg(Src, 0, AMDGPU::sub0);
2315   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2316     .addReg(Src, 0, AMDGPU::sub1);
2317   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2318     .addImm(0x7fffffff);
2319 
2320   // Clear sign bit.
2321   // TODO: Should this use S_BITSET0_*?
2322   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2323     .addReg(HiReg)
2324     .addReg(ConstReg);
2325   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2326     .addReg(LoReg)
2327     .addImm(AMDGPU::sub0)
2328     .addReg(OpReg)
2329     .addImm(AMDGPU::sub1);
2330 
2331   MI.eraseFromParent();
2332   return true;
2333 }
2334 
2335 static bool isConstant(const MachineInstr &MI) {
2336   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2337 }
2338 
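// Walk the chain of G_PTR_ADDs feeding the load's address, recording for each
// step the constant offset and which addends live in SGPRs or VGPRs.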
2339 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2340     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2341 
2342   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2343 
2344   assert(PtrMI);
2345 
2346   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2347     return;
2348 
2349   GEPInfo GEPInfo(*PtrMI);
2350 
2351   for (unsigned i = 1; i != 3; ++i) {
2352     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2353     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2354     assert(OpDef);
2355     if (i == 2 && isConstant(*OpDef)) {
2356       // TODO: Could handle constant base + variable offset, but a combine
2357       // probably should have commuted it.
2358       assert(GEPInfo.Imm == 0);
2359       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2360       continue;
2361     }
2362     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2363     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2364       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2365     else
2366       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2367   }
2368 
2369   AddrInfo.push_back(GEPInfo);
2370   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2371 }
2372 
2373 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2374   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2375 }
2376 
2377 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2378   if (!MI.hasOneMemOperand())
2379     return false;
2380 
2381   const MachineMemOperand *MMO = *MI.memoperands_begin();
2382   const Value *Ptr = MMO->getValue();
2383 
2384   // UndefValue means this is a load of a kernel input.  These are uniform.
2385   // Sometimes LDS instructions have constant pointers.
2386   // If Ptr is null, then that means this mem operand contains a
2387   // PseudoSourceValue like GOT.
2388   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2389       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2390     return true;
2391 
2392   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2393     return true;
2394 
2395   const Instruction *I = dyn_cast<Instruction>(Ptr);
2396   return I && I->getMetadata("amdgpu.uniform");
2397 }
2398 
2399 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2400   for (const GEPInfo &GEPInfo : AddrInfo) {
2401     if (!GEPInfo.VgprParts.empty())
2402       return true;
2403   }
2404   return false;
2405 }
2406 
2407 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2408   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2409   unsigned AS = PtrTy.getAddressSpace();
2410   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2411       STI.ldsRequiresM0Init()) {
2412     MachineBasicBlock *BB = I.getParent();
2413 
2414     // If DS instructions require M0 initialization, insert it before selecting.
2415     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2416       .addImm(-1);
2417   }
2418 }
2419 
2420 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2421   MachineInstr &I) const {
2422   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2423     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2424     unsigned AS = PtrTy.getAddressSpace();
2425     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2426       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2427   }
2428 
2429   initM0(I);
2430   return selectImpl(I, *CoverageInfo);
2431 }
2432 
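// Return true if Reg is (or is a bitwise combination of) the result of a
// VALU comparison, in which case branching on it does not need an extra AND
// with exec.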
2433 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2434   if (Reg.isPhysical())
2435     return false;
2436 
2437   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2438   const unsigned Opcode = MI.getOpcode();
2439 
2440   if (Opcode == AMDGPU::COPY)
2441     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2442 
2443   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2444       Opcode == AMDGPU::G_XOR)
2445     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2446            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2447 
2448   if (Opcode == TargetOpcode::G_INTRINSIC)
2449     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2450 
2451   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2452 }
2453 
2454 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2455   MachineBasicBlock *BB = I.getParent();
2456   MachineOperand &CondOp = I.getOperand(0);
2457   Register CondReg = CondOp.getReg();
2458   const DebugLoc &DL = I.getDebugLoc();
2459 
2460   unsigned BrOpcode;
2461   Register CondPhysReg;
2462   const TargetRegisterClass *ConstrainRC;
2463 
2464   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2465   // whether the branch is uniform when selecting the instruction. In
2466   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2467   // RegBankSelect knows what it's doing if the branch condition is scc, even
2468   // though it currently does not.
2469   if (!isVCC(CondReg, *MRI)) {
2470     if (MRI->getType(CondReg) != LLT::scalar(32))
2471       return false;
2472 
2473     CondPhysReg = AMDGPU::SCC;
2474     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2475     ConstrainRC = &AMDGPU::SReg_32RegClass;
2476   } else {
2477     // FIXME: Should scc->vcc copies be ANDed with exec?
2478 
2479     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2480     // need to insert an and with exec.
2481     if (!isVCmpResult(CondReg, *MRI)) {
2482       const bool Is64 = STI.isWave64();
2483       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2484       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2485 
2486       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2487       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2488           .addReg(CondReg)
2489           .addReg(Exec);
2490       CondReg = TmpReg;
2491     }
2492 
2493     CondPhysReg = TRI.getVCC();
2494     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2495     ConstrainRC = TRI.getBoolRC();
2496   }
2497 
2498   if (!MRI->getRegClassOrNull(CondReg))
2499     MRI->setRegClass(CondReg, ConstrainRC);
2500 
2501   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2502     .addReg(CondReg);
2503   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2504     .addMBB(I.getOperand(1).getMBB());
2505 
2506   I.eraseFromParent();
2507   return true;
2508 }
2509 
2510 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2511   MachineInstr &I) const {
2512   Register DstReg = I.getOperand(0).getReg();
2513   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2514   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2515   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2516   if (IsVGPR)
2517     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2518 
2519   return RBI.constrainGenericRegister(
2520     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2521 }
2522 
2523 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2524   Register DstReg = I.getOperand(0).getReg();
2525   Register SrcReg = I.getOperand(1).getReg();
2526   Register MaskReg = I.getOperand(2).getReg();
2527   LLT Ty = MRI->getType(DstReg);
2528   LLT MaskTy = MRI->getType(MaskReg);
2529   MachineBasicBlock *BB = I.getParent();
2530   const DebugLoc &DL = I.getDebugLoc();
2531 
2532   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2533   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2534   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2535   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2536   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2537     return false;
2538 
2539   // Try to avoid emitting a bit operation when we only need to touch half of
2540   // the 64-bit pointer.
2541   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2542   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2543   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2544 
2545   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2546   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2547 
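  // If neither half of the mask is known to be all ones, there is nothing to
  // save by splitting, so an SGPR pointer just gets a single 64-bit S_AND.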
2548   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2549       !CanCopyLow32 && !CanCopyHi32) {
2550     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2551       .addReg(SrcReg)
2552       .addReg(MaskReg);
2553     I.eraseFromParent();
2554     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2555   }
2556 
2557   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2558   const TargetRegisterClass &RegRC
2559     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2560 
2561   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2562   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2563   const TargetRegisterClass *MaskRC =
2564       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2565 
2566   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2567       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2568       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2569     return false;
2570 
2571   if (Ty.getSizeInBits() == 32) {
2572     assert(MaskTy.getSizeInBits() == 32 &&
2573            "ptrmask should have been narrowed during legalize");
2574 
2575     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2576       .addReg(SrcReg)
2577       .addReg(MaskReg);
2578     I.eraseFromParent();
2579     return true;
2580   }
2581 
2582   Register HiReg = MRI->createVirtualRegister(&RegRC);
2583   Register LoReg = MRI->createVirtualRegister(&RegRC);
2584 
2585   // Extract the subregisters from the source pointer.
2586   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2587     .addReg(SrcReg, 0, AMDGPU::sub0);
2588   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2589     .addReg(SrcReg, 0, AMDGPU::sub1);
2590 
2591   Register MaskedLo, MaskedHi;
2592 
2593   if (CanCopyLow32) {
2594     // If all the bits in the low half are 1, we only need a copy for it.
2595     MaskedLo = LoReg;
2596   } else {
2597     // Extract the mask subregister and apply the and.
2598     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2599     MaskedLo = MRI->createVirtualRegister(&RegRC);
2600 
2601     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2602       .addReg(MaskReg, 0, AMDGPU::sub0);
2603     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2604       .addReg(LoReg)
2605       .addReg(MaskLo);
2606   }
2607 
2608   if (CanCopyHi32) {
2609     // If all the bits in the high half are 1, we only need a copy for it.
2610     MaskedHi = HiReg;
2611   } else {
2612     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2613     MaskedHi = MRI->createVirtualRegister(&RegRC);
2614 
2615     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2616       .addReg(MaskReg, 0, AMDGPU::sub1);
2617     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2618       .addReg(HiReg)
2619       .addReg(MaskHi);
2620   }
2621 
2622   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2623     .addReg(MaskedLo)
2624     .addImm(AMDGPU::sub0)
2625     .addReg(MaskedHi)
2626     .addImm(AMDGPU::sub1);
2627   I.eraseFromParent();
2628   return true;
2629 }
2630 
2631 /// Return the register to use for the index value, and the subregister to use
2632 /// for the indirectly accessed register.
2633 static std::pair<Register, unsigned>
2634 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2635                         const SIRegisterInfo &TRI,
2636                         const TargetRegisterClass *SuperRC,
2637                         Register IdxReg,
2638                         unsigned EltSize) {
2639   Register IdxBaseReg;
2640   int Offset;
2641 
2642   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2643   if (IdxBaseReg == AMDGPU::NoRegister) {
2644     // This will happen if the index is a known constant. This should ordinarily
2645     // be legalized out, but handle it as a register just in case.
2646     assert(Offset == 0);
2647     IdxBaseReg = IdxReg;
2648   }
2649 
2650   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2651 
2652   // Skip out of bounds offsets, or else we would end up using an undefined
2653   // register.
2654   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2655     return std::make_pair(IdxReg, SubRegs[0]);
2656   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2657 }
2658 
2659 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2660   MachineInstr &MI) const {
2661   Register DstReg = MI.getOperand(0).getReg();
2662   Register SrcReg = MI.getOperand(1).getReg();
2663   Register IdxReg = MI.getOperand(2).getReg();
2664 
2665   LLT DstTy = MRI->getType(DstReg);
2666   LLT SrcTy = MRI->getType(SrcReg);
2667 
2668   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2669   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2670   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2671 
2672   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2673   // this into a waterfall loop.
2674   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2675     return false;
2676 
2677   const TargetRegisterClass *SrcRC =
2678       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2679   const TargetRegisterClass *DstRC =
2680       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2681   if (!SrcRC || !DstRC)
2682     return false;
2683   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2684       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2685       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2686     return false;
2687 
2688   MachineBasicBlock *BB = MI.getParent();
2689   const DebugLoc &DL = MI.getDebugLoc();
2690   const bool Is64 = DstTy.getSizeInBits() == 64;
2691 
2692   unsigned SubReg;
2693   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2694                                                      DstTy.getSizeInBits() / 8);
2695 
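  // For an SGPR source vector, S_MOVRELS reads the element selected by m0.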
2696   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2697     if (DstTy.getSizeInBits() != 32 && !Is64)
2698       return false;
2699 
2700     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2701       .addReg(IdxReg);
2702 
2703     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2704     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2705       .addReg(SrcReg, 0, SubReg)
2706       .addReg(SrcReg, RegState::Implicit);
2707     MI.eraseFromParent();
2708     return true;
2709   }
2710 
2711   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2712     return false;
2713 
2714   if (!STI.useVGPRIndexMode()) {
2715     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2716       .addReg(IdxReg);
2717     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2718       .addReg(SrcReg, 0, SubReg)
2719       .addReg(SrcReg, RegState::Implicit);
2720     MI.eraseFromParent();
2721     return true;
2722   }
2723 
2724   const MCInstrDesc &GPRIDXDesc =
2725       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2726   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2727       .addReg(SrcReg)
2728       .addReg(IdxReg)
2729       .addImm(SubReg);
2730 
2731   MI.eraseFromParent();
2732   return true;
2733 }
2734 
2735 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2736 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2737   MachineInstr &MI) const {
2738   Register DstReg = MI.getOperand(0).getReg();
2739   Register VecReg = MI.getOperand(1).getReg();
2740   Register ValReg = MI.getOperand(2).getReg();
2741   Register IdxReg = MI.getOperand(3).getReg();
2742 
2743   LLT VecTy = MRI->getType(DstReg);
2744   LLT ValTy = MRI->getType(ValReg);
2745   unsigned VecSize = VecTy.getSizeInBits();
2746   unsigned ValSize = ValTy.getSizeInBits();
2747 
2748   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2749   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2750   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2751 
2752   assert(VecTy.getElementType() == ValTy);
2753 
2754   // The index must be scalar. If it wasn't, RegBankSelect should have moved
2755   // this into a waterfall loop.
2756   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2757     return false;
2758 
2759   const TargetRegisterClass *VecRC =
2760       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2761   const TargetRegisterClass *ValRC =
2762       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2763 
2764   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2765       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2766       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2767       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2768     return false;
2769 
2770   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2771     return false;
2772 
2773   unsigned SubReg;
2774   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2775                                                      ValSize / 8);
2776 
2777   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2778                          STI.useVGPRIndexMode();
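  // With VGPR index mode the insert is done through the gpr_idx pseudo;
  // otherwise the index is written to m0 and a movrel pseudo is used.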
2779 
2780   MachineBasicBlock *BB = MI.getParent();
2781   const DebugLoc &DL = MI.getDebugLoc();
2782 
2783   if (!IndexMode) {
2784     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2785       .addReg(IdxReg);
2786 
2787     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2788         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2789     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2790         .addReg(VecReg)
2791         .addReg(ValReg)
2792         .addImm(SubReg);
2793     MI.eraseFromParent();
2794     return true;
2795   }
2796 
2797   const MCInstrDesc &GPRIDXDesc =
2798       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2799   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2800       .addReg(VecReg)
2801       .addReg(ValReg)
2802       .addReg(IdxReg)
2803       .addImm(SubReg);
2804 
2805   MI.eraseFromParent();
2806   return true;
2807 }
2808 
2809 static bool isZeroOrUndef(int X) {
2810   return X == 0 || X == -1;
2811 }
2812 
2813 static bool isOneOrUndef(int X) {
2814   return X == 1 || X == -1;
2815 }
2816 
2817 static bool isZeroOrOneOrUndef(int X) {
2818   return X == 0 || X == 1 || X == -1;
2819 }
2820 
2821 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2822 // 32-bit register.
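// Mask elements of -1 denote undef lanes.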
2823 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2824                                    ArrayRef<int> Mask) {
2825   NewMask[0] = Mask[0];
2826   NewMask[1] = Mask[1];
2827   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2828     return Src0;
2829 
2830   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2831   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2832 
2833   // Shift the mask inputs to be 0/1.
2834   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2835   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2836   return Src1;
2837 }
2838 
2839 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2840 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2841   MachineInstr &MI) const {
2842   Register DstReg = MI.getOperand(0).getReg();
2843   Register Src0Reg = MI.getOperand(1).getReg();
2844   Register Src1Reg = MI.getOperand(2).getReg();
2845   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2846 
2847   const LLT V2S16 = LLT::fixed_vector(2, 16);
2848   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2849     return false;
2850 
2851   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2852     return false;
2853 
2854   assert(ShufMask.size() == 2);
2855 
2856   MachineBasicBlock *MBB = MI.getParent();
2857   const DebugLoc &DL = MI.getDebugLoc();
2858 
2859   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2860   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2861   const TargetRegisterClass &RC = IsVALU ?
2862     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2863 
2864   // Handle the degenerate case, which should have been folded out.
2865   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2866     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2867 
2868     MI.eraseFromParent();
2869     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2870   }
2871 
2872   // A legal VOP3P mask only reads one of the sources.
2873   int Mask[2];
2874   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2875 
2876   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2877       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2878     return false;
2879 
2880   // TODO: This also should have been folded out.
2881   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2882     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2883       .addReg(SrcVec);
2884 
2885     MI.eraseFromParent();
2886     return true;
2887   }
2888 
2889   if (Mask[0] == 1 && Mask[1] == -1) {
2890     if (IsVALU) {
2891       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2892         .addImm(16)
2893         .addReg(SrcVec);
2894     } else {
2895       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2896         .addReg(SrcVec)
2897         .addImm(16);
2898     }
2899   } else if (Mask[0] == -1 && Mask[1] == 0) {
2900     if (IsVALU) {
2901       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2902         .addImm(16)
2903         .addReg(SrcVec);
2904     } else {
2905       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2906         .addReg(SrcVec)
2907         .addImm(16);
2908     }
2909   } else if (Mask[0] == 0 && Mask[1] == 0) {
2910     if (IsVALU) {
2911       if (STI.hasSDWA()) {
2912         // Write low half of the register into the high half.
2913         MachineInstr *MovSDWA =
2914             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2915                 .addImm(0)                             // $src0_modifiers
2916                 .addReg(SrcVec)                        // $src0
2917                 .addImm(0)                             // $clamp
2918                 .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2919                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2920                 .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2921                 .addReg(SrcVec, RegState::Implicit);
2922         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2923       } else {
2924         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2925         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
2926             .addImm(0xFFFF)
2927             .addReg(SrcVec);
2928         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2929             .addReg(TmpReg)
2930             .addImm(16)
2931             .addReg(TmpReg);
2932       }
2933     } else {
2934       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2935         .addReg(SrcVec)
2936         .addReg(SrcVec);
2937     }
2938   } else if (Mask[0] == 1 && Mask[1] == 1) {
2939     if (IsVALU) {
2940       if (STI.hasSDWA()) {
2941         // Write high half of the register into the low half.
2942         MachineInstr *MovSDWA =
2943             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2944                 .addImm(0)                             // $src0_modifiers
2945                 .addReg(SrcVec)                        // $src0
2946                 .addImm(0)                             // $clamp
2947                 .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2948                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2949                 .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2950                 .addReg(SrcVec, RegState::Implicit);
2951         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2952       } else {
2953         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2954         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
2955             .addImm(16)
2956             .addReg(SrcVec);
2957         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2958             .addReg(TmpReg)
2959             .addImm(16)
2960             .addReg(TmpReg);
2961       }
2962     } else {
2963       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2964         .addReg(SrcVec)
2965         .addReg(SrcVec);
2966     }
2967   } else if (Mask[0] == 1 && Mask[1] == 0) {
2968     if (IsVALU) {
2969       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2970         .addReg(SrcVec)
2971         .addReg(SrcVec)
2972         .addImm(16);
2973     } else {
2974       if (STI.hasSPackHL()) {
2975         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HL_B32_B16), DstReg)
2976             .addReg(SrcVec)
2977             .addReg(SrcVec);
2978       } else {
2979         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2980         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2981             .addReg(SrcVec)
2982             .addImm(16);
2983         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2984             .addReg(TmpReg)
2985             .addReg(SrcVec);
2986       }
2987     }
2988   } else
2989     llvm_unreachable("all shuffle masks should be handled");
2990 
2991   MI.eraseFromParent();
2992   return true;
2993 }
2994 
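// Manually select a no-return buffer atomic fadd on targets without gfx90a
// instructions. If the result of the atomic is actually used, this is
// unsupported and a diagnostic is emitted.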
2995 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2996   MachineInstr &MI) const {
2997   if (STI.hasGFX90AInsts())
2998     return selectImpl(MI, *CoverageInfo);
2999 
3000   MachineBasicBlock *MBB = MI.getParent();
3001   const DebugLoc &DL = MI.getDebugLoc();
3002 
3003   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3004     Function &F = MBB->getParent()->getFunction();
3005     DiagnosticInfoUnsupported
3006       NoFpRet(F, "return versions of fp atomics not supported",
3007               MI.getDebugLoc(), DS_Error);
3008     F.getContext().diagnose(NoFpRet);
3009     return false;
3010   }
3011 
3012   // FIXME: This is only needed because tablegen requires the number of dst
3013   // operands in the match and replace patterns to be the same. Otherwise the
3014   // patterns can be exported from the SDag path.
3015   MachineOperand &VDataIn = MI.getOperand(1);
3016   MachineOperand &VIndex = MI.getOperand(3);
3017   MachineOperand &VOffset = MI.getOperand(4);
3018   MachineOperand &SOffset = MI.getOperand(5);
3019   int16_t Offset = MI.getOperand(6).getImm();
3020 
3021   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3022   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3023 
3024   unsigned Opcode;
3025   if (HasVOffset) {
3026     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3027                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3028   } else {
3029     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3030                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3031   }
3032 
3033   if (MRI->getType(VDataIn.getReg()).isVector()) {
3034     switch (Opcode) {
3035     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3036       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3037       break;
3038     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3039       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3040       break;
3041     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3042       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3043       break;
3044     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3045       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3046       break;
3047     }
3048   }
3049 
3050   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3051   I.add(VDataIn);
3052 
3053   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3054       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3055     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3056     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3057       .addReg(VIndex.getReg())
3058       .addImm(AMDGPU::sub0)
3059       .addReg(VOffset.getReg())
3060       .addImm(AMDGPU::sub1);
3061 
3062     I.addReg(IdxReg);
3063   } else if (HasVIndex) {
3064     I.add(VIndex);
3065   } else if (HasVOffset) {
3066     I.add(VOffset);
3067   }
3068 
3069   I.add(MI.getOperand(2)); // rsrc
3070   I.add(SOffset);
3071   I.addImm(Offset);
3072   I.addImm(MI.getOperand(7).getImm()); // cpol
3073   I.cloneMemRefs(MI);
3074 
3075   MI.eraseFromParent();
3076 
3077   return true;
3078 }
3079 
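// Select a global atomic fadd. Targets with gfx90a instructions go through the
// generated patterns; otherwise only the no-return form is selected and uses
// of the result are diagnosed as unsupported.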
3080 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3081   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3082 
3083   if (STI.hasGFX90AInsts()) {
3084     // gfx90a adds return versions of the global atomic fadd instructions, so no
3085     // special handling is required.
3086     return selectImpl(MI, *CoverageInfo);
3087   }
3088 
3089   MachineBasicBlock *MBB = MI.getParent();
3090   const DebugLoc &DL = MI.getDebugLoc();
3091 
3092   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3093     Function &F = MBB->getParent()->getFunction();
3094     DiagnosticInfoUnsupported
3095       NoFpRet(F, "return versions of fp atomics not supported",
3096               MI.getDebugLoc(), DS_Error);
3097     F.getContext().diagnose(NoFpRet);
3098     return false;
3099   }
3100 
3101   // FIXME: This is only needed because tablegen requires the number of dst
3102   // operands in the match and replace patterns to be the same. Otherwise the
3103   // patterns can be exported from the SDag path.
3104   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3105 
3106   Register Data = DataOp.getReg();
3107   const unsigned Opc = MRI->getType(Data).isVector() ?
3108     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3109   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3110     .addReg(Addr.first)
3111     .addReg(Data)
3112     .addImm(Addr.second)
3113     .addImm(0) // cpol
3114     .cloneMemRefs(MI);
3115 
3116   MI.eraseFromParent();
3117   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3118 }
3119 
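// Select a buffer load that writes its result directly to LDS. The LDS
// destination pointer (operand 2) is copied into M0, and the opcode is chosen
// from the load size and the vindex/voffset operands that are present.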
3120 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3121   unsigned Opc;
3122   unsigned Size = MI.getOperand(3).getImm();
3123 
3124   // The struct intrinsic variants add one additional operand over raw.
3125   const bool HasVIndex = MI.getNumOperands() == 9;
3126   Register VIndex;
3127   int OpOffset = 0;
3128   if (HasVIndex) {
3129     VIndex = MI.getOperand(4).getReg();
3130     OpOffset = 1;
3131   }
3132 
3133   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3134   Optional<ValueAndVReg> MaybeVOffset =
3135       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3136   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3137 
3138   switch (Size) {
3139   default:
3140     return false;
3141   case 1:
3142     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3143                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3144                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3145                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3146     break;
3147   case 2:
3148     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3149                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3150                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3151                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3152     break;
3153   case 4:
3154     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3155                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3156                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3157                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3158     break;
3159   }
3160 
3161   MachineBasicBlock *MBB = MI.getParent();
3162   const DebugLoc &DL = MI.getDebugLoc();
3163   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3164     .add(MI.getOperand(2));
3165 
3166   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3167 
3168   if (HasVIndex && HasVOffset) {
3169     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3170     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3171       .addReg(VIndex)
3172       .addImm(AMDGPU::sub0)
3173       .addReg(VOffset)
3174       .addImm(AMDGPU::sub1);
3175 
3176     MIB.addReg(IdxReg);
3177   } else if (HasVIndex) {
3178     MIB.addReg(VIndex);
3179   } else if (HasVOffset) {
3180     MIB.addReg(VOffset);
3181   }
3182 
3183   MIB.add(MI.getOperand(1));            // rsrc
3184   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3185   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3186   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3187   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3188   MIB.addImm((Aux >> 3) & 1);           // swz
3189 
3190   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3191   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3192   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3193   MachinePointerInfo StorePtrI = LoadPtrI;
3194   StorePtrI.V = nullptr;
3195   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3196 
3197   auto F = LoadMMO->getFlags() &
3198            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3199   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3200                                      Size, LoadMMO->getBaseAlign());
3201 
3202   MachineMemOperand *StoreMMO =
3203       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3204                                sizeof(int32_t), LoadMMO->getBaseAlign());
3205 
3206   MIB.setMemRefs({LoadMMO, StoreMMO});
3207 
3208   MI.eraseFromParent();
3209   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3210 }
3211 
3212 /// Match a zero extend from a 32-bit value to 64 bits.
3213 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3214   Register ZExtSrc;
3215   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3216     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3217 
3218   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3219   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3220   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3221     return Register();
3222 
3223   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3224     return Def->getOperand(1).getReg();
3225   }
3226 
3227   return Register();
3228 }
3229 
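// Select a global load that writes its result directly to LDS (M0 holds the
// LDS destination pointer), splitting the address into an SGPR base plus a
// zero-extended 32-bit VGPR offset when possible.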
3230 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const {
3231   unsigned Opc;
3232   unsigned Size = MI.getOperand(3).getImm();
3233 
3234   switch (Size) {
3235   default:
3236     return false;
3237   case 1:
3238     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3239     break;
3240   case 2:
3241     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3242     break;
3243   case 4:
3244     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3245     break;
3246   }
3247 
3248   MachineBasicBlock *MBB = MI.getParent();
3249   const DebugLoc &DL = MI.getDebugLoc();
3250   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3251     .add(MI.getOperand(2));
3252 
3253   Register Addr = MI.getOperand(1).getReg();
3254   Register VOffset;
3255   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3256   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3257   if (!isSGPR(Addr)) {
3258     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3259     if (isSGPR(AddrDef->Reg)) {
3260       Addr = AddrDef->Reg;
3261     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3262       Register SAddr =
3263           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3264       if (SAddr && isSGPR(SAddr)) {
3265         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3266         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3267           Addr = SAddr;
3268           VOffset = Off;
3269         }
3270       }
3271     }
3272   }
3273 
3274   if (isSGPR(Addr)) {
3275     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3276     if (!VOffset) {
3277       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3278       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3279         .addImm(0);
3280     }
3281   }
3282 
3283   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3284     .addReg(Addr);
3285 
3286   if (isSGPR(Addr))
3287     MIB.addReg(VOffset);
3288 
3289   MIB.add(MI.getOperand(4))  // offset
3290      .add(MI.getOperand(5)); // cpol
3291 
3292   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3293   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3294   LoadPtrI.Offset = MI.getOperand(4).getImm();
3295   MachinePointerInfo StorePtrI = LoadPtrI;
3296   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3297   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3298   auto F = LoadMMO->getFlags() &
3299            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3300   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3301                                      Size, LoadMMO->getBaseAlign());
3302   MachineMemOperand *StoreMMO =
3303       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3304                                sizeof(int32_t), Align(4));
3305 
3306   MIB.setMemRefs({LoadMMO, StoreMMO});
3307 
3308   MI.eraseFromParent();
3309   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3310 }
3311 
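// The BVH intersect ray pseudo carries its target opcode as an immediate
// operand; selection just installs that opcode, drops the operand, and adds
// the implicit operands.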
3312 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const {
3313   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3314   MI.removeOperand(1);
3315   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3316   return true;
3317 }
3318 
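// Select the sparse MFMA (smfmac) intrinsics by rewriting the instruction in
// place: install the target opcode and move the VDst_In operand to the end of
// the operand list.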
3319 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3320   unsigned Opc;
3321   switch (MI.getIntrinsicID()) {
3322   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3323     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3324     break;
3325   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3326     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3327     break;
3328   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3329     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3330     break;
3331   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3332     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3333     break;
3334   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3335     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3336     break;
3337   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3338     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3339     break;
3340   default:
3341     llvm_unreachable("unhandled smfmac intrinsic");
3342   }
3343 
3344   auto VDst_In = MI.getOperand(4);
3345 
3346   MI.setDesc(TII.get(Opc));
3347   MI.removeOperand(4); // VDst_In
3348   MI.removeOperand(1); // Intrinsic ID
3349   MI.addOperand(VDst_In); // Re-add VDst_In to the end
3350   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3351   return true;
3352 }
3353 
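// Lower G_AMDGPU_WAVE_ADDRESS: the wave-level address is the incoming scratch
// address shifted right by log2 of the wavefront size.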
3354 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3355   Register DstReg = MI.getOperand(0).getReg();
3356   Register SrcReg = MI.getOperand(1).getReg();
3357   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3358   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3359   MachineBasicBlock *MBB = MI.getParent();
3360   const DebugLoc &DL = MI.getDebugLoc();
3361 
3362   if (IsVALU) {
3363     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3364       .addImm(Subtarget->getWavefrontSizeLog2())
3365       .addReg(SrcReg);
3366   } else {
3367     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3368       .addReg(SrcReg)
3369       .addImm(Subtarget->getWavefrontSizeLog2());
3370   }
3371 
3372   const TargetRegisterClass &RC =
3373       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3374   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3375     return false;
3376 
3377   MI.eraseFromParent();
3378   return true;
3379 }
3380 
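// Top-level selection entry point: dispatch on the generic opcode, using the
// TableGen'erated patterns (selectImpl) for some opcodes and the manual
// selectors in this file for the rest.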
3381 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3382   if (I.isPHI())
3383     return selectPHI(I);
3384 
3385   if (!I.isPreISelOpcode()) {
3386     if (I.isCopy())
3387       return selectCOPY(I);
3388     return true;
3389   }
3390 
3391   switch (I.getOpcode()) {
3392   case TargetOpcode::G_AND:
3393   case TargetOpcode::G_OR:
3394   case TargetOpcode::G_XOR:
3395     if (selectImpl(I, *CoverageInfo))
3396       return true;
3397     return selectG_AND_OR_XOR(I);
3398   case TargetOpcode::G_ADD:
3399   case TargetOpcode::G_SUB:
3400     if (selectImpl(I, *CoverageInfo))
3401       return true;
3402     return selectG_ADD_SUB(I);
3403   case TargetOpcode::G_UADDO:
3404   case TargetOpcode::G_USUBO:
3405   case TargetOpcode::G_UADDE:
3406   case TargetOpcode::G_USUBE:
3407     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3408   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3409   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3410     return selectG_AMDGPU_MAD_64_32(I);
3411   case TargetOpcode::G_INTTOPTR:
3412   case TargetOpcode::G_BITCAST:
3413   case TargetOpcode::G_PTRTOINT:
3414     return selectCOPY(I);
3415   case TargetOpcode::G_CONSTANT:
3416   case TargetOpcode::G_FCONSTANT:
3417     return selectG_CONSTANT(I);
3418   case TargetOpcode::G_FNEG:
3419     if (selectImpl(I, *CoverageInfo))
3420       return true;
3421     return selectG_FNEG(I);
3422   case TargetOpcode::G_FABS:
3423     if (selectImpl(I, *CoverageInfo))
3424       return true;
3425     return selectG_FABS(I);
3426   case TargetOpcode::G_EXTRACT:
3427     return selectG_EXTRACT(I);
3428   case TargetOpcode::G_MERGE_VALUES:
3429   case TargetOpcode::G_BUILD_VECTOR:
3430   case TargetOpcode::G_CONCAT_VECTORS:
3431     return selectG_MERGE_VALUES(I);
3432   case TargetOpcode::G_UNMERGE_VALUES:
3433     return selectG_UNMERGE_VALUES(I);
3434   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3435     return selectG_BUILD_VECTOR_TRUNC(I);
3436   case TargetOpcode::G_PTR_ADD:
3437     return selectG_PTR_ADD(I);
3438   case TargetOpcode::G_IMPLICIT_DEF:
3439     return selectG_IMPLICIT_DEF(I);
3440   case TargetOpcode::G_FREEZE:
3441     return selectCOPY(I);
3442   case TargetOpcode::G_INSERT:
3443     return selectG_INSERT(I);
3444   case TargetOpcode::G_INTRINSIC:
3445     return selectG_INTRINSIC(I);
3446   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3447     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3448   case TargetOpcode::G_ICMP:
3449     if (selectG_ICMP(I))
3450       return true;
3451     return selectImpl(I, *CoverageInfo);
3452   case TargetOpcode::G_LOAD:
3453   case TargetOpcode::G_STORE:
3454   case TargetOpcode::G_ATOMIC_CMPXCHG:
3455   case TargetOpcode::G_ATOMICRMW_XCHG:
3456   case TargetOpcode::G_ATOMICRMW_ADD:
3457   case TargetOpcode::G_ATOMICRMW_SUB:
3458   case TargetOpcode::G_ATOMICRMW_AND:
3459   case TargetOpcode::G_ATOMICRMW_OR:
3460   case TargetOpcode::G_ATOMICRMW_XOR:
3461   case TargetOpcode::G_ATOMICRMW_MIN:
3462   case TargetOpcode::G_ATOMICRMW_MAX:
3463   case TargetOpcode::G_ATOMICRMW_UMIN:
3464   case TargetOpcode::G_ATOMICRMW_UMAX:
3465   case TargetOpcode::G_ATOMICRMW_FADD:
3466   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3467   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3468   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3469   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3470     return selectG_LOAD_STORE_ATOMICRMW(I);
3471   case TargetOpcode::G_SELECT:
3472     return selectG_SELECT(I);
3473   case TargetOpcode::G_TRUNC:
3474     return selectG_TRUNC(I);
3475   case TargetOpcode::G_SEXT:
3476   case TargetOpcode::G_ZEXT:
3477   case TargetOpcode::G_ANYEXT:
3478   case TargetOpcode::G_SEXT_INREG:
3479     if (selectImpl(I, *CoverageInfo))
3480       return true;
3481     return selectG_SZA_EXT(I);
3482   case TargetOpcode::G_BRCOND:
3483     return selectG_BRCOND(I);
3484   case TargetOpcode::G_GLOBAL_VALUE:
3485     return selectG_GLOBAL_VALUE(I);
3486   case TargetOpcode::G_PTRMASK:
3487     return selectG_PTRMASK(I);
3488   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3489     return selectG_EXTRACT_VECTOR_ELT(I);
3490   case TargetOpcode::G_INSERT_VECTOR_ELT:
3491     return selectG_INSERT_VECTOR_ELT(I);
3492   case TargetOpcode::G_SHUFFLE_VECTOR:
3493     return selectG_SHUFFLE_VECTOR(I);
3494   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3495   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3496   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3497   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3498     const AMDGPU::ImageDimIntrinsicInfo *Intr
3499       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3500     assert(Intr && "not an image intrinsic with image pseudo");
3501     return selectImageIntrinsic(I, Intr);
3502   }
3503   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3504     return selectBVHIntrinsic(I);
3505   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3506     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3507   case AMDGPU::G_SBFX:
3508   case AMDGPU::G_UBFX:
3509     return selectG_SBFX_UBFX(I);
3510   case AMDGPU::G_SI_CALL:
3511     I.setDesc(TII.get(AMDGPU::SI_CALL));
3512     return true;
3513   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3514     return selectWaveAddress(I);
3515   default:
3516     return selectImpl(I, *CoverageInfo);
3517   }
3518   return false;
3519 }
3520 
3521 InstructionSelector::ComplexRendererFns
3522 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3523   return {{
3524       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3525   }};
3527 }
3528 
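// Compute VOP3 source modifiers by peeling fneg (and, if allowed, fabs) off
// the source. If modifiers were found on a non-VGPR source, or ForceVGPR is
// set, the source is first copied to a VGPR to respect the constant bus
// restriction.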
3529 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3530     MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const {
3531   Register Src = Root.getReg();
3532   Register OrigSrc = Src;
3533   unsigned Mods = 0;
3534   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3535 
3536   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3537     Src = MI->getOperand(1).getReg();
3538     Mods |= SISrcMods::NEG;
3539     MI = getDefIgnoringCopies(Src, *MRI);
3540   }
3541 
3542   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3543     Src = MI->getOperand(1).getReg();
3544     Mods |= SISrcMods::ABS;
3545   }
3546 
3547   if (OpSel)
3548     Mods |= SISrcMods::OP_SEL_0;
3549 
3550   if ((Mods != 0 || ForceVGPR) &&
3551       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3552     MachineInstr *UseMI = Root.getParent();
3553 
3554     // If we looked through copies to find source modifiers on an SGPR operand,
3555     // we now have an SGPR register source. To avoid potentially violating the
3556     // constant bus restriction, we need to insert a copy to a VGPR.
3557     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3558     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3559             TII.get(AMDGPU::COPY), VGPRSrc)
3560       .addReg(Src);
3561     Src = VGPRSrc;
3562   }
3563 
3564   return std::make_pair(Src, Mods);
3565 }
3566 
3567 ///
3568 /// This will select either an SGPR or VGPR operand and will save us from
3569 /// having to write an extra tablegen pattern.
3570 InstructionSelector::ComplexRendererFns
3571 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3572   return {{
3573       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3574   }};
3575 }
3576 
3577 InstructionSelector::ComplexRendererFns
3578 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3579   Register Src;
3580   unsigned Mods;
3581   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3582 
3583   return {{
3584       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3585       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3586       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3587       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3588   }};
3589 }
3590 
3591 InstructionSelector::ComplexRendererFns
3592 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3593   Register Src;
3594   unsigned Mods;
3595   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3596 
3597   return {{
3598       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3599       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3600       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3601       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3602   }};
3603 }
3604 
3605 InstructionSelector::ComplexRendererFns
3606 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3607   return {{
3608       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3609       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3610       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3611   }};
3612 }
3613 
3614 InstructionSelector::ComplexRendererFns
3615 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3616   Register Src;
3617   unsigned Mods;
3618   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3619 
3620   return {{
3621       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3622       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3623   }};
3624 }
3625 
3626 InstructionSelector::ComplexRendererFns
3627 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3628   Register Src;
3629   unsigned Mods;
3630   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3631 
3632   return {{
3633       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3634       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3635   }};
3636 }
3637 
3638 InstructionSelector::ComplexRendererFns
3639 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3640   Register Reg = Root.getReg();
3641   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3642   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3643               Def->getOpcode() == AMDGPU::G_FABS))
3644     return {};
3645   return {{
3646       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3647   }};
3648 }
3649 
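// Compute VOP3P (packed) source modifiers. Only an fneg of a v2s16 source is
// folded (as NEG | NEG_HI); there is no abs modifier for packed operations and
// OP_SEL_1 is always set.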
3650 std::pair<Register, unsigned>
3651 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3652   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3653   unsigned Mods = 0;
3654   MachineInstr *MI = MRI.getVRegDef(Src);
3655 
3656   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3657       // It's possible to see an f32 fneg here, but unlikely.
3658       // TODO: Treat f32 fneg as only high bit.
3659       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3660     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3661     Src = MI->getOperand(1).getReg();
3662     MI = MRI.getVRegDef(Src);
3663   }
3664 
3665   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3666   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3667 
3668   // Packed instructions do not have abs modifiers.
3669   Mods |= SISrcMods::OP_SEL_1;
3670 
3671   return std::make_pair(Src, Mods);
3672 }
3673 
3674 InstructionSelector::ComplexRendererFns
3675 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3676   MachineRegisterInfo &MRI
3677     = Root.getParent()->getParent()->getParent()->getRegInfo();
3678 
3679   Register Src;
3680   unsigned Mods;
3681   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3682 
3683   return {{
3684       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3685       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3686   }};
3687 }
3688 
3689 InstructionSelector::ComplexRendererFns
3690 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3691   MachineRegisterInfo &MRI
3692     = Root.getParent()->getParent()->getParent()->getRegInfo();
3693 
3694   Register Src;
3695   unsigned Mods;
3696   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3697 
3698   return {{
3699       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3700       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3701   }};
3702 }
3703 
3704 InstructionSelector::ComplexRendererFns
3705 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3706   // Literal i1 value set in the intrinsic; represents SrcMods for the next
3707   // operand. The value is in the Imm operand as i1 sign extended to int64_t.
3708   // 1 (i.e. -1) promotes packed values to signed, 0 treats them as unsigned.
3709   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3710          "expected i1 value");
3711   unsigned Mods = SISrcMods::OP_SEL_1;
3712   if (Root.getImm() == -1)
3713     Mods ^= SISrcMods::NEG;
3714   return {{
3715       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3716   }};
3717 }
3718 
3719 InstructionSelector::ComplexRendererFns
3720 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3721   Register Src;
3722   unsigned Mods;
3723   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3724   if (!isKnownNeverNaN(Src, *MRI))
3725     return None;
3726 
3727   return {{
3728       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3729       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3730   }};
3731 }
3732 
3733 InstructionSelector::ComplexRendererFns
3734 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3735   // FIXME: Handle op_sel
3736   return {{
3737       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3738       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3739   }};
3740 }
3741 
3742 InstructionSelector::ComplexRendererFns
3743 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3744   Register Src;
3745   unsigned Mods;
3746   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3747                                            /* AllowAbs */ false,
3748                                            /* OpSel */ false,
3749                                            /* ForceVGPR */ true);
3750 
3751   return {{
3752       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3753       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3754   }};
3755 }
3756 
3757 InstructionSelector::ComplexRendererFns
3758 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3759   Register Src;
3760   unsigned Mods;
3761   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3762                                            /* AllowAbs */ false,
3763                                            /* OpSel */ true,
3764                                            /* ForceVGPR */ true);
3765 
3766   return {{
3767       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3768       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3769   }};
3770 }
3771 
3772 InstructionSelector::ComplexRendererFns
3773 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3774   SmallVector<GEPInfo, 4> AddrInfo;
3775   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3776 
3777   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3778     return None;
3779 
3780   const GEPInfo &GEPInfo = AddrInfo[0];
3781   Optional<int64_t> EncodedImm =
3782       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3783   if (!EncodedImm)
3784     return None;
3785 
3786   unsigned PtrReg = GEPInfo.SgprParts[0];
3787   return {{
3788     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3789     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3790   }};
3791 }
3792 
3793 InstructionSelector::ComplexRendererFns
3794 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3795   SmallVector<GEPInfo, 4> AddrInfo;
3796   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3797 
3798   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3799     return None;
3800 
3801   const GEPInfo &GEPInfo = AddrInfo[0];
3802   Register PtrReg = GEPInfo.SgprParts[0];
3803   Optional<int64_t> EncodedImm =
3804       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3805   if (!EncodedImm)
3806     return None;
3807 
3808   return {{
3809     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3810     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3811   }};
3812 }
3813 
3814 InstructionSelector::ComplexRendererFns
3815 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3816   MachineInstr *MI = Root.getParent();
3817   MachineBasicBlock *MBB = MI->getParent();
3818 
3819   SmallVector<GEPInfo, 4> AddrInfo;
3820   getAddrModeInfo(*MI, *MRI, AddrInfo);
3821 
3822   // FIXME: We should shrink the GEP if the offset is known to be <= 32 bits,
3823   // then we can select all ptr + 32-bit offsets, not just immediate offsets.
3824   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3825     return None;
3826 
3827   const GEPInfo &GEPInfo = AddrInfo[0];
3828   // SGPR offset is unsigned.
3829   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3830     return None;
3831 
3832   // If we make it this far we have a load with a 32-bit immediate offset.
3833   // It is OK to select this using a sgpr offset, because we have already
3834   // failed trying to select this load into one of the _IMM variants since
3835   // the _IMM Patterns are considered before the _SGPR patterns.
3836   Register PtrReg = GEPInfo.SgprParts[0];
3837   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3838   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3839           .addImm(GEPInfo.Imm);
3840   return {{
3841     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3842     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3843   }};
3844 }
3845 
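// Split a flat address into a base register and an immediate offset that is
// legal for the given flat variant; if that is not possible, return the
// original address with an offset of 0.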
3846 std::pair<Register, int>
3847 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3848                                                 uint64_t FlatVariant) const {
3849   MachineInstr *MI = Root.getParent();
3850 
3851   auto Default = std::make_pair(Root.getReg(), 0);
3852 
3853   if (!STI.hasFlatInstOffsets())
3854     return Default;
3855 
3856   Register PtrBase;
3857   int64_t ConstOffset;
3858   std::tie(PtrBase, ConstOffset) =
3859       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3860   if (ConstOffset == 0)
3861     return Default;
3862 
3863   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3864   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3865     return Default;
3866 
3867   return std::make_pair(PtrBase, ConstOffset);
3868 }
3869 
3870 InstructionSelector::ComplexRendererFns
3871 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3872   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3873 
3874   return {{
3875       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3876       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3877     }};
3878 }
3879 
3880 InstructionSelector::ComplexRendererFns
3881 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3882   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3883 
3884   return {{
3885       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3886       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3887   }};
3888 }
3889 
3890 InstructionSelector::ComplexRendererFns
3891 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3892   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3893 
3894   return {{
3895       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3896       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3897     }};
3898 }
3899 
3900 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3901 InstructionSelector::ComplexRendererFns
3902 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3903   Register Addr = Root.getReg();
3904   Register PtrBase;
3905   int64_t ConstOffset;
3906   int64_t ImmOffset = 0;
3907 
3908   // Match the immediate offset first, which canonically is moved as low as
3909   // possible.
3910   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3911 
3912   if (ConstOffset != 0) {
3913     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3914                               SIInstrFlags::FlatGlobal)) {
3915       Addr = PtrBase;
3916       ImmOffset = ConstOffset;
3917     } else {
3918       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3919       if (isSGPR(PtrBaseDef->Reg)) {
3920         if (ConstOffset > 0) {
3921           // Offset is too large.
3922           //
3923           // saddr + large_offset -> saddr +
3924           //                         (voffset = large_offset & ~MaxOffset) +
3925           //                         (large_offset & MaxOffset);
3926           int64_t SplitImmOffset, RemainderOffset;
3927           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3928               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3929 
3930           if (isUInt<32>(RemainderOffset)) {
3931             MachineInstr *MI = Root.getParent();
3932             MachineBasicBlock *MBB = MI->getParent();
3933             Register HighBits =
3934                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3935 
3936             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3937                     HighBits)
3938                 .addImm(RemainderOffset);
3939 
3940             return {{
3941                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3942                 [=](MachineInstrBuilder &MIB) {
3943                   MIB.addReg(HighBits);
3944                 }, // voffset
3945                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3946             }};
3947           }
3948         }
3949 
3950         // We are adding a 64-bit SGPR and a constant. If the constant bus limit
3951         // is 1 we would need to perform 1 or 2 extra moves for each half of
3952         // the constant, and it is better to do a scalar add and then issue a
3953         // single VALU instruction to materialize zero. Otherwise it takes fewer
3954         // instructions to perform VALU adds with immediates or inline literals.
3955         unsigned NumLiterals =
3956             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3957             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3958         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3959           return None;
3960       }
3961     }
3962   }
3963 
3964   // Match the variable offset.
3965   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3966   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3967     // Look through the SGPR->VGPR copy.
3968     Register SAddr =
3969         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3970 
3971     if (SAddr && isSGPR(SAddr)) {
3972       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3973 
3974       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3975       // inserted later.
3976       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3977         return {{[=](MachineInstrBuilder &MIB) { // saddr
3978                    MIB.addReg(SAddr);
3979                  },
3980                  [=](MachineInstrBuilder &MIB) { // voffset
3981                    MIB.addReg(VOffset);
3982                  },
3983                  [=](MachineInstrBuilder &MIB) { // offset
3984                    MIB.addImm(ImmOffset);
3985                  }}};
3986       }
3987     }
3988   }
3989 
3990   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3991   // drop this.
3992   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3993       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3994     return None;
3995 
3996   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3997   // moves required to copy a 64-bit SGPR to VGPR.
3998   MachineInstr *MI = Root.getParent();
3999   MachineBasicBlock *MBB = MI->getParent();
4000   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4001 
4002   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4003       .addImm(0);
4004 
4005   return {{
4006       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4007       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
4008       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
4009   }};
4010 }
4011 
4012 InstructionSelector::ComplexRendererFns
4013 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4014   Register Addr = Root.getReg();
4015   Register PtrBase;
4016   int64_t ConstOffset;
4017   int64_t ImmOffset = 0;
4018 
4019   // Match the immediate offset first, which canonically is moved as low as
4020   // possible.
4021   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4022 
4023   if (ConstOffset != 0 &&
4024       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4025                             SIInstrFlags::FlatScratch)) {
4026     Addr = PtrBase;
4027     ImmOffset = ConstOffset;
4028   }
4029 
4030   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4031   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4032     int FI = AddrDef->MI->getOperand(1).getIndex();
4033     return {{
4034         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4035         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4036     }};
4037   }
4038 
4039   Register SAddr = AddrDef->Reg;
4040 
4041   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4042     Register LHS = AddrDef->MI->getOperand(1).getReg();
4043     Register RHS = AddrDef->MI->getOperand(2).getReg();
4044     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4045     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4046 
4047     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4048         isSGPR(RHSDef->Reg)) {
4049       int FI = LHSDef->MI->getOperand(1).getIndex();
4050       MachineInstr &I = *Root.getParent();
4051       MachineBasicBlock *BB = I.getParent();
4052       const DebugLoc &DL = I.getDebugLoc();
4053       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4054 
4055       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4056           .addFrameIndex(FI)
4057           .addReg(RHSDef->Reg);
4058     }
4059   }
4060 
4061   if (!isSGPR(SAddr))
4062     return None;
4063 
4064   return {{
4065       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4066       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4067   }};
4068 }
4069 
4070 // Check whether the flat scratch SVS swizzle bug affects this access.
4071 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4072     Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4073   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4074     return false;
4075 
4076   // The bug affects the swizzling of SVS accesses if there is any carry out
4077   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4078   // voffset to (soffset + inst_offset).
4079   auto VKnown = KnownBits->getKnownBits(VAddr);
4080   auto SKnown = KnownBits::computeForAddSub(
4081       true, false, KnownBits->getKnownBits(SAddr),
4082       KnownBits::makeConstant(APInt(32, ImmOffset)));
4083   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4084   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4085   return (VMax & 3) + (SMax & 3) >= 4;
4086 }
4087 
4088 InstructionSelector::ComplexRendererFns
4089 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4090   Register Addr = Root.getReg();
4091   Register PtrBase;
4092   int64_t ConstOffset;
4093   int64_t ImmOffset = 0;
4094 
4095   // Match the immediate offset first, which canonically is moved as low as
4096   // possible.
4097   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4098 
4099   if (ConstOffset != 0 &&
4100       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4101     Addr = PtrBase;
4102     ImmOffset = ConstOffset;
4103   }
4104 
4105   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4106   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4107     return None;
4108 
4109   Register RHS = AddrDef->MI->getOperand(2).getReg();
4110   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4111     return None;
4112 
4113   Register LHS = AddrDef->MI->getOperand(1).getReg();
4114   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4115 
4116   if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4117     return None;
4118 
4119   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4120     int FI = LHSDef->MI->getOperand(1).getIndex();
4121     return {{
4122         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4123         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4124         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4125     }};
4126   }
4127 
4128   if (!isSGPR(LHS))
4129     return None;
4130 
4131   return {{
4132       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4133       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4134       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4135   }};
4136 }
4137 
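// Select rsrc/vaddr/soffset/offset operands for an offen MUBUF scratch access,
// materializing the high bits of a constant address or folding a frame index
// and a legal immediate offset into vaddr where possible.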
4138 InstructionSelector::ComplexRendererFns
4139 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4140   MachineInstr *MI = Root.getParent();
4141   MachineBasicBlock *MBB = MI->getParent();
4142   MachineFunction *MF = MBB->getParent();
4143   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4144 
4145   int64_t Offset = 0;
4146   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4147       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4148     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4149 
4150     // TODO: Should this be inside the render function? The iterator seems to
4151     // move.
4152     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4153             HighBits)
4154       .addImm(Offset & ~4095);
4155 
4156     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4157                MIB.addReg(Info->getScratchRSrcReg());
4158              },
4159              [=](MachineInstrBuilder &MIB) { // vaddr
4160                MIB.addReg(HighBits);
4161              },
4162              [=](MachineInstrBuilder &MIB) { // soffset
4163                // Use constant zero for soffset and rely on eliminateFrameIndex
4164                // to choose the appropriate frame register if need be.
4165                MIB.addImm(0);
4166              },
4167              [=](MachineInstrBuilder &MIB) { // offset
4168                MIB.addImm(Offset & 4095);
4169              }}};
4170   }
4171 
4172   assert(Offset == 0 || Offset == -1);
4173 
4174   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4175   // offsets.
4176   Optional<int> FI;
4177   Register VAddr = Root.getReg();
4178   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4179     Register PtrBase;
4180     int64_t ConstOffset;
4181     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4182     if (ConstOffset != 0) {
4183       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4184           (!STI.privateMemoryResourceIsRangeChecked() ||
4185            KnownBits->signBitIsZero(PtrBase))) {
4186         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4187         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4188           FI = PtrBaseDef->getOperand(1).getIndex();
4189         else
4190           VAddr = PtrBase;
4191         Offset = ConstOffset;
4192       }
4193     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4194       FI = RootDef->getOperand(1).getIndex();
4195     }
4196   }
4197 
4198   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4199              MIB.addReg(Info->getScratchRSrcReg());
4200            },
4201            [=](MachineInstrBuilder &MIB) { // vaddr
4202              if (FI.hasValue())
4203                MIB.addFrameIndex(FI.getValue());
4204              else
4205                MIB.addReg(VAddr);
4206            },
4207            [=](MachineInstrBuilder &MIB) { // soffset
4208              // Use constant zero for soffset and rely on eliminateFrameIndex
4209              // to choose the appropriate frame register if need be.
4210              MIB.addImm(0);
4211            },
4212            [=](MachineInstrBuilder &MIB) { // offset
4213              MIB.addImm(Offset);
4214            }}};
4215 }
4216 
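// Check whether a constant offset fits the unsigned 16-bit DS offset field
// and, on targets where it matters, that the base is known to be non-negative.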
4217 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4218                                                 int64_t Offset) const {
4219   if (!isUInt<16>(Offset))
4220     return false;
4221 
4222   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4223     return true;
4224 
4225   // On Southern Islands, instructions with a negative base value and an offset
4226   // don't seem to work.
4227   return KnownBits->signBitIsZero(Base);
4228 }
4229 
4230 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4231                                                  int64_t Offset1,
4232                                                  unsigned Size) const {
4233   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4234     return false;
4235   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4236     return false;
4237 
4238   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4239     return true;
4240 
4241   // On Southern Islands, instructions with a negative base value and an offset
4242   // don't seem to work.
4243   return KnownBits->signBitIsZero(Base);
4244 }
4245 
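// Return true if a G_AND used to mask a shift amount is redundant, i.e. the
// mask cannot change the low ShAmtBits bits that the shift actually reads.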
4246 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4247                                                     unsigned ShAmtBits) const {
4248   assert(MI.getOpcode() == TargetOpcode::G_AND);
4249 
4250   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4251   if (!RHS)
4252     return false;
4253 
4254   if (RHS->countTrailingOnes() >= ShAmtBits)
4255     return true;
4256 
4257   const APInt &LHSKnownZeros =
4258       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4259   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4260 }
4261 
4262 // Return the wave level SGPR base address if this is a wave address.
4263 static Register getWaveAddress(const MachineInstr *Def) {
4264   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4265              ? Def->getOperand(1).getReg()
4266              : Register();
4267 }
4268 
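// Select rsrc/soffset/offset operands for an offset-only MUBUF scratch access,
// handling a wave address base (used as soffset) plus an optional legal
// immediate offset, or a plain constant offset.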
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFScratchOffset(
    MachineOperand &Root) const {
  Register Reg = Root.getReg();
  const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();

  const MachineInstr *Def = MRI->getVRegDef(Reg);
  if (Register WaveBase = getWaveAddress(Def)) {
    return {{
        [=](MachineInstrBuilder &MIB) { // rsrc
          MIB.addReg(Info->getScratchRSrcReg());
        },
        [=](MachineInstrBuilder &MIB) { // soffset
          MIB.addReg(WaveBase);
        },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
    }};
  }

  int64_t Offset = 0;

  // FIXME: Copy check is a hack
  Register BasePtr;
  if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
    if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
      return {};
    const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
    Register WaveBase = getWaveAddress(BasePtrDef);
    if (!WaveBase)
      return {};

    return {{
        [=](MachineInstrBuilder &MIB) { // rsrc
          MIB.addReg(Info->getScratchRSrcReg());
        },
        [=](MachineInstrBuilder &MIB) { // soffset
          MIB.addReg(WaveBase);
        },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
    }};
  }

  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
      !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) { // rsrc
        MIB.addReg(Info->getScratchRSrcReg());
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
  }};
}

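// Match a DS address of the form (add base, constant) where the constant is a
// legal single-offset DS immediate; otherwise return the root register with a
// zero offset.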
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    if (isDSOffsetLegal(PtrBase, Offset)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, Offset);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 4);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
  return selectDSReadWrite2(Root, 8);
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
                                              unsigned Size) const {
  Register Reg;
  unsigned Offset;
  std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset + 1); }
    }};
}

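// Match (add base, constant) for a ds_read2/ds_write2-style access of
// \p Size-byte elements. On success, return the base pointer and the first
// offset already scaled to element units; otherwise return the root register
// with a zero offset.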
std::pair<Register, unsigned>
AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
                                                  unsigned Size) const {
  const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (!RootDef)
    return std::make_pair(Root.getReg(), 0);

  int64_t ConstAddr = 0;

  Register PtrBase;
  int64_t Offset;
  std::tie(PtrBase, Offset) =
    getPtrBaseWithConstantOffset(Root.getReg(), *MRI);

  if (Offset) {
    int64_t OffsetValue0 = Offset;
    int64_t OffsetValue1 = Offset + Size;
    if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
      // (add n0, c0)
      return std::make_pair(PtrBase, OffsetValue0 / Size);
    }
  } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
    // TODO

  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
    // TODO

  }

  return std::make_pair(Root.getReg(), 0);
}

/// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
/// the base value with the constant offset. There may be intervening copies
/// between \p Root and the identified constant. Returns \p Root, 0 if this does
/// not match the pattern.
std::pair<Register, int64_t>
AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
  Register Root, const MachineRegisterInfo &MRI) const {
  MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
  if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
    return {Root, 0};

  MachineOperand &RHS = RootI->getOperand(2);
  Optional<ValueAndVReg> MaybeOffset =
      getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
  if (!MaybeOffset)
    return {Root, 0};
  return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
}

static void addZeroImm(MachineInstrBuilder &MIB) {
  MIB.addImm(0);
}

/// Return a resource descriptor for use with an arbitrary 64-bit pointer. If
/// \p BasePtr is not valid, a null base pointer will be used.
static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                          uint32_t FormatLo, uint32_t FormatHi,
                          Register BasePtr) {
  Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
  Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);

  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc2)
    .addImm(FormatLo);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(RSrc3)
    .addImm(FormatHi);

  // Build the 64-bit half holding the format constants before building the
  // full 128-bit register. If we are building multiple resource descriptors,
  // this will allow CSEing of the 2-component register.
  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrcHi)
    .addReg(RSrc2)
    .addImm(AMDGPU::sub0)
    .addReg(RSrc3)
    .addImm(AMDGPU::sub1);

  Register RSrcLo = BasePtr;
  if (!BasePtr) {
    RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    B.buildInstr(AMDGPU::S_MOV_B64)
      .addDef(RSrcLo)
      .addImm(0);
  }

  B.buildInstr(AMDGPU::REG_SEQUENCE)
    .addDef(RSrc)
    .addReg(RSrcLo)
    .addImm(AMDGPU::sub0_sub1)
    .addReg(RSrcHi)
    .addImm(AMDGPU::sub2_sub3);

  return RSrc;
}

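/// Build a resource descriptor for MUBUF addr64 addressing, using \p BasePtr
/// (possibly null) as the descriptor base and only the high half of the
/// default data format.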
static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                                const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
}

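/// Build a resource descriptor for MUBUF offset addressing, where the address
/// is carried entirely by the descriptor and soffset/offset operands; the low
/// format word is set to all ones.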
static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               const SIInstrInfo &TII, Register BasePtr) {
  uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();

  // FIXME: Why are half the "default" bits ignored based on the addressing
  // mode?
  return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
}

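/// Decompose \p Src for MUBUF addressing: split off a constant offset that
/// fits in 32 bits into Offset, and if the remaining base is itself a
/// G_PTR_ADD, record its two operands as N2 and N3.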
AMDGPUInstructionSelector::MUBUFAddressData
AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
  MUBUFAddressData Data;
  Data.N0 = Src;

  Register PtrBase;
  int64_t Offset;

  std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
  if (isUInt<32>(Offset)) {
    Data.N0 = PtrBase;
    Data.Offset = Offset;
  }

  if (MachineInstr *InputAdd
      = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
    Data.N2 = InputAdd->getOperand(1).getReg();
    Data.N3 = InputAdd->getOperand(2).getReg();

    // FIXME: Need to fix extra SGPR->VGPR copies inserted
    // FIXME: We don't actually know that these values were defined by operand 0
    // of their defs.
    //
    // TODO: Remove this when we have copy folding optimizations after
    // RegBankSelect.
    Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
    Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
  }

  return Data;
}


/// Return whether the addr64 MUBUF addressing mode should be used for the
/// given address.
bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
  // (ptr_add N2, N3) -> addr64, or
  // (ptr_add (ptr_add N2, N3), C1) -> addr64
  if (Addr.N2)
    return true;

  const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
  return N0Bank->getID() == AMDGPU::VGPRRegBankID;
}

/// Split an immediate offset \p ImmOffset depending on whether it fits in the
/// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
/// component.
void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
  MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
  if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
    return;

  // The offset does not fit in the immediate field; materialize it in soffset.
  SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
  B.buildInstr(AMDGPU::S_MOV_B32)
    .addDef(SOffset)
    .addImm(ImmOffset);
  ImmOffset = 0;
}

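/// Match a 64-bit global address for MUBUF addr64 addressing. Chooses which
/// component of the address goes into vaddr and which (if any) becomes the
/// descriptor base, builds the resource descriptor, and splits an illegal
/// immediate offset into soffset.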
bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
  MachineOperand &Root, Register &VAddr, Register &RSrcReg,
  Register &SOffset, int64_t &Offset) const {
  // FIXME: Predicates should stop this from reaching here.
  // The addr64 bit was removed for Volcanic Islands.
  if (!STI.hasAddr64() || STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (!shouldUseAddr64(AddrData))
    return false;

  Register N0 = AddrData.N0;
  Register N2 = AddrData.N2;
  Register N3 = AddrData.N3;
  Offset = AddrData.Offset;

  // Base pointer for the SRD.
  Register SRDPtr;

  if (N2) {
    if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
      assert(N3);
      if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
        // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
        // addr64, and construct the default resource from a 0 address.
        VAddr = N0;
      } else {
        SRDPtr = N3;
        VAddr = N2;
      }
    } else {
      // N2 is not divergent.
      SRDPtr = N2;
      VAddr = N3;
    }
  } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
    // Use the default null pointer in the resource.
    VAddr = N0;
  } else {
    // N0 -> offset, or
    // (N0 + C1) -> offset
    SRDPtr = N0;
  }

  MachineIRBuilder B(*Root.getParent());
  RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}

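/// Match an address for MUBUF offset addressing: the base pointer goes into
/// the resource descriptor, and an illegal immediate offset is split into
/// soffset.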
bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
  MachineOperand &Root, Register &RSrcReg, Register &SOffset,
  int64_t &Offset) const {

  // FIXME: Pattern should not reach here.
  if (STI.useFlatForGlobal())
    return false;

  MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
  if (shouldUseAddr64(AddrData))
    return false;

  // N0 -> offset, or
  // (N0 + C1) -> offset
  Register SRDPtr = AddrData.N0;
  Offset = AddrData.Offset;

  // TODO: Look through extensions for 32-bit soffset.
  MachineIRBuilder B(*Root.getParent());

  RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
  splitIllegalMUBUFOffset(B, SOffset, Offset);
  return true;
}


InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm  //  swz
    }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      addZeroImm, //  cpol
      addZeroImm, //  tfe
      addZeroImm, //  swz
    }};
}

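// Atomic variant of selectMUBUFAddr64: the cache policy operand is rendered
// with GLC set instead of zero.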
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
  Register VAddr;
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
    return {};

  // FIXME: Use defaulted operands for trailing 0s and remove from the complex
  // pattern.
  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // vaddr
        MIB.addReg(VAddr);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { // offset
        MIB.addImm(Offset);
      },
      [=](MachineInstrBuilder &MIB) {
        MIB.addImm(AMDGPU::CPol::GLC); // cpol
      }
    }};
}

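// Atomic variant of selectMUBUFOffset: the cache policy operand is rendered
// with GLC set instead of zero.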
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
  Register RSrcReg;
  Register SOffset;
  int64_t Offset = 0;

  if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
    return {};

  return {{
      [=](MachineInstrBuilder &MIB) {  // rsrc
        MIB.addReg(RSrcReg);
      },
      [=](MachineInstrBuilder &MIB) { // soffset
        if (SOffset)
          MIB.addReg(SOffset);
        else
          MIB.addImm(0);
      },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
      [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
    }};
}

/// Get an immediate that must fit in 32 bits, treated as zero-extended.
static Optional<uint64_t> getConstantZext32Val(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  // getIConstantVRegSExtVal sign-extends the value, so check that it still
  // fits in 32 bits.
  Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
  if (!OffsetVal || !isInt<32>(*OffsetVal))
    return None;
  return Lo_32(*OffsetVal);
}

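// Match a constant SMRD buffer offset and render its encoded form.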
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm =
      AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
}

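// Match a constant SMRD buffer offset encoded as a 32-bit literal; only used
// on Sea Islands.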
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
  assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);

  Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
  if (!OffsetVal)
    return {};

  Optional<int64_t> EncodedImm
    = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
  if (!EncodedImm)
    return {};

  return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
}


void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
}

void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx == -1);

  const MachineOperand &Op = MI.getOperand(1);
  if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
    MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
  else {
    assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
    MIB.addImm(Op.getCImm()->getSExtValue());
  }
}

void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
}

/// This only really exists to satisfy the DAG type-checking machinery, so it is
/// a no-op here.
void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  MIB.addImm(MI.getOperand(OpIdx).getImm());
}

void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
}

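// The swz flag is bit 3 of the combined cache policy immediate.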
void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
}

void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
                                             const MachineInstr &MI,
                                             int OpIdx) const {
  assert(OpIdx >= 0 && "expected to match an immediate operand");
  MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
}

void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                 const MachineInstr &MI,
                                                 int OpIdx) const {
  MIB.addFrameIndex(MI.getOperand(1).getIndex());
}

bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
  return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
}

bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
  return TII.isInlineConstant(Imm);
}
