1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
156         // We can't trust the high bits at this point, so clear them.
157 
158         // TODO: Skip masking high bits if def is known boolean.
159 
160         unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
209   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
259 
260   switch (SubIdx) {
261   default:
262     llvm_unreachable("do not know to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
337     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
380     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
395     .addImm(AMDGPU::sub1);
396 
397 
398   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   unsigned Opc;
468   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
469     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
470                      : AMDGPU::V_MAD_I64_I32_gfx11_e64;
471   else
472     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
473   I.setDesc(TII.get(Opc));
474   I.addOperand(*MF, MachineOperand::CreateImm(0));
475   I.addImplicitDefUseOperands(*MF);
476   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
477 }
478 
479 // TODO: We should probably legalize these to only using 32-bit results.
480 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
481   MachineBasicBlock *BB = I.getParent();
482   Register DstReg = I.getOperand(0).getReg();
483   Register SrcReg = I.getOperand(1).getReg();
484   LLT DstTy = MRI->getType(DstReg);
485   LLT SrcTy = MRI->getType(SrcReg);
486   const unsigned SrcSize = SrcTy.getSizeInBits();
487   unsigned DstSize = DstTy.getSizeInBits();
488 
489   // TODO: Should handle any multiple of 32 offset.
490   unsigned Offset = I.getOperand(2).getImm();
491   if (Offset % 32 != 0 || DstSize > 128)
492     return false;
493 
494   // 16-bit operations really use 32-bit registers.
495   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
496   if (DstSize == 16)
497     DstSize = 32;
498 
499   const TargetRegisterClass *DstRC =
500     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
501   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
502     return false;
503 
504   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
505   const TargetRegisterClass *SrcRC =
506       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
507   if (!SrcRC)
508     return false;
509   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
510                                                          DstSize / 32);
511   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
512   if (!SrcRC)
513     return false;
514 
515   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
516                                     *SrcRC, I.getOperand(1));
517   const DebugLoc &DL = I.getDebugLoc();
518   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
519     .addReg(SrcReg, 0, SubReg);
520 
521   I.eraseFromParent();
522   return true;
523 }
524 
525 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
526   MachineBasicBlock *BB = MI.getParent();
527   Register DstReg = MI.getOperand(0).getReg();
528   LLT DstTy = MRI->getType(DstReg);
529   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
530 
531   const unsigned SrcSize = SrcTy.getSizeInBits();
532   if (SrcSize < 32)
533     return selectImpl(MI, *CoverageInfo);
534 
535   const DebugLoc &DL = MI.getDebugLoc();
536   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
537   const unsigned DstSize = DstTy.getSizeInBits();
538   const TargetRegisterClass *DstRC =
539       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
540   if (!DstRC)
541     return false;
542 
543   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
544   MachineInstrBuilder MIB =
545     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
546   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
547     MachineOperand &Src = MI.getOperand(I + 1);
548     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
549     MIB.addImm(SubRegs[I]);
550 
551     const TargetRegisterClass *SrcRC
552       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
553     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
554       return false;
555   }
556 
557   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
558     return false;
559 
560   MI.eraseFromParent();
561   return true;
562 }
563 
564 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
565   MachineBasicBlock *BB = MI.getParent();
566   const int NumDst = MI.getNumOperands() - 1;
567 
568   MachineOperand &Src = MI.getOperand(NumDst);
569 
570   Register SrcReg = Src.getReg();
571   Register DstReg0 = MI.getOperand(0).getReg();
572   LLT DstTy = MRI->getType(DstReg0);
573   LLT SrcTy = MRI->getType(SrcReg);
574 
575   const unsigned DstSize = DstTy.getSizeInBits();
576   const unsigned SrcSize = SrcTy.getSizeInBits();
577   const DebugLoc &DL = MI.getDebugLoc();
578   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
579 
580   const TargetRegisterClass *SrcRC =
581       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
582   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
583     return false;
584 
585   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
586   // source, and this relies on the fact that the same subregister indices are
587   // used for both.
588   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
589   for (int I = 0, E = NumDst; I != E; ++I) {
590     MachineOperand &Dst = MI.getOperand(I);
591     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
592       .addReg(SrcReg, 0, SubRegs[I]);
593 
594     // Make sure the subregister index is valid for the source register.
595     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
596     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
597       return false;
598 
599     const TargetRegisterClass *DstRC =
600       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
601     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
602       return false;
603   }
604 
605   MI.eraseFromParent();
606   return true;
607 }
608 
609 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
610   MachineInstr &MI) const {
611   if (selectImpl(MI, *CoverageInfo))
612     return true;
613 
614   const LLT S32 = LLT::scalar(32);
615   const LLT V2S16 = LLT::fixed_vector(2, 16);
616 
617   Register Dst = MI.getOperand(0).getReg();
618   if (MRI->getType(Dst) != V2S16)
619     return false;
620 
621   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
622   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
623     return false;
624 
625   Register Src0 = MI.getOperand(1).getReg();
626   Register Src1 = MI.getOperand(2).getReg();
627   if (MRI->getType(Src0) != S32)
628     return false;
629 
630   const DebugLoc &DL = MI.getDebugLoc();
631   MachineBasicBlock *BB = MI.getParent();
632 
633   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
634   if (ConstSrc1) {
635     auto ConstSrc0 =
636         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
637     if (ConstSrc0) {
638       const int64_t K0 = ConstSrc0->Value.getSExtValue();
639       const int64_t K1 = ConstSrc1->Value.getSExtValue();
640       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
641       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
642 
643       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
644         .addImm(Lo16 | (Hi16 << 16));
645       MI.eraseFromParent();
646       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
647     }
648   }
649 
650   // TODO: This should probably be a combine somewhere
651   // (build_vector_trunc $src0, undef -> copy $src0
652   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
653   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
654     MI.setDesc(TII.get(AMDGPU::COPY));
655     MI.removeOperand(2);
656     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
657            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
658   }
659 
660   Register ShiftSrc0;
661   Register ShiftSrc1;
662 
663   // With multiple uses of the shift, this will duplicate the shift and
664   // increase register pressure.
665   //
666   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
667   //  => (S_PACK_HH_B32_B16 $src0, $src1)
668   // (build_vector_trunc (lshr_oneuse SReg_32:$src0, 16), $src1)
669   //  => (S_PACK_HL_B32_B16 $src0, $src1)
670   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
671   //  => (S_PACK_LH_B32_B16 $src0, $src1)
672   // (build_vector_trunc $src0, $src1)
673   //  => (S_PACK_LL_B32_B16 $src0, $src1)
674 
675   bool Shift0 = mi_match(
676       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
677 
678   bool Shift1 = mi_match(
679       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
680 
681   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
682   if (Shift0 && Shift1) {
683     Opc = AMDGPU::S_PACK_HH_B32_B16;
684     MI.getOperand(1).setReg(ShiftSrc0);
685     MI.getOperand(2).setReg(ShiftSrc1);
686   } else if (Shift1) {
687     Opc = AMDGPU::S_PACK_LH_B32_B16;
688     MI.getOperand(2).setReg(ShiftSrc1);
689   } else if (Shift0) {
690     if (ConstSrc1 && ConstSrc1->Value == 0) {
691       // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
692       auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
693                      .addReg(ShiftSrc0)
694                      .addImm(16);
695 
696       MI.eraseFromParent();
697       return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
698     }
699     if (STI.hasSPackHL()) {
700       Opc = AMDGPU::S_PACK_HL_B32_B16;
701       MI.getOperand(1).setReg(ShiftSrc0);
702     }
703   }
704 
705   MI.setDesc(TII.get(Opc));
706   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
707 }
708 
709 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
710   return selectG_ADD_SUB(I);
711 }
712 
713 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
714   const MachineOperand &MO = I.getOperand(0);
715 
716   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
717   // regbank check here is to know why getConstrainedRegClassForOperand failed.
718   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
719   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
720       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
721     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
722     return true;
723   }
724 
725   return false;
726 }
727 
728 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
729   MachineBasicBlock *BB = I.getParent();
730 
731   Register DstReg = I.getOperand(0).getReg();
732   Register Src0Reg = I.getOperand(1).getReg();
733   Register Src1Reg = I.getOperand(2).getReg();
734   LLT Src1Ty = MRI->getType(Src1Reg);
735 
736   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
737   unsigned InsSize = Src1Ty.getSizeInBits();
738 
739   int64_t Offset = I.getOperand(3).getImm();
740 
741   // FIXME: These cases should have been illegal and unnecessary to check here.
742   if (Offset % 32 != 0 || InsSize % 32 != 0)
743     return false;
744 
745   // Currently not handled by getSubRegFromChannel.
746   if (InsSize > 128)
747     return false;
748 
749   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
750   if (SubReg == AMDGPU::NoSubRegister)
751     return false;
752 
753   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
754   const TargetRegisterClass *DstRC =
755       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
756   if (!DstRC)
757     return false;
758 
759   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
760   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
761   const TargetRegisterClass *Src0RC =
762       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
763   const TargetRegisterClass *Src1RC =
764       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
765 
766   // Deal with weird cases where the class only partially supports the subreg
767   // index.
768   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
769   if (!Src0RC || !Src1RC)
770     return false;
771 
772   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
773       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
774       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
775     return false;
776 
777   const DebugLoc &DL = I.getDebugLoc();
778   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
779     .addReg(Src0Reg)
780     .addReg(Src1Reg)
781     .addImm(SubReg);
782 
783   I.eraseFromParent();
784   return true;
785 }
786 
787 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
788   Register DstReg = MI.getOperand(0).getReg();
789   Register SrcReg = MI.getOperand(1).getReg();
790   Register OffsetReg = MI.getOperand(2).getReg();
791   Register WidthReg = MI.getOperand(3).getReg();
792 
793   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
794          "scalar BFX instructions are expanded in regbankselect");
795   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
796          "64-bit vector BFX instructions are expanded in regbankselect");
797 
798   const DebugLoc &DL = MI.getDebugLoc();
799   MachineBasicBlock *MBB = MI.getParent();
800 
801   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
802   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
803   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
804                  .addReg(SrcReg)
805                  .addReg(OffsetReg)
806                  .addReg(WidthReg);
807   MI.eraseFromParent();
808   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
809 }
810 
811 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
812   if (STI.getLDSBankCount() != 16)
813     return selectImpl(MI, *CoverageInfo);
814 
815   Register Dst = MI.getOperand(0).getReg();
816   Register Src0 = MI.getOperand(2).getReg();
817   Register M0Val = MI.getOperand(6).getReg();
818   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
819       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
820       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
821     return false;
822 
823   // This requires 2 instructions. It is possible to write a pattern to support
824   // this, but the generated isel emitter doesn't correctly deal with multiple
825   // output instructions using the same physical register input. The copy to m0
826   // is incorrectly placed before the second instruction.
827   //
828   // TODO: Match source modifiers.
829 
830   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
831   const DebugLoc &DL = MI.getDebugLoc();
832   MachineBasicBlock *MBB = MI.getParent();
833 
834   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
835     .addReg(M0Val);
836   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
837     .addImm(2)
838     .addImm(MI.getOperand(4).getImm())  // $attr
839     .addImm(MI.getOperand(3).getImm()); // $attrchan
840 
841   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
842     .addImm(0)                          // $src0_modifiers
843     .addReg(Src0)                       // $src0
844     .addImm(MI.getOperand(4).getImm())  // $attr
845     .addImm(MI.getOperand(3).getImm())  // $attrchan
846     .addImm(0)                          // $src2_modifiers
847     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
848     .addImm(MI.getOperand(5).getImm())  // $high
849     .addImm(0)                          // $clamp
850     .addImm(0);                         // $omod
851 
852   MI.eraseFromParent();
853   return true;
854 }
855 
856 // Writelane is special in that it can use SGPR and M0 (which would normally
857 // count as using the constant bus twice - but in this case it is allowed since
858 // the lane selector doesn't count as a use of the constant bus). However, it is
859 // still required to abide by the 1 SGPR rule. Fix this up if we might have
860 // multiple SGPRs.
861 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
862   // With a constant bus limit of at least 2, there's no issue.
863   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
864     return selectImpl(MI, *CoverageInfo);
865 
866   MachineBasicBlock *MBB = MI.getParent();
867   const DebugLoc &DL = MI.getDebugLoc();
868   Register VDst = MI.getOperand(0).getReg();
869   Register Val = MI.getOperand(2).getReg();
870   Register LaneSelect = MI.getOperand(3).getReg();
871   Register VDstIn = MI.getOperand(4).getReg();
872 
873   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
874 
875   Optional<ValueAndVReg> ConstSelect =
876       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
877   if (ConstSelect) {
878     // The selector has to be an inline immediate, so we can use whatever for
879     // the other operands.
880     MIB.addReg(Val);
881     MIB.addImm(ConstSelect->Value.getSExtValue() &
882                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
883   } else {
884     Optional<ValueAndVReg> ConstVal =
885         getIConstantVRegValWithLookThrough(Val, *MRI);
886 
887     // If the value written is an inline immediate, we can get away without a
888     // copy to m0.
889     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
890                                                  STI.hasInv2PiInlineImm())) {
891       MIB.addImm(ConstVal->Value.getSExtValue());
892       MIB.addReg(LaneSelect);
893     } else {
894       MIB.addReg(Val);
895 
896       // If the lane selector was originally in a VGPR and copied with
897       // readfirstlane, there's a hazard to read the same SGPR from the
898       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
899       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
900 
901       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
902         .addReg(LaneSelect);
903       MIB.addReg(AMDGPU::M0);
904     }
905   }
906 
907   MIB.addReg(VDstIn);
908 
909   MI.eraseFromParent();
910   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
911 }
912 
913 // We need to handle this here because tablegen doesn't support matching
914 // instructions with multiple outputs.
915 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
916   Register Dst0 = MI.getOperand(0).getReg();
917   Register Dst1 = MI.getOperand(1).getReg();
918 
919   LLT Ty = MRI->getType(Dst0);
920   unsigned Opc;
921   if (Ty == LLT::scalar(32))
922     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
923   else if (Ty == LLT::scalar(64))
924     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
925   else
926     return false;
927 
928   // TODO: Match source modifiers.
929 
930   const DebugLoc &DL = MI.getDebugLoc();
931   MachineBasicBlock *MBB = MI.getParent();
932 
933   Register Numer = MI.getOperand(3).getReg();
934   Register Denom = MI.getOperand(4).getReg();
935   unsigned ChooseDenom = MI.getOperand(5).getImm();
936 
937   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
938 
939   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
940     .addDef(Dst1)
941     .addImm(0)     // $src0_modifiers
942     .addUse(Src0)  // $src0
943     .addImm(0)     // $src1_modifiers
944     .addUse(Denom) // $src1
945     .addImm(0)     // $src2_modifiers
946     .addUse(Numer) // $src2
947     .addImm(0)     // $clamp
948     .addImm(0);    // $omod
949 
950   MI.eraseFromParent();
951   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
952 }
953 
954 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
955   unsigned IntrinsicID = I.getIntrinsicID();
956   switch (IntrinsicID) {
957   case Intrinsic::amdgcn_if_break: {
958     MachineBasicBlock *BB = I.getParent();
959 
960     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
961     // SelectionDAG uses for wave32 vs wave64.
962     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
963       .add(I.getOperand(0))
964       .add(I.getOperand(2))
965       .add(I.getOperand(3));
966 
967     Register DstReg = I.getOperand(0).getReg();
968     Register Src0Reg = I.getOperand(2).getReg();
969     Register Src1Reg = I.getOperand(3).getReg();
970 
971     I.eraseFromParent();
972 
973     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
974       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
975 
976     return true;
977   }
978   case Intrinsic::amdgcn_interp_p1_f16:
979     return selectInterpP1F16(I);
980   case Intrinsic::amdgcn_wqm:
981     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
982   case Intrinsic::amdgcn_softwqm:
983     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
984   case Intrinsic::amdgcn_strict_wwm:
985   case Intrinsic::amdgcn_wwm:
986     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
987   case Intrinsic::amdgcn_strict_wqm:
988     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
989   case Intrinsic::amdgcn_writelane:
990     return selectWritelane(I);
991   case Intrinsic::amdgcn_div_scale:
992     return selectDivScale(I);
993   case Intrinsic::amdgcn_icmp:
994     return selectIntrinsicIcmp(I);
995   case Intrinsic::amdgcn_ballot:
996     return selectBallot(I);
997   case Intrinsic::amdgcn_reloc_constant:
998     return selectRelocConstant(I);
999   case Intrinsic::amdgcn_groupstaticsize:
1000     return selectGroupStaticSize(I);
1001   case Intrinsic::returnaddress:
1002     return selectReturnAddress(I);
1003   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1004   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1005   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1006   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1007   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1008   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1009     return selectSMFMACIntrin(I);
1010   default:
1011     return selectImpl(I, *CoverageInfo);
1012   }
1013 }
1014 
1015 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1016   if (Size != 32 && Size != 64)
1017     return -1;
1018   switch (P) {
1019   default:
1020     llvm_unreachable("Unknown condition code!");
1021   case CmpInst::ICMP_NE:
1022     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1023   case CmpInst::ICMP_EQ:
1024     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1025   case CmpInst::ICMP_SGT:
1026     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1027   case CmpInst::ICMP_SGE:
1028     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1029   case CmpInst::ICMP_SLT:
1030     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1031   case CmpInst::ICMP_SLE:
1032     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1033   case CmpInst::ICMP_UGT:
1034     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1035   case CmpInst::ICMP_UGE:
1036     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1037   case CmpInst::ICMP_ULT:
1038     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1039   case CmpInst::ICMP_ULE:
1040     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1041   }
1042 }
1043 
1044 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1045                                               unsigned Size) const {
1046   if (Size == 64) {
1047     if (!STI.hasScalarCompareEq64())
1048       return -1;
1049 
1050     switch (P) {
1051     case CmpInst::ICMP_NE:
1052       return AMDGPU::S_CMP_LG_U64;
1053     case CmpInst::ICMP_EQ:
1054       return AMDGPU::S_CMP_EQ_U64;
1055     default:
1056       return -1;
1057     }
1058   }
1059 
1060   if (Size != 32)
1061     return -1;
1062 
1063   switch (P) {
1064   case CmpInst::ICMP_NE:
1065     return AMDGPU::S_CMP_LG_U32;
1066   case CmpInst::ICMP_EQ:
1067     return AMDGPU::S_CMP_EQ_U32;
1068   case CmpInst::ICMP_SGT:
1069     return AMDGPU::S_CMP_GT_I32;
1070   case CmpInst::ICMP_SGE:
1071     return AMDGPU::S_CMP_GE_I32;
1072   case CmpInst::ICMP_SLT:
1073     return AMDGPU::S_CMP_LT_I32;
1074   case CmpInst::ICMP_SLE:
1075     return AMDGPU::S_CMP_LE_I32;
1076   case CmpInst::ICMP_UGT:
1077     return AMDGPU::S_CMP_GT_U32;
1078   case CmpInst::ICMP_UGE:
1079     return AMDGPU::S_CMP_GE_U32;
1080   case CmpInst::ICMP_ULT:
1081     return AMDGPU::S_CMP_LT_U32;
1082   case CmpInst::ICMP_ULE:
1083     return AMDGPU::S_CMP_LE_U32;
1084   default:
1085     llvm_unreachable("Unknown condition code!");
1086   }
1087 }
1088 
1089 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1090   MachineBasicBlock *BB = I.getParent();
1091   const DebugLoc &DL = I.getDebugLoc();
1092 
1093   Register SrcReg = I.getOperand(2).getReg();
1094   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1095 
1096   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1097 
1098   Register CCReg = I.getOperand(0).getReg();
1099   if (!isVCC(CCReg, *MRI)) {
1100     int Opcode = getS_CMPOpcode(Pred, Size);
1101     if (Opcode == -1)
1102       return false;
1103     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1104             .add(I.getOperand(2))
1105             .add(I.getOperand(3));
1106     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1107       .addReg(AMDGPU::SCC);
1108     bool Ret =
1109         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1110         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1111     I.eraseFromParent();
1112     return Ret;
1113   }
1114 
1115   int Opcode = getV_CMPOpcode(Pred, Size);
1116   if (Opcode == -1)
1117     return false;
1118 
1119   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1120             I.getOperand(0).getReg())
1121             .add(I.getOperand(2))
1122             .add(I.getOperand(3));
1123   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1124                                *TRI.getBoolRC(), *MRI);
1125   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1126   I.eraseFromParent();
1127   return Ret;
1128 }
1129 
1130 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1131   Register Dst = I.getOperand(0).getReg();
1132   if (isVCC(Dst, *MRI))
1133     return false;
1134 
1135   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1136     return false;
1137 
1138   MachineBasicBlock *BB = I.getParent();
1139   const DebugLoc &DL = I.getDebugLoc();
1140   Register SrcReg = I.getOperand(2).getReg();
1141   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1142 
1143   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1144   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1145     MachineInstr *ICmp =
1146         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1147 
1148     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1149                                       *TRI.getBoolRC(), *MRI))
1150       return false;
1151     I.eraseFromParent();
1152     return true;
1153   }
1154 
1155   int Opcode = getV_CMPOpcode(Pred, Size);
1156   if (Opcode == -1)
1157     return false;
1158 
1159   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1160                            .add(I.getOperand(2))
1161                            .add(I.getOperand(3));
1162   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1163                                *MRI);
1164   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1165   I.eraseFromParent();
1166   return Ret;
1167 }
1168 
1169 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1170   MachineBasicBlock *BB = I.getParent();
1171   const DebugLoc &DL = I.getDebugLoc();
1172   Register DstReg = I.getOperand(0).getReg();
1173   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1174   const bool Is64 = Size == 64;
1175 
1176   if (Size != STI.getWavefrontSize())
1177     return false;
1178 
1179   Optional<ValueAndVReg> Arg =
1180       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1181 
1182   if (Arg) {
1183     const int64_t Value = Arg.getValue().Value.getSExtValue();
1184     if (Value == 0) {
1185       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1186       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1187     } else if (Value == -1) { // all ones
1188       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1189       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1190     } else
1191       return false;
1192   } else {
1193     Register SrcReg = I.getOperand(2).getReg();
1194     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1195   }
1196 
1197   I.eraseFromParent();
1198   return true;
1199 }
1200 
1201 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1202   Register DstReg = I.getOperand(0).getReg();
1203   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1204   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1205   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1206     return false;
1207 
1208   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1209 
1210   Module *M = MF->getFunction().getParent();
1211   const MDNode *Metadata = I.getOperand(2).getMetadata();
1212   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1213   auto RelocSymbol = cast<GlobalVariable>(
1214     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1215 
1216   MachineBasicBlock *BB = I.getParent();
1217   BuildMI(*BB, &I, I.getDebugLoc(),
1218           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1219     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1220 
1221   I.eraseFromParent();
1222   return true;
1223 }
1224 
1225 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1226   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1227 
1228   Register DstReg = I.getOperand(0).getReg();
1229   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1230   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1231     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1232 
1233   MachineBasicBlock *MBB = I.getParent();
1234   const DebugLoc &DL = I.getDebugLoc();
1235 
1236   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1237 
1238   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1239     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1240     MIB.addImm(MFI->getLDSSize());
1241   } else {
1242     Module *M = MF->getFunction().getParent();
1243     const GlobalValue *GV
1244       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1245     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1246   }
1247 
1248   I.eraseFromParent();
1249   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1250 }
1251 
1252 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1253   MachineBasicBlock *MBB = I.getParent();
1254   MachineFunction &MF = *MBB->getParent();
1255   const DebugLoc &DL = I.getDebugLoc();
1256 
1257   MachineOperand &Dst = I.getOperand(0);
1258   Register DstReg = Dst.getReg();
1259   unsigned Depth = I.getOperand(2).getImm();
1260 
1261   const TargetRegisterClass *RC
1262     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1263   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1264       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1265     return false;
1266 
1267   // Check for kernel and shader functions
1268   if (Depth != 0 ||
1269       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1270     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1271       .addImm(0);
1272     I.eraseFromParent();
1273     return true;
1274   }
1275 
1276   MachineFrameInfo &MFI = MF.getFrameInfo();
1277   // There is a call to @llvm.returnaddress in this function
1278   MFI.setReturnAddressIsTaken(true);
1279 
1280   // Get the return address reg and mark it as an implicit live-in
1281   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1282   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1283                                              AMDGPU::SReg_64RegClass, DL);
1284   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1285     .addReg(LiveIn);
1286   I.eraseFromParent();
1287   return true;
1288 }
1289 
1290 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1291   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1292   // SelectionDAG uses for wave32 vs wave64.
1293   MachineBasicBlock *BB = MI.getParent();
1294   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1295       .add(MI.getOperand(1));
1296 
1297   Register Reg = MI.getOperand(1).getReg();
1298   MI.eraseFromParent();
1299 
1300   if (!MRI->getRegClassOrNull(Reg))
1301     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1302   return true;
1303 }
1304 
1305 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1306   MachineInstr &MI, Intrinsic::ID IntrID) const {
1307   MachineBasicBlock *MBB = MI.getParent();
1308   MachineFunction *MF = MBB->getParent();
1309   const DebugLoc &DL = MI.getDebugLoc();
1310 
1311   unsigned IndexOperand = MI.getOperand(7).getImm();
1312   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1313   bool WaveDone = MI.getOperand(9).getImm() != 0;
1314 
1315   if (WaveDone && !WaveRelease)
1316     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1317 
1318   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1319   IndexOperand &= ~0x3f;
1320   unsigned CountDw = 0;
1321 
1322   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1323     CountDw = (IndexOperand >> 24) & 0xf;
1324     IndexOperand &= ~(0xf << 24);
1325 
1326     if (CountDw < 1 || CountDw > 4) {
1327       report_fatal_error(
1328         "ds_ordered_count: dword count must be between 1 and 4");
1329     }
1330   }
1331 
1332   if (IndexOperand)
1333     report_fatal_error("ds_ordered_count: bad index operand");
1334 
1335   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1336   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1337 
1338   unsigned Offset0 = OrderedCountIndex << 2;
1339   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1340 
1341   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1342     Offset1 |= (CountDw - 1) << 6;
1343 
1344   if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1345     Offset1 |= ShaderType << 2;
1346 
1347   unsigned Offset = Offset0 | (Offset1 << 8);
1348 
1349   Register M0Val = MI.getOperand(2).getReg();
1350   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1351     .addReg(M0Val);
1352 
1353   Register DstReg = MI.getOperand(0).getReg();
1354   Register ValReg = MI.getOperand(3).getReg();
1355   MachineInstrBuilder DS =
1356     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1357       .addReg(ValReg)
1358       .addImm(Offset)
1359       .cloneMemRefs(MI);
1360 
1361   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1362     return false;
1363 
1364   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1365   MI.eraseFromParent();
1366   return Ret;
1367 }
1368 
1369 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1370   switch (IntrID) {
1371   case Intrinsic::amdgcn_ds_gws_init:
1372     return AMDGPU::DS_GWS_INIT;
1373   case Intrinsic::amdgcn_ds_gws_barrier:
1374     return AMDGPU::DS_GWS_BARRIER;
1375   case Intrinsic::amdgcn_ds_gws_sema_v:
1376     return AMDGPU::DS_GWS_SEMA_V;
1377   case Intrinsic::amdgcn_ds_gws_sema_br:
1378     return AMDGPU::DS_GWS_SEMA_BR;
1379   case Intrinsic::amdgcn_ds_gws_sema_p:
1380     return AMDGPU::DS_GWS_SEMA_P;
1381   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1382     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1383   default:
1384     llvm_unreachable("not a gws intrinsic");
1385   }
1386 }
1387 
1388 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1389                                                      Intrinsic::ID IID) const {
1390   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1391       !STI.hasGWSSemaReleaseAll())
1392     return false;
1393 
1394   // intrinsic ID, vsrc, offset
1395   const bool HasVSrc = MI.getNumOperands() == 3;
1396   assert(HasVSrc || MI.getNumOperands() == 2);
1397 
1398   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1399   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1400   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1401     return false;
1402 
1403   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1404   assert(OffsetDef);
1405 
1406   unsigned ImmOffset;
1407 
1408   MachineBasicBlock *MBB = MI.getParent();
1409   const DebugLoc &DL = MI.getDebugLoc();
1410 
1411   MachineInstr *Readfirstlane = nullptr;
1412 
1413   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1414   // incoming offset, in case there's an add of a constant. We'll have to put it
1415   // back later.
1416   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1417     Readfirstlane = OffsetDef;
1418     BaseOffset = OffsetDef->getOperand(1).getReg();
1419     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1420   }
1421 
1422   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1423     // If we have a constant offset, try to use the 0 in m0 as the base.
1424     // TODO: Look into changing the default m0 initialization value. If the
1425     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1426     // the immediate offset.
1427 
1428     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1429     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1430       .addImm(0);
1431   } else {
1432     std::tie(BaseOffset, ImmOffset) =
1433         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1434 
1435     if (Readfirstlane) {
1436       // We have the constant offset now, so put the readfirstlane back on the
1437       // variable component.
1438       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1439         return false;
1440 
1441       Readfirstlane->getOperand(1).setReg(BaseOffset);
1442       BaseOffset = Readfirstlane->getOperand(0).getReg();
1443     } else {
1444       if (!RBI.constrainGenericRegister(BaseOffset,
1445                                         AMDGPU::SReg_32RegClass, *MRI))
1446         return false;
1447     }
1448 
1449     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1450     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1451       .addReg(BaseOffset)
1452       .addImm(16);
1453 
1454     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1455       .addReg(M0Base);
1456   }
1457 
1458   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1459   // offset field) % 64. Some versions of the programming guide omit the m0
1460   // part, or claim it's from offset 0.
1461   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1462 
1463   if (HasVSrc) {
1464     Register VSrc = MI.getOperand(1).getReg();
1465     MIB.addReg(VSrc);
1466 
1467     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1468       return false;
1469   }
1470 
1471   MIB.addImm(ImmOffset)
1472      .cloneMemRefs(MI);
1473 
1474   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1475 
1476   MI.eraseFromParent();
1477   return true;
1478 }
1479 
1480 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1481                                                       bool IsAppend) const {
1482   Register PtrBase = MI.getOperand(2).getReg();
1483   LLT PtrTy = MRI->getType(PtrBase);
1484   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1485 
1486   unsigned Offset;
1487   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1488 
1489   // TODO: Should this try to look through readfirstlane like GWS?
1490   if (!isDSOffsetLegal(PtrBase, Offset)) {
1491     PtrBase = MI.getOperand(2).getReg();
1492     Offset = 0;
1493   }
1494 
1495   MachineBasicBlock *MBB = MI.getParent();
1496   const DebugLoc &DL = MI.getDebugLoc();
1497   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1498 
1499   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1500     .addReg(PtrBase);
1501   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1502     return false;
1503 
1504   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1505     .addImm(Offset)
1506     .addImm(IsGDS ? -1 : 0)
1507     .cloneMemRefs(MI);
1508   MI.eraseFromParent();
1509   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1510 }
1511 
1512 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1513   if (TM.getOptLevel() > CodeGenOpt::None) {
1514     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1515     if (WGSize <= STI.getWavefrontSize()) {
1516       MachineBasicBlock *MBB = MI.getParent();
1517       const DebugLoc &DL = MI.getDebugLoc();
1518       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1519       MI.eraseFromParent();
1520       return true;
1521     }
1522   }
1523   return selectImpl(MI, *CoverageInfo);
1524 }
1525 
1526 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1527                          bool &IsTexFail) {
1528   if (TexFailCtrl)
1529     IsTexFail = true;
1530 
1531   TFE = (TexFailCtrl & 0x1) ? true : false;
1532   TexFailCtrl &= ~(uint64_t)0x1;
1533   LWE = (TexFailCtrl & 0x2) ? true : false;
1534   TexFailCtrl &= ~(uint64_t)0x2;
1535 
1536   return TexFailCtrl == 0;
1537 }
1538 
1539 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1540   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1541   MachineBasicBlock *MBB = MI.getParent();
1542   const DebugLoc &DL = MI.getDebugLoc();
1543 
1544   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1545     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1546 
1547   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1548   unsigned IntrOpcode = Intr->BaseOpcode;
1549   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1550   const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1551 
1552   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1553 
1554   Register VDataIn, VDataOut;
1555   LLT VDataTy;
1556   int NumVDataDwords = -1;
1557   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1558                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1559 
1560   bool Unorm;
1561   if (!BaseOpcode->Sampler)
1562     Unorm = true;
1563   else
1564     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1565 
1566   bool TFE;
1567   bool LWE;
1568   bool IsTexFail = false;
1569   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1570                     TFE, LWE, IsTexFail))
1571     return false;
1572 
1573   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1574   const bool IsA16 = (Flags & 1) != 0;
1575   const bool IsG16 = (Flags & 2) != 0;
1576 
1577   // A16 implies 16 bit gradients if subtarget doesn't support G16
1578   if (IsA16 && !STI.hasG16() && !IsG16)
1579     return false;
1580 
1581   unsigned DMask = 0;
1582   unsigned DMaskLanes = 0;
1583 
1584   if (BaseOpcode->Atomic) {
1585     VDataOut = MI.getOperand(0).getReg();
1586     VDataIn = MI.getOperand(2).getReg();
1587     LLT Ty = MRI->getType(VDataIn);
1588 
1589     // Be careful to allow atomic swap on 16-bit element vectors.
1590     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1591       Ty.getSizeInBits() == 128 :
1592       Ty.getSizeInBits() == 64;
1593 
1594     if (BaseOpcode->AtomicX2) {
1595       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1596 
1597       DMask = Is64Bit ? 0xf : 0x3;
1598       NumVDataDwords = Is64Bit ? 4 : 2;
1599     } else {
1600       DMask = Is64Bit ? 0x3 : 0x1;
1601       NumVDataDwords = Is64Bit ? 2 : 1;
1602     }
1603   } else {
1604     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1605     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1606 
1607     if (BaseOpcode->Store) {
1608       VDataIn = MI.getOperand(1).getReg();
1609       VDataTy = MRI->getType(VDataIn);
1610       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1611     } else {
1612       VDataOut = MI.getOperand(0).getReg();
1613       VDataTy = MRI->getType(VDataOut);
1614       NumVDataDwords = DMaskLanes;
1615 
1616       if (IsD16 && !STI.hasUnpackedD16VMem())
1617         NumVDataDwords = (DMaskLanes + 1) / 2;
1618     }
1619   }
1620 
1621   // Set G16 opcode
1622   if (IsG16 && !IsA16) {
1623     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1624         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1625     assert(G16MappingInfo);
1626     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1627   }
1628 
1629   // TODO: Check this in verifier.
1630   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1631 
1632   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1633   if (BaseOpcode->Atomic)
1634     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1635   if (CPol & ~AMDGPU::CPol::ALL)
1636     return false;
1637 
1638   int NumVAddrRegs = 0;
1639   int NumVAddrDwords = 0;
1640   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1641     // Skip the $noregs and 0s inserted during legalization.
1642     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1643     if (!AddrOp.isReg())
1644       continue; // XXX - Break?
1645 
1646     Register Addr = AddrOp.getReg();
1647     if (!Addr)
1648       break;
1649 
1650     ++NumVAddrRegs;
1651     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1652   }
1653 
1654   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1655   // NSA, these should have been packed into a single value in the first
1656   // address register
1657   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1658   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1659     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1660     return false;
1661   }
1662 
1663   if (IsTexFail)
1664     ++NumVDataDwords;
1665 
1666   int Opcode = -1;
1667   if (IsGFX11Plus) {
1668     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1669                                    UseNSA ? AMDGPU::MIMGEncGfx11NSA
1670                                           : AMDGPU::MIMGEncGfx11Default,
1671                                    NumVDataDwords, NumVAddrDwords);
1672   } else if (IsGFX10Plus) {
1673     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1674                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1675                                           : AMDGPU::MIMGEncGfx10Default,
1676                                    NumVDataDwords, NumVAddrDwords);
1677   } else {
1678     if (Subtarget->hasGFX90AInsts()) {
1679       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1680                                      NumVDataDwords, NumVAddrDwords);
1681       if (Opcode == -1) {
1682         LLVM_DEBUG(
1683             dbgs()
1684             << "requested image instruction is not supported on this GPU\n");
1685         return false;
1686       }
1687     }
1688     if (Opcode == -1 &&
1689         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1690       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1691                                      NumVDataDwords, NumVAddrDwords);
1692     if (Opcode == -1)
1693       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1694                                      NumVDataDwords, NumVAddrDwords);
1695   }
1696   assert(Opcode != -1);
1697 
1698   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1699     .cloneMemRefs(MI);
1700 
1701   if (VDataOut) {
1702     if (BaseOpcode->AtomicX2) {
1703       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1704 
1705       Register TmpReg = MRI->createVirtualRegister(
1706         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1707       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1708 
1709       MIB.addDef(TmpReg);
1710       if (!MRI->use_empty(VDataOut)) {
1711         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1712             .addReg(TmpReg, RegState::Kill, SubReg);
1713       }
1714 
1715     } else {
1716       MIB.addDef(VDataOut); // vdata output
1717     }
1718   }
1719 
1720   if (VDataIn)
1721     MIB.addReg(VDataIn); // vdata input
1722 
1723   for (int I = 0; I != NumVAddrRegs; ++I) {
1724     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1725     if (SrcOp.isReg()) {
1726       assert(SrcOp.getReg() != 0);
1727       MIB.addReg(SrcOp.getReg());
1728     }
1729   }
1730 
1731   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1732   if (BaseOpcode->Sampler)
1733     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1734 
1735   MIB.addImm(DMask); // dmask
1736 
1737   if (IsGFX10Plus)
1738     MIB.addImm(DimInfo->Encoding);
1739   MIB.addImm(Unorm);
1740 
1741   MIB.addImm(CPol);
1742   MIB.addImm(IsA16 &&  // a16 or r128
1743              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1744   if (IsGFX10Plus)
1745     MIB.addImm(IsA16 ? -1 : 0);
1746 
1747   if (!Subtarget->hasGFX90AInsts()) {
1748     MIB.addImm(TFE); // tfe
1749   } else if (TFE) {
1750     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1751     return false;
1752   }
1753 
1754   MIB.addImm(LWE); // lwe
1755   if (!IsGFX10Plus)
1756     MIB.addImm(DimInfo->DA ? -1 : 0);
1757   if (BaseOpcode->HasD16)
1758     MIB.addImm(IsD16 ? -1 : 0);
1759 
1760   if (IsTexFail) {
1761     // An image load instruction with TFE/LWE only conditionally writes to its
1762     // result registers. Initialize them to zero so that we always get well
1763     // defined result values.
1764     assert(VDataOut && !VDataIn);
1765     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1766     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1767     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1768       .addImm(0);
1769     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1770     if (STI.usePRTStrictNull()) {
1771       // With enable-prt-strict-null enabled, initialize all result registers to
1772       // zero.
1773       auto RegSeq =
1774           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1775       for (auto Sub : Parts)
1776         RegSeq.addReg(Zero).addImm(Sub);
1777     } else {
1778       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1779       // result register.
1780       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1781       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1782       auto RegSeq =
1783           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1784       for (auto Sub : Parts.drop_back(1))
1785         RegSeq.addReg(Undef).addImm(Sub);
1786       RegSeq.addReg(Zero).addImm(Parts.back());
1787     }
1788     MIB.addReg(Tied, RegState::Implicit);
1789     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1790   }
1791 
1792   MI.eraseFromParent();
1793   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1794   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1795   return true;
1796 }
1797 
1798 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1799     MachineInstr &I) const {
1800   unsigned IntrinsicID = I.getIntrinsicID();
1801   switch (IntrinsicID) {
1802   case Intrinsic::amdgcn_end_cf:
1803     return selectEndCfIntrinsic(I);
1804   case Intrinsic::amdgcn_ds_ordered_add:
1805   case Intrinsic::amdgcn_ds_ordered_swap:
1806     return selectDSOrderedIntrinsic(I, IntrinsicID);
1807   case Intrinsic::amdgcn_ds_gws_init:
1808   case Intrinsic::amdgcn_ds_gws_barrier:
1809   case Intrinsic::amdgcn_ds_gws_sema_v:
1810   case Intrinsic::amdgcn_ds_gws_sema_br:
1811   case Intrinsic::amdgcn_ds_gws_sema_p:
1812   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1813     return selectDSGWSIntrinsic(I, IntrinsicID);
1814   case Intrinsic::amdgcn_ds_append:
1815     return selectDSAppendConsume(I, true);
1816   case Intrinsic::amdgcn_ds_consume:
1817     return selectDSAppendConsume(I, false);
1818   case Intrinsic::amdgcn_s_barrier:
1819     return selectSBarrier(I);
1820   case Intrinsic::amdgcn_global_atomic_fadd:
1821     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1822   case Intrinsic::amdgcn_raw_buffer_load_lds:
1823   case Intrinsic::amdgcn_struct_buffer_load_lds:
1824     return selectBufferLoadLds(I);
1825   case Intrinsic::amdgcn_global_load_lds:
1826     return selectGlobalLoadLds(I);
1827   case Intrinsic::amdgcn_exp_compr:
1828     if (!STI.hasCompressedExport()) {
1829       Function &F = I.getMF()->getFunction();
1830       DiagnosticInfoUnsupported NoFpRet(
1831           F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
1832       F.getContext().diagnose(NoFpRet);
1833       return false;
1834     }
1835     break;
1836   }
1837   return selectImpl(I, *CoverageInfo);
1838 }
1839 
1840 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1841   if (selectImpl(I, *CoverageInfo))
1842     return true;
1843 
1844   MachineBasicBlock *BB = I.getParent();
1845   const DebugLoc &DL = I.getDebugLoc();
1846 
1847   Register DstReg = I.getOperand(0).getReg();
1848   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1849   assert(Size <= 32 || Size == 64);
1850   const MachineOperand &CCOp = I.getOperand(1);
1851   Register CCReg = CCOp.getReg();
1852   if (!isVCC(CCReg, *MRI)) {
1853     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1854                                          AMDGPU::S_CSELECT_B32;
1855     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1856             .addReg(CCReg);
1857 
1858     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1859     // bank, because it does not cover the register class that we used to represent
1860     // for it.  So we need to manually set the register class here.
1861     if (!MRI->getRegClassOrNull(CCReg))
1862         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1863     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1864             .add(I.getOperand(2))
1865             .add(I.getOperand(3));
1866 
1867     bool Ret = false;
1868     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1869     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1870     I.eraseFromParent();
1871     return Ret;
1872   }
1873 
1874   // Wide VGPR select should have been split in RegBankSelect.
1875   if (Size > 32)
1876     return false;
1877 
1878   MachineInstr *Select =
1879       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1880               .addImm(0)
1881               .add(I.getOperand(3))
1882               .addImm(0)
1883               .add(I.getOperand(2))
1884               .add(I.getOperand(1));
1885 
1886   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1887   I.eraseFromParent();
1888   return Ret;
1889 }
1890 
1891 static int sizeToSubRegIndex(unsigned Size) {
1892   switch (Size) {
1893   case 32:
1894     return AMDGPU::sub0;
1895   case 64:
1896     return AMDGPU::sub0_sub1;
1897   case 96:
1898     return AMDGPU::sub0_sub1_sub2;
1899   case 128:
1900     return AMDGPU::sub0_sub1_sub2_sub3;
1901   case 256:
1902     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1903   default:
1904     if (Size < 32)
1905       return AMDGPU::sub0;
1906     if (Size > 256)
1907       return -1;
1908     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1909   }
1910 }
1911 
1912 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1913   Register DstReg = I.getOperand(0).getReg();
1914   Register SrcReg = I.getOperand(1).getReg();
1915   const LLT DstTy = MRI->getType(DstReg);
1916   const LLT SrcTy = MRI->getType(SrcReg);
1917   const LLT S1 = LLT::scalar(1);
1918 
1919   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1920   const RegisterBank *DstRB;
1921   if (DstTy == S1) {
1922     // This is a special case. We don't treat s1 for legalization artifacts as
1923     // vcc booleans.
1924     DstRB = SrcRB;
1925   } else {
1926     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1927     if (SrcRB != DstRB)
1928       return false;
1929   }
1930 
1931   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1932 
1933   unsigned DstSize = DstTy.getSizeInBits();
1934   unsigned SrcSize = SrcTy.getSizeInBits();
1935 
1936   const TargetRegisterClass *SrcRC =
1937       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1938   const TargetRegisterClass *DstRC =
1939       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1940   if (!SrcRC || !DstRC)
1941     return false;
1942 
1943   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1944       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1945     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1946     return false;
1947   }
1948 
1949   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1950     MachineBasicBlock *MBB = I.getParent();
1951     const DebugLoc &DL = I.getDebugLoc();
1952 
1953     Register LoReg = MRI->createVirtualRegister(DstRC);
1954     Register HiReg = MRI->createVirtualRegister(DstRC);
1955     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1956       .addReg(SrcReg, 0, AMDGPU::sub0);
1957     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1958       .addReg(SrcReg, 0, AMDGPU::sub1);
1959 
1960     if (IsVALU && STI.hasSDWA()) {
1961       // Write the low 16-bits of the high element into the high 16-bits of the
1962       // low element.
1963       MachineInstr *MovSDWA =
1964         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1965         .addImm(0)                             // $src0_modifiers
1966         .addReg(HiReg)                         // $src0
1967         .addImm(0)                             // $clamp
1968         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1969         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1970         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1971         .addReg(LoReg, RegState::Implicit);
1972       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1973     } else {
1974       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1975       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1976       Register ImmReg = MRI->createVirtualRegister(DstRC);
1977       if (IsVALU) {
1978         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1979           .addImm(16)
1980           .addReg(HiReg);
1981       } else {
1982         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1983           .addReg(HiReg)
1984           .addImm(16);
1985       }
1986 
1987       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1988       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1989       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1990 
1991       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1992         .addImm(0xffff);
1993       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1994         .addReg(LoReg)
1995         .addReg(ImmReg);
1996       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1997         .addReg(TmpReg0)
1998         .addReg(TmpReg1);
1999     }
2000 
2001     I.eraseFromParent();
2002     return true;
2003   }
2004 
2005   if (!DstTy.isScalar())
2006     return false;
2007 
2008   if (SrcSize > 32) {
2009     int SubRegIdx = sizeToSubRegIndex(DstSize);
2010     if (SubRegIdx == -1)
2011       return false;
2012 
2013     // Deal with weird cases where the class only partially supports the subreg
2014     // index.
2015     const TargetRegisterClass *SrcWithSubRC
2016       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2017     if (!SrcWithSubRC)
2018       return false;
2019 
2020     if (SrcWithSubRC != SrcRC) {
2021       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2022         return false;
2023     }
2024 
2025     I.getOperand(1).setSubReg(SubRegIdx);
2026   }
2027 
2028   I.setDesc(TII.get(TargetOpcode::COPY));
2029   return true;
2030 }
2031 
2032 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2033 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2034   Mask = maskTrailingOnes<unsigned>(Size);
2035   int SignedMask = static_cast<int>(Mask);
2036   return SignedMask >= -16 && SignedMask <= 64;
2037 }
2038 
2039 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2040 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2041   Register Reg, const MachineRegisterInfo &MRI,
2042   const TargetRegisterInfo &TRI) const {
2043   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2044   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2045     return RB;
2046 
2047   // Ignore the type, since we don't use vcc in artifacts.
2048   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2049     return &RBI.getRegBankFromRegClass(*RC, LLT());
2050   return nullptr;
2051 }
2052 
2053 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2054   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2055   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2056   const DebugLoc &DL = I.getDebugLoc();
2057   MachineBasicBlock &MBB = *I.getParent();
2058   const Register DstReg = I.getOperand(0).getReg();
2059   const Register SrcReg = I.getOperand(1).getReg();
2060 
2061   const LLT DstTy = MRI->getType(DstReg);
2062   const LLT SrcTy = MRI->getType(SrcReg);
2063   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2064     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2065   const unsigned DstSize = DstTy.getSizeInBits();
2066   if (!DstTy.isScalar())
2067     return false;
2068 
2069   // Artifact casts should never use vcc.
2070   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2071 
2072   // FIXME: This should probably be illegal and split earlier.
2073   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2074     if (DstSize <= 32)
2075       return selectCOPY(I);
2076 
2077     const TargetRegisterClass *SrcRC =
2078         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2079     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2080     const TargetRegisterClass *DstRC =
2081         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2082 
2083     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2084     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2085     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2086       .addReg(SrcReg)
2087       .addImm(AMDGPU::sub0)
2088       .addReg(UndefReg)
2089       .addImm(AMDGPU::sub1);
2090     I.eraseFromParent();
2091 
2092     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2093            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2094   }
2095 
2096   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2097     // 64-bit should have been split up in RegBankSelect
2098 
2099     // Try to use an and with a mask if it will save code size.
2100     unsigned Mask;
2101     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2102       MachineInstr *ExtI =
2103       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2104         .addImm(Mask)
2105         .addReg(SrcReg);
2106       I.eraseFromParent();
2107       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2108     }
2109 
2110     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2111     MachineInstr *ExtI =
2112       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2113       .addReg(SrcReg)
2114       .addImm(0) // Offset
2115       .addImm(SrcSize); // Width
2116     I.eraseFromParent();
2117     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2118   }
2119 
2120   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2121     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2122       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2123     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2124       return false;
2125 
2126     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2127       const unsigned SextOpc = SrcSize == 8 ?
2128         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2129       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2130         .addReg(SrcReg);
2131       I.eraseFromParent();
2132       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2133     }
2134 
2135     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2136     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2137 
2138     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2139     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2140       // We need a 64-bit register source, but the high bits don't matter.
2141       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2142       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2143       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2144 
2145       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2146       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2147         .addReg(SrcReg, 0, SubReg)
2148         .addImm(AMDGPU::sub0)
2149         .addReg(UndefReg)
2150         .addImm(AMDGPU::sub1);
2151 
2152       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2153         .addReg(ExtReg)
2154         .addImm(SrcSize << 16);
2155 
2156       I.eraseFromParent();
2157       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2158     }
2159 
2160     unsigned Mask;
2161     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2162       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2163         .addReg(SrcReg)
2164         .addImm(Mask);
2165     } else {
2166       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2167         .addReg(SrcReg)
2168         .addImm(SrcSize << 16);
2169     }
2170 
2171     I.eraseFromParent();
2172     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2173   }
2174 
2175   return false;
2176 }
2177 
2178 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2179   MachineBasicBlock *BB = I.getParent();
2180   MachineOperand &ImmOp = I.getOperand(1);
2181   Register DstReg = I.getOperand(0).getReg();
2182   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2183 
2184   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2185   if (ImmOp.isFPImm()) {
2186     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2187     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2188   } else if (ImmOp.isCImm()) {
2189     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2190   } else {
2191     llvm_unreachable("Not supported by g_constants");
2192   }
2193 
2194   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2195   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2196 
2197   unsigned Opcode;
2198   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2199     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2200   } else {
2201     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2202 
2203     // We should never produce s1 values on banks other than VCC. If the user of
2204     // this already constrained the register, we may incorrectly think it's VCC
2205     // if it wasn't originally.
2206     if (Size == 1)
2207       return false;
2208   }
2209 
2210   if (Size != 64) {
2211     I.setDesc(TII.get(Opcode));
2212     I.addImplicitDefUseOperands(*MF);
2213     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2214   }
2215 
2216   const DebugLoc &DL = I.getDebugLoc();
2217 
2218   APInt Imm(Size, I.getOperand(1).getImm());
2219 
2220   MachineInstr *ResInst;
2221   if (IsSgpr && TII.isInlineConstant(Imm)) {
2222     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2223       .addImm(I.getOperand(1).getImm());
2224   } else {
2225     const TargetRegisterClass *RC = IsSgpr ?
2226       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2227     Register LoReg = MRI->createVirtualRegister(RC);
2228     Register HiReg = MRI->createVirtualRegister(RC);
2229 
2230     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2231       .addImm(Imm.trunc(32).getZExtValue());
2232 
2233     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2234       .addImm(Imm.ashr(32).getZExtValue());
2235 
2236     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2237       .addReg(LoReg)
2238       .addImm(AMDGPU::sub0)
2239       .addReg(HiReg)
2240       .addImm(AMDGPU::sub1);
2241   }
2242 
2243   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2244   // work for target independent opcodes
2245   I.eraseFromParent();
2246   const TargetRegisterClass *DstRC =
2247     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2248   if (!DstRC)
2249     return true;
2250   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2251 }
2252 
2253 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2254   // Only manually handle the f64 SGPR case.
2255   //
2256   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2257   // the bit ops theoretically have a second result due to the implicit def of
2258   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2259   // that is easy by disabling the check. The result works, but uses a
2260   // nonsensical sreg32orlds_and_sreg_1 regclass.
2261   //
2262   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2263   // the variadic REG_SEQUENCE operands.
2264 
2265   Register Dst = MI.getOperand(0).getReg();
2266   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2267   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2268       MRI->getType(Dst) != LLT::scalar(64))
2269     return false;
2270 
2271   Register Src = MI.getOperand(1).getReg();
2272   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2273   if (Fabs)
2274     Src = Fabs->getOperand(1).getReg();
2275 
2276   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2277       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2278     return false;
2279 
2280   MachineBasicBlock *BB = MI.getParent();
2281   const DebugLoc &DL = MI.getDebugLoc();
2282   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2283   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2284   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2285   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2286 
2287   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2288     .addReg(Src, 0, AMDGPU::sub0);
2289   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2290     .addReg(Src, 0, AMDGPU::sub1);
2291   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2292     .addImm(0x80000000);
2293 
2294   // Set or toggle sign bit.
2295   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2296   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2297     .addReg(HiReg)
2298     .addReg(ConstReg);
2299   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2300     .addReg(LoReg)
2301     .addImm(AMDGPU::sub0)
2302     .addReg(OpReg)
2303     .addImm(AMDGPU::sub1);
2304   MI.eraseFromParent();
2305   return true;
2306 }
2307 
2308 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2309 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2310   Register Dst = MI.getOperand(0).getReg();
2311   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2312   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2313       MRI->getType(Dst) != LLT::scalar(64))
2314     return false;
2315 
2316   Register Src = MI.getOperand(1).getReg();
2317   MachineBasicBlock *BB = MI.getParent();
2318   const DebugLoc &DL = MI.getDebugLoc();
2319   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2320   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2321   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2322   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2323 
2324   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2325       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2326     return false;
2327 
2328   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2329     .addReg(Src, 0, AMDGPU::sub0);
2330   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2331     .addReg(Src, 0, AMDGPU::sub1);
2332   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2333     .addImm(0x7fffffff);
2334 
2335   // Clear sign bit.
2336   // TODO: Should this used S_BITSET0_*?
2337   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2338     .addReg(HiReg)
2339     .addReg(ConstReg);
2340   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2341     .addReg(LoReg)
2342     .addImm(AMDGPU::sub0)
2343     .addReg(OpReg)
2344     .addImm(AMDGPU::sub1);
2345 
2346   MI.eraseFromParent();
2347   return true;
2348 }
2349 
2350 static bool isConstant(const MachineInstr &MI) {
2351   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2352 }
2353 
2354 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2355     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2356 
2357   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2358 
2359   assert(PtrMI);
2360 
2361   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2362     return;
2363 
2364   GEPInfo GEPInfo(*PtrMI);
2365 
2366   for (unsigned i = 1; i != 3; ++i) {
2367     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2368     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2369     assert(OpDef);
2370     if (i == 2 && isConstant(*OpDef)) {
2371       // TODO: Could handle constant base + variable offset, but a combine
2372       // probably should have commuted it.
2373       assert(GEPInfo.Imm == 0);
2374       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2375       continue;
2376     }
2377     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2378     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2379       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2380     else
2381       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2382   }
2383 
2384   AddrInfo.push_back(GEPInfo);
2385   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2386 }
2387 
2388 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2389   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2390 }
2391 
2392 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2393   if (!MI.hasOneMemOperand())
2394     return false;
2395 
2396   const MachineMemOperand *MMO = *MI.memoperands_begin();
2397   const Value *Ptr = MMO->getValue();
2398 
2399   // UndefValue means this is a load of a kernel input.  These are uniform.
2400   // Sometimes LDS instructions have constant pointers.
2401   // If Ptr is null, then that means this mem operand contains a
2402   // PseudoSourceValue like GOT.
2403   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2404       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2405     return true;
2406 
2407   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2408     return true;
2409 
2410   const Instruction *I = dyn_cast<Instruction>(Ptr);
2411   return I && I->getMetadata("amdgpu.uniform");
2412 }
2413 
2414 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2415   for (const GEPInfo &GEPInfo : AddrInfo) {
2416     if (!GEPInfo.VgprParts.empty())
2417       return true;
2418   }
2419   return false;
2420 }
2421 
2422 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2423   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2424   unsigned AS = PtrTy.getAddressSpace();
2425   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2426       STI.ldsRequiresM0Init()) {
2427     MachineBasicBlock *BB = I.getParent();
2428 
2429     // If DS instructions require M0 initialization, insert it before selecting.
2430     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2431       .addImm(-1);
2432   }
2433 }
2434 
2435 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2436   MachineInstr &I) const {
2437   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2438     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2439     unsigned AS = PtrTy.getAddressSpace();
2440     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2441       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2442   }
2443 
2444   initM0(I);
2445   return selectImpl(I, *CoverageInfo);
2446 }
2447 
2448 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2449   if (Reg.isPhysical())
2450     return false;
2451 
2452   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2453   const unsigned Opcode = MI.getOpcode();
2454 
2455   if (Opcode == AMDGPU::COPY)
2456     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2457 
2458   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2459       Opcode == AMDGPU::G_XOR)
2460     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2461            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2462 
2463   if (Opcode == TargetOpcode::G_INTRINSIC)
2464     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2465 
2466   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2467 }
2468 
2469 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2470   MachineBasicBlock *BB = I.getParent();
2471   MachineOperand &CondOp = I.getOperand(0);
2472   Register CondReg = CondOp.getReg();
2473   const DebugLoc &DL = I.getDebugLoc();
2474 
2475   unsigned BrOpcode;
2476   Register CondPhysReg;
2477   const TargetRegisterClass *ConstrainRC;
2478 
2479   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2480   // whether the branch is uniform when selecting the instruction. In
2481   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2482   // RegBankSelect knows what it's doing if the branch condition is scc, even
2483   // though it currently does not.
2484   if (!isVCC(CondReg, *MRI)) {
2485     if (MRI->getType(CondReg) != LLT::scalar(32))
2486       return false;
2487 
2488     CondPhysReg = AMDGPU::SCC;
2489     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2490     ConstrainRC = &AMDGPU::SReg_32RegClass;
2491   } else {
2492     // FIXME: Should scc->vcc copies and with exec?
2493 
2494     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2495     // need to insert an and with exec.
2496     if (!isVCmpResult(CondReg, *MRI)) {
2497       const bool Is64 = STI.isWave64();
2498       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2499       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2500 
2501       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2502       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2503           .addReg(CondReg)
2504           .addReg(Exec);
2505       CondReg = TmpReg;
2506     }
2507 
2508     CondPhysReg = TRI.getVCC();
2509     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2510     ConstrainRC = TRI.getBoolRC();
2511   }
2512 
2513   if (!MRI->getRegClassOrNull(CondReg))
2514     MRI->setRegClass(CondReg, ConstrainRC);
2515 
2516   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2517     .addReg(CondReg);
2518   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2519     .addMBB(I.getOperand(1).getMBB());
2520 
2521   I.eraseFromParent();
2522   return true;
2523 }
2524 
2525 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2526   MachineInstr &I) const {
2527   Register DstReg = I.getOperand(0).getReg();
2528   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2529   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2530   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2531   if (IsVGPR)
2532     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2533 
2534   return RBI.constrainGenericRegister(
2535     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2536 }
2537 
2538 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2539   Register DstReg = I.getOperand(0).getReg();
2540   Register SrcReg = I.getOperand(1).getReg();
2541   Register MaskReg = I.getOperand(2).getReg();
2542   LLT Ty = MRI->getType(DstReg);
2543   LLT MaskTy = MRI->getType(MaskReg);
2544   MachineBasicBlock *BB = I.getParent();
2545   const DebugLoc &DL = I.getDebugLoc();
2546 
2547   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2548   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2549   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2550   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2551   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2552     return false;
2553 
2554   // Try to avoid emitting a bit operation when we only need to touch half of
2555   // the 64-bit pointer.
2556   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2557   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2558   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2559 
2560   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2561   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2562 
2563   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2564       !CanCopyLow32 && !CanCopyHi32) {
2565     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2566       .addReg(SrcReg)
2567       .addReg(MaskReg);
2568     I.eraseFromParent();
2569     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2570   }
2571 
2572   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2573   const TargetRegisterClass &RegRC
2574     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2575 
2576   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2577   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2578   const TargetRegisterClass *MaskRC =
2579       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2580 
2581   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2582       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2583       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2584     return false;
2585 
2586   if (Ty.getSizeInBits() == 32) {
2587     assert(MaskTy.getSizeInBits() == 32 &&
2588            "ptrmask should have been narrowed during legalize");
2589 
2590     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2591       .addReg(SrcReg)
2592       .addReg(MaskReg);
2593     I.eraseFromParent();
2594     return true;
2595   }
2596 
2597   Register HiReg = MRI->createVirtualRegister(&RegRC);
2598   Register LoReg = MRI->createVirtualRegister(&RegRC);
2599 
2600   // Extract the subregisters from the source pointer.
2601   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2602     .addReg(SrcReg, 0, AMDGPU::sub0);
2603   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2604     .addReg(SrcReg, 0, AMDGPU::sub1);
2605 
2606   Register MaskedLo, MaskedHi;
2607 
2608   if (CanCopyLow32) {
2609     // If all the bits in the low half are 1, we only need a copy for it.
2610     MaskedLo = LoReg;
2611   } else {
2612     // Extract the mask subregister and apply the and.
2613     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2614     MaskedLo = MRI->createVirtualRegister(&RegRC);
2615 
2616     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2617       .addReg(MaskReg, 0, AMDGPU::sub0);
2618     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2619       .addReg(LoReg)
2620       .addReg(MaskLo);
2621   }
2622 
2623   if (CanCopyHi32) {
2624     // If all the bits in the high half are 1, we only need a copy for it.
2625     MaskedHi = HiReg;
2626   } else {
2627     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2628     MaskedHi = MRI->createVirtualRegister(&RegRC);
2629 
2630     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2631       .addReg(MaskReg, 0, AMDGPU::sub1);
2632     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2633       .addReg(HiReg)
2634       .addReg(MaskHi);
2635   }
2636 
2637   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2638     .addReg(MaskedLo)
2639     .addImm(AMDGPU::sub0)
2640     .addReg(MaskedHi)
2641     .addImm(AMDGPU::sub1);
2642   I.eraseFromParent();
2643   return true;
2644 }
2645 
2646 /// Return the register to use for the index value, and the subregister to use
2647 /// for the indirectly accessed register.
2648 static std::pair<Register, unsigned>
2649 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2650                         const SIRegisterInfo &TRI,
2651                         const TargetRegisterClass *SuperRC,
2652                         Register IdxReg,
2653                         unsigned EltSize) {
2654   Register IdxBaseReg;
2655   int Offset;
2656 
2657   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2658   if (IdxBaseReg == AMDGPU::NoRegister) {
2659     // This will happen if the index is a known constant. This should ordinarily
2660     // be legalized out, but handle it as a register just in case.
2661     assert(Offset == 0);
2662     IdxBaseReg = IdxReg;
2663   }
2664 
2665   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2666 
2667   // Skip out of bounds offsets, or else we would end up using an undefined
2668   // register.
2669   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2670     return std::make_pair(IdxReg, SubRegs[0]);
2671   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2672 }
2673 
2674 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2675   MachineInstr &MI) const {
2676   Register DstReg = MI.getOperand(0).getReg();
2677   Register SrcReg = MI.getOperand(1).getReg();
2678   Register IdxReg = MI.getOperand(2).getReg();
2679 
2680   LLT DstTy = MRI->getType(DstReg);
2681   LLT SrcTy = MRI->getType(SrcReg);
2682 
2683   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2684   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2685   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2686 
2687   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2688   // into a waterfall loop.
2689   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2690     return false;
2691 
2692   const TargetRegisterClass *SrcRC =
2693       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2694   const TargetRegisterClass *DstRC =
2695       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2696   if (!SrcRC || !DstRC)
2697     return false;
2698   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2699       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2700       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2701     return false;
2702 
2703   MachineBasicBlock *BB = MI.getParent();
2704   const DebugLoc &DL = MI.getDebugLoc();
2705   const bool Is64 = DstTy.getSizeInBits() == 64;
2706 
2707   unsigned SubReg;
2708   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2709                                                      DstTy.getSizeInBits() / 8);
2710 
2711   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2712     if (DstTy.getSizeInBits() != 32 && !Is64)
2713       return false;
2714 
2715     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2716       .addReg(IdxReg);
2717 
2718     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2719     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2720       .addReg(SrcReg, 0, SubReg)
2721       .addReg(SrcReg, RegState::Implicit);
2722     MI.eraseFromParent();
2723     return true;
2724   }
2725 
2726   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2727     return false;
2728 
2729   if (!STI.useVGPRIndexMode()) {
2730     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2731       .addReg(IdxReg);
2732     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2733       .addReg(SrcReg, 0, SubReg)
2734       .addReg(SrcReg, RegState::Implicit);
2735     MI.eraseFromParent();
2736     return true;
2737   }
2738 
2739   const MCInstrDesc &GPRIDXDesc =
2740       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2741   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2742       .addReg(SrcReg)
2743       .addReg(IdxReg)
2744       .addImm(SubReg);
2745 
2746   MI.eraseFromParent();
2747   return true;
2748 }
2749 
2750 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2751 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2752   MachineInstr &MI) const {
2753   Register DstReg = MI.getOperand(0).getReg();
2754   Register VecReg = MI.getOperand(1).getReg();
2755   Register ValReg = MI.getOperand(2).getReg();
2756   Register IdxReg = MI.getOperand(3).getReg();
2757 
2758   LLT VecTy = MRI->getType(DstReg);
2759   LLT ValTy = MRI->getType(ValReg);
2760   unsigned VecSize = VecTy.getSizeInBits();
2761   unsigned ValSize = ValTy.getSizeInBits();
2762 
2763   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2764   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2765   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2766 
2767   assert(VecTy.getElementType() == ValTy);
2768 
2769   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2770   // into a waterfall loop.
2771   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2772     return false;
2773 
2774   const TargetRegisterClass *VecRC =
2775       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2776   const TargetRegisterClass *ValRC =
2777       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2778 
2779   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2780       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2781       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2782       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2783     return false;
2784 
2785   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2786     return false;
2787 
2788   unsigned SubReg;
2789   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2790                                                      ValSize / 8);
2791 
2792   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2793                          STI.useVGPRIndexMode();
2794 
2795   MachineBasicBlock *BB = MI.getParent();
2796   const DebugLoc &DL = MI.getDebugLoc();
2797 
2798   if (!IndexMode) {
2799     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2800       .addReg(IdxReg);
2801 
2802     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2803         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2804     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2805         .addReg(VecReg)
2806         .addReg(ValReg)
2807         .addImm(SubReg);
2808     MI.eraseFromParent();
2809     return true;
2810   }
2811 
2812   const MCInstrDesc &GPRIDXDesc =
2813       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2814   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2815       .addReg(VecReg)
2816       .addReg(ValReg)
2817       .addReg(IdxReg)
2818       .addImm(SubReg);
2819 
2820   MI.eraseFromParent();
2821   return true;
2822 }
2823 
2824 static bool isZeroOrUndef(int X) {
2825   return X == 0 || X == -1;
2826 }
2827 
2828 static bool isOneOrUndef(int X) {
2829   return X == 1 || X == -1;
2830 }
2831 
2832 static bool isZeroOrOneOrUndef(int X) {
2833   return X == 0 || X == 1 || X == -1;
2834 }
2835 
2836 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2837 // 32-bit register.
2838 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2839                                    ArrayRef<int> Mask) {
2840   NewMask[0] = Mask[0];
2841   NewMask[1] = Mask[1];
2842   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2843     return Src0;
2844 
2845   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2846   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2847 
2848   // Shift the mask inputs to be 0/1;
2849   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2850   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2851   return Src1;
2852 }
2853 
2854 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2855 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2856   MachineInstr &MI) const {
2857   Register DstReg = MI.getOperand(0).getReg();
2858   Register Src0Reg = MI.getOperand(1).getReg();
2859   Register Src1Reg = MI.getOperand(2).getReg();
2860   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2861 
2862   const LLT V2S16 = LLT::fixed_vector(2, 16);
2863   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2864     return false;
2865 
2866   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2867     return false;
2868 
2869   assert(ShufMask.size() == 2);
2870 
2871   MachineBasicBlock *MBB = MI.getParent();
2872   const DebugLoc &DL = MI.getDebugLoc();
2873 
2874   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2875   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2876   const TargetRegisterClass &RC = IsVALU ?
2877     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2878 
2879   // Handle the degenerate case which should have folded out.
2880   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2881     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2882 
2883     MI.eraseFromParent();
2884     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2885   }
2886 
2887   // A legal VOP3P mask only reads one of the sources.
2888   int Mask[2];
2889   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2890 
2891   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2892       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2893     return false;
2894 
2895   // TODO: This also should have been folded out
2896   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2897     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2898       .addReg(SrcVec);
2899 
2900     MI.eraseFromParent();
2901     return true;
2902   }
2903 
2904   if (Mask[0] == 1 && Mask[1] == -1) {
2905     if (IsVALU) {
2906       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2907         .addImm(16)
2908         .addReg(SrcVec);
2909     } else {
2910       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2911         .addReg(SrcVec)
2912         .addImm(16);
2913     }
2914   } else if (Mask[0] == -1 && Mask[1] == 0) {
2915     if (IsVALU) {
2916       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2917         .addImm(16)
2918         .addReg(SrcVec);
2919     } else {
2920       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2921         .addReg(SrcVec)
2922         .addImm(16);
2923     }
2924   } else if (Mask[0] == 0 && Mask[1] == 0) {
2925     if (IsVALU) {
2926       if (STI.hasSDWA()) {
2927         // Write low half of the register into the high half.
2928         MachineInstr *MovSDWA =
2929             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2930                 .addImm(0)                             // $src0_modifiers
2931                 .addReg(SrcVec)                        // $src0
2932                 .addImm(0)                             // $clamp
2933                 .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2934                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2935                 .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2936                 .addReg(SrcVec, RegState::Implicit);
2937         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2938       } else {
2939         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2940         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
2941             .addImm(0xFFFF)
2942             .addReg(SrcVec);
2943         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2944             .addReg(TmpReg)
2945             .addImm(16)
2946             .addReg(TmpReg);
2947       }
2948     } else {
2949       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2950         .addReg(SrcVec)
2951         .addReg(SrcVec);
2952     }
2953   } else if (Mask[0] == 1 && Mask[1] == 1) {
2954     if (IsVALU) {
2955       if (STI.hasSDWA()) {
2956         // Write high half of the register into the low half.
2957         MachineInstr *MovSDWA =
2958             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2959                 .addImm(0)                             // $src0_modifiers
2960                 .addReg(SrcVec)                        // $src0
2961                 .addImm(0)                             // $clamp
2962                 .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2963                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2964                 .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2965                 .addReg(SrcVec, RegState::Implicit);
2966         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2967       } else {
2968         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2969         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
2970             .addImm(16)
2971             .addReg(SrcVec);
2972         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2973             .addReg(TmpReg)
2974             .addImm(16)
2975             .addReg(TmpReg);
2976       }
2977     } else {
2978       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2979         .addReg(SrcVec)
2980         .addReg(SrcVec);
2981     }
2982   } else if (Mask[0] == 1 && Mask[1] == 0) {
2983     if (IsVALU) {
2984       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2985         .addReg(SrcVec)
2986         .addReg(SrcVec)
2987         .addImm(16);
2988     } else {
2989       if (STI.hasSPackHL()) {
2990         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HL_B32_B16), DstReg)
2991             .addReg(SrcVec)
2992             .addReg(SrcVec);
2993       } else {
2994         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2995         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2996             .addReg(SrcVec)
2997             .addImm(16);
2998         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2999             .addReg(TmpReg)
3000             .addReg(SrcVec);
3001       }
3002     }
3003   } else
3004     llvm_unreachable("all shuffle masks should be handled");
3005 
3006   MI.eraseFromParent();
3007   return true;
3008 }
3009 
3010 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
3011   MachineInstr &MI) const {
3012   const Register DefReg = MI.getOperand(0).getReg();
3013   LLT DefTy = MRI->getType(DefReg);
3014   if (AMDGPU::hasAtomicFaddRtnForTy(STI, DefTy))
3015     return selectImpl(MI, *CoverageInfo);
3016 
3017   MachineBasicBlock *MBB = MI.getParent();
3018   const DebugLoc &DL = MI.getDebugLoc();
3019 
3020   if (!MRI->use_nodbg_empty(DefReg)) {
3021     Function &F = MBB->getParent()->getFunction();
3022     DiagnosticInfoUnsupported
3023       NoFpRet(F, "return versions of fp atomics not supported",
3024               MI.getDebugLoc(), DS_Error);
3025     F.getContext().diagnose(NoFpRet);
3026     return false;
3027   }
3028 
3029   // FIXME: This is only needed because tablegen requires number of dst operands
3030   // in match and replace pattern to be the same. Otherwise patterns can be
3031   // exported from SDag path.
3032   MachineOperand &VDataIn = MI.getOperand(1);
3033   MachineOperand &VIndex = MI.getOperand(3);
3034   MachineOperand &VOffset = MI.getOperand(4);
3035   MachineOperand &SOffset = MI.getOperand(5);
3036   int16_t Offset = MI.getOperand(6).getImm();
3037 
3038   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3039   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3040 
3041   unsigned Opcode;
3042   if (HasVOffset) {
3043     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3044                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3045   } else {
3046     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3047                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3048   }
3049 
3050   if (MRI->getType(VDataIn.getReg()).isVector()) {
3051     switch (Opcode) {
3052     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3053       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3054       break;
3055     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3056       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3057       break;
3058     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3059       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3060       break;
3061     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3062       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3063       break;
3064     }
3065   }
3066 
3067   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3068   I.add(VDataIn);
3069 
3070   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3071       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3072     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3073     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3074       .addReg(VIndex.getReg())
3075       .addImm(AMDGPU::sub0)
3076       .addReg(VOffset.getReg())
3077       .addImm(AMDGPU::sub1);
3078 
3079     I.addReg(IdxReg);
3080   } else if (HasVIndex) {
3081     I.add(VIndex);
3082   } else if (HasVOffset) {
3083     I.add(VOffset);
3084   }
3085 
3086   I.add(MI.getOperand(2)); // rsrc
3087   I.add(SOffset);
3088   I.addImm(Offset);
3089   I.addImm(MI.getOperand(7).getImm()); // cpol
3090   I.cloneMemRefs(MI);
3091 
3092   MI.eraseFromParent();
3093 
3094   return true;
3095 }
3096 
3097 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3098   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3099 
3100   if (STI.hasGFX90AInsts()) {
3101     // gfx90a adds return versions of the global atomic fadd instructions so no
3102     // special handling is required.
3103     return selectImpl(MI, *CoverageInfo);
3104   }
3105 
3106   MachineBasicBlock *MBB = MI.getParent();
3107   const DebugLoc &DL = MI.getDebugLoc();
3108 
3109   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3110     Function &F = MBB->getParent()->getFunction();
3111     DiagnosticInfoUnsupported
3112       NoFpRet(F, "return versions of fp atomics not supported",
3113               MI.getDebugLoc(), DS_Error);
3114     F.getContext().diagnose(NoFpRet);
3115     return false;
3116   }
3117 
3118   // FIXME: This is only needed because tablegen requires number of dst operands
3119   // in match and replace pattern to be the same. Otherwise patterns can be
3120   // exported from SDag path.
3121   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3122 
3123   Register Data = DataOp.getReg();
3124   const unsigned Opc = MRI->getType(Data).isVector() ?
3125     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3126   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3127     .addReg(Addr.first)
3128     .addReg(Data)
3129     .addImm(Addr.second)
3130     .addImm(0) // cpol
3131     .cloneMemRefs(MI);
3132 
3133   MI.eraseFromParent();
3134   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3135 }
3136 
3137 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3138   unsigned Opc;
3139   unsigned Size = MI.getOperand(3).getImm();
3140 
3141   // The struct intrinsic variants add one additional operand over raw.
3142   const bool HasVIndex = MI.getNumOperands() == 9;
3143   Register VIndex;
3144   int OpOffset = 0;
3145   if (HasVIndex) {
3146     VIndex = MI.getOperand(4).getReg();
3147     OpOffset = 1;
3148   }
3149 
3150   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3151   Optional<ValueAndVReg> MaybeVOffset =
3152       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3153   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3154 
3155   switch (Size) {
3156   default:
3157     return false;
3158   case 1:
3159     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3160                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3161                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3162                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3163     break;
3164   case 2:
3165     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3166                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3167                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3168                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3169     break;
3170   case 4:
3171     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3172                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3173                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3174                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3175     break;
3176   }
3177 
3178   MachineBasicBlock *MBB = MI.getParent();
3179   const DebugLoc &DL = MI.getDebugLoc();
3180   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3181     .add(MI.getOperand(2));
3182 
3183   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3184 
3185   if (HasVIndex && HasVOffset) {
3186     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3187     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3188       .addReg(VIndex)
3189       .addImm(AMDGPU::sub0)
3190       .addReg(VOffset)
3191       .addImm(AMDGPU::sub1);
3192 
3193     MIB.addReg(IdxReg);
3194   } else if (HasVIndex) {
3195     MIB.addReg(VIndex);
3196   } else if (HasVOffset) {
3197     MIB.addReg(VOffset);
3198   }
3199 
3200   MIB.add(MI.getOperand(1));            // rsrc
3201   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3202   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3203   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3204   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3205   MIB.addImm((Aux >> 3) & 1);           // swz
3206 
3207   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3208   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3209   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3210   MachinePointerInfo StorePtrI = LoadPtrI;
3211   StorePtrI.V = nullptr;
3212   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3213 
3214   auto F = LoadMMO->getFlags() &
3215            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3216   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3217                                      Size, LoadMMO->getBaseAlign());
3218 
3219   MachineMemOperand *StoreMMO =
3220       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3221                                sizeof(int32_t), LoadMMO->getBaseAlign());
3222 
3223   MIB.setMemRefs({LoadMMO, StoreMMO});
3224 
3225   MI.eraseFromParent();
3226   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3227 }
3228 
3229 /// Match a zero extend from a 32-bit value to 64-bits.
3230 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3231   Register ZExtSrc;
3232   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3233     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3234 
3235   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3236   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3237   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3238     return Register();
3239 
3240   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3241     return Def->getOperand(1).getReg();
3242   }
3243 
3244   return Register();
3245 }
3246 
3247 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
3248   unsigned Opc;
3249   unsigned Size = MI.getOperand(3).getImm();
3250 
3251   switch (Size) {
3252   default:
3253     return false;
3254   case 1:
3255     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3256     break;
3257   case 2:
3258     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3259     break;
3260   case 4:
3261     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3262     break;
3263   }
3264 
3265   MachineBasicBlock *MBB = MI.getParent();
3266   const DebugLoc &DL = MI.getDebugLoc();
3267   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3268     .add(MI.getOperand(2));
3269 
3270   Register Addr = MI.getOperand(1).getReg();
3271   Register VOffset;
3272   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3273   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3274   if (!isSGPR(Addr)) {
3275     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3276     if (isSGPR(AddrDef->Reg)) {
3277       Addr = AddrDef->Reg;
3278     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3279       Register SAddr =
3280           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3281       if (SAddr && isSGPR(SAddr)) {
3282         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3283         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3284           Addr = SAddr;
3285           VOffset = Off;
3286         }
3287       }
3288     }
3289   }
3290 
3291   if (isSGPR(Addr)) {
3292     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3293     if (!VOffset) {
3294       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3295       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3296         .addImm(0);
3297     }
3298   }
3299 
3300   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3301     .addReg(Addr);
3302 
3303   if (isSGPR(Addr))
3304     MIB.addReg(VOffset);
3305 
3306   MIB.add(MI.getOperand(4))  // offset
3307      .add(MI.getOperand(5)); // cpol
3308 
3309   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3310   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3311   LoadPtrI.Offset = MI.getOperand(4).getImm();
3312   MachinePointerInfo StorePtrI = LoadPtrI;
3313   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3314   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3315   auto F = LoadMMO->getFlags() &
3316            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3317   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3318                                      Size, LoadMMO->getBaseAlign());
3319   MachineMemOperand *StoreMMO =
3320       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3321                                sizeof(int32_t), Align(4));
3322 
3323   MIB.setMemRefs({LoadMMO, StoreMMO});
3324 
3325   MI.eraseFromParent();
3326   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3327 }
3328 
3329 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3330   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3331   MI.removeOperand(1);
3332   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3333   return true;
3334 }
3335 
3336 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3337   unsigned Opc;
3338   switch (MI.getIntrinsicID()) {
3339   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3340     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3341     break;
3342   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3343     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3344     break;
3345   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3346     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3347     break;
3348   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3349     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3350     break;
3351   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3352     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3353     break;
3354   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3355     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3356     break;
3357   default:
3358     llvm_unreachable("unhandled smfmac intrinsic");
3359   }
3360 
3361   auto VDst_In = MI.getOperand(4);
3362 
3363   MI.setDesc(TII.get(Opc));
3364   MI.removeOperand(4); // VDst_In
3365   MI.removeOperand(1); // Intrinsic ID
3366   MI.addOperand(VDst_In); // Readd VDst_In to the end
3367   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3368   return true;
3369 }
3370 
3371 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3372   Register DstReg = MI.getOperand(0).getReg();
3373   Register SrcReg = MI.getOperand(1).getReg();
3374   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3375   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3376   MachineBasicBlock *MBB = MI.getParent();
3377   const DebugLoc &DL = MI.getDebugLoc();
3378 
3379   if (IsVALU) {
3380     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3381       .addImm(Subtarget->getWavefrontSizeLog2())
3382       .addReg(SrcReg);
3383   } else {
3384     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3385       .addReg(SrcReg)
3386       .addImm(Subtarget->getWavefrontSizeLog2());
3387   }
3388 
3389   const TargetRegisterClass &RC =
3390       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3391   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3392     return false;
3393 
3394   MI.eraseFromParent();
3395   return true;
3396 }
3397 
3398 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3399   if (I.isPHI())
3400     return selectPHI(I);
3401 
3402   if (!I.isPreISelOpcode()) {
3403     if (I.isCopy())
3404       return selectCOPY(I);
3405     return true;
3406   }
3407 
3408   switch (I.getOpcode()) {
3409   case TargetOpcode::G_AND:
3410   case TargetOpcode::G_OR:
3411   case TargetOpcode::G_XOR:
3412     if (selectImpl(I, *CoverageInfo))
3413       return true;
3414     return selectG_AND_OR_XOR(I);
3415   case TargetOpcode::G_ADD:
3416   case TargetOpcode::G_SUB:
3417     if (selectImpl(I, *CoverageInfo))
3418       return true;
3419     return selectG_ADD_SUB(I);
3420   case TargetOpcode::G_UADDO:
3421   case TargetOpcode::G_USUBO:
3422   case TargetOpcode::G_UADDE:
3423   case TargetOpcode::G_USUBE:
3424     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3425   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3426   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3427     return selectG_AMDGPU_MAD_64_32(I);
3428   case TargetOpcode::G_INTTOPTR:
3429   case TargetOpcode::G_BITCAST:
3430   case TargetOpcode::G_PTRTOINT:
3431     return selectCOPY(I);
3432   case TargetOpcode::G_CONSTANT:
3433   case TargetOpcode::G_FCONSTANT:
3434     return selectG_CONSTANT(I);
3435   case TargetOpcode::G_FNEG:
3436     if (selectImpl(I, *CoverageInfo))
3437       return true;
3438     return selectG_FNEG(I);
3439   case TargetOpcode::G_FABS:
3440     if (selectImpl(I, *CoverageInfo))
3441       return true;
3442     return selectG_FABS(I);
3443   case TargetOpcode::G_EXTRACT:
3444     return selectG_EXTRACT(I);
3445   case TargetOpcode::G_MERGE_VALUES:
3446   case TargetOpcode::G_BUILD_VECTOR:
3447   case TargetOpcode::G_CONCAT_VECTORS:
3448     return selectG_MERGE_VALUES(I);
3449   case TargetOpcode::G_UNMERGE_VALUES:
3450     return selectG_UNMERGE_VALUES(I);
3451   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3452     return selectG_BUILD_VECTOR_TRUNC(I);
3453   case TargetOpcode::G_PTR_ADD:
3454     return selectG_PTR_ADD(I);
3455   case TargetOpcode::G_IMPLICIT_DEF:
3456     return selectG_IMPLICIT_DEF(I);
3457   case TargetOpcode::G_FREEZE:
3458     return selectCOPY(I);
3459   case TargetOpcode::G_INSERT:
3460     return selectG_INSERT(I);
3461   case TargetOpcode::G_INTRINSIC:
3462     return selectG_INTRINSIC(I);
3463   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3464     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3465   case TargetOpcode::G_ICMP:
3466     if (selectG_ICMP(I))
3467       return true;
3468     return selectImpl(I, *CoverageInfo);
3469   case TargetOpcode::G_LOAD:
3470   case TargetOpcode::G_STORE:
3471   case TargetOpcode::G_ATOMIC_CMPXCHG:
3472   case TargetOpcode::G_ATOMICRMW_XCHG:
3473   case TargetOpcode::G_ATOMICRMW_ADD:
3474   case TargetOpcode::G_ATOMICRMW_SUB:
3475   case TargetOpcode::G_ATOMICRMW_AND:
3476   case TargetOpcode::G_ATOMICRMW_OR:
3477   case TargetOpcode::G_ATOMICRMW_XOR:
3478   case TargetOpcode::G_ATOMICRMW_MIN:
3479   case TargetOpcode::G_ATOMICRMW_MAX:
3480   case TargetOpcode::G_ATOMICRMW_UMIN:
3481   case TargetOpcode::G_ATOMICRMW_UMAX:
3482   case TargetOpcode::G_ATOMICRMW_FADD:
3483   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3484   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3485   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3486   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3487     return selectG_LOAD_STORE_ATOMICRMW(I);
3488   case TargetOpcode::G_SELECT:
3489     return selectG_SELECT(I);
3490   case TargetOpcode::G_TRUNC:
3491     return selectG_TRUNC(I);
3492   case TargetOpcode::G_SEXT:
3493   case TargetOpcode::G_ZEXT:
3494   case TargetOpcode::G_ANYEXT:
3495   case TargetOpcode::G_SEXT_INREG:
3496     if (selectImpl(I, *CoverageInfo))
3497       return true;
3498     return selectG_SZA_EXT(I);
3499   case TargetOpcode::G_BRCOND:
3500     return selectG_BRCOND(I);
3501   case TargetOpcode::G_GLOBAL_VALUE:
3502     return selectG_GLOBAL_VALUE(I);
3503   case TargetOpcode::G_PTRMASK:
3504     return selectG_PTRMASK(I);
3505   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3506     return selectG_EXTRACT_VECTOR_ELT(I);
3507   case TargetOpcode::G_INSERT_VECTOR_ELT:
3508     return selectG_INSERT_VECTOR_ELT(I);
3509   case TargetOpcode::G_SHUFFLE_VECTOR:
3510     return selectG_SHUFFLE_VECTOR(I);
3511   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3512   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3513   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3514   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3515     const AMDGPU::ImageDimIntrinsicInfo *Intr
3516       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3517     assert(Intr && "not an image intrinsic with image pseudo");
3518     return selectImageIntrinsic(I, Intr);
3519   }
3520   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3521     return selectBVHIntrinsic(I);
3522   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3523     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3524   case AMDGPU::G_SBFX:
3525   case AMDGPU::G_UBFX:
3526     return selectG_SBFX_UBFX(I);
3527   case AMDGPU::G_SI_CALL:
3528     I.setDesc(TII.get(AMDGPU::SI_CALL));
3529     return true;
3530   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3531     return selectWaveAddress(I);
3532   default:
3533     return selectImpl(I, *CoverageInfo);
3534   }
3535   return false;
3536 }
3537 
3538 InstructionSelector::ComplexRendererFns
3539 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3540   return {{
3541       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3542   }};
3543 
3544 }
3545 
3546 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3547     MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const {
3548   Register Src = Root.getReg();
3549   Register OrigSrc = Src;
3550   unsigned Mods = 0;
3551   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3552 
3553   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3554     Src = MI->getOperand(1).getReg();
3555     Mods |= SISrcMods::NEG;
3556     MI = getDefIgnoringCopies(Src, *MRI);
3557   }
3558 
3559   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3560     Src = MI->getOperand(1).getReg();
3561     Mods |= SISrcMods::ABS;
3562   }
3563 
3564   if (OpSel)
3565     Mods |= SISrcMods::OP_SEL_0;
3566 
3567   if ((Mods != 0 || ForceVGPR) &&
3568       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3569     MachineInstr *UseMI = Root.getParent();
3570 
3571     // If we looked through copies to find source modifiers on an SGPR operand,
3572     // we now have an SGPR register source. To avoid potentially violating the
3573     // constant bus restriction, we need to insert a copy to a VGPR.
3574     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3575     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3576             TII.get(AMDGPU::COPY), VGPRSrc)
3577       .addReg(Src);
3578     Src = VGPRSrc;
3579   }
3580 
3581   return std::make_pair(Src, Mods);
3582 }
3583 
3584 ///
3585 /// This will select either an SGPR or VGPR operand and will save us from
3586 /// having to write an extra tablegen pattern.
3587 InstructionSelector::ComplexRendererFns
3588 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3589   return {{
3590       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3591   }};
3592 }
3593 
3594 InstructionSelector::ComplexRendererFns
3595 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3596   Register Src;
3597   unsigned Mods;
3598   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3599 
3600   return {{
3601       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3602       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3603       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3604       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3605   }};
3606 }
3607 
3608 InstructionSelector::ComplexRendererFns
3609 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3610   Register Src;
3611   unsigned Mods;
3612   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3613 
3614   return {{
3615       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3616       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3617       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3618       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3619   }};
3620 }
3621 
3622 InstructionSelector::ComplexRendererFns
3623 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3624   return {{
3625       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3626       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3627       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3628   }};
3629 }
3630 
3631 InstructionSelector::ComplexRendererFns
3632 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3633   Register Src;
3634   unsigned Mods;
3635   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3636 
3637   return {{
3638       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3639       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3640   }};
3641 }
3642 
3643 InstructionSelector::ComplexRendererFns
3644 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3645   Register Src;
3646   unsigned Mods;
3647   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3648 
3649   return {{
3650       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3651       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3652   }};
3653 }
3654 
3655 InstructionSelector::ComplexRendererFns
3656 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3657   Register Reg = Root.getReg();
3658   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3659   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3660               Def->getOpcode() == AMDGPU::G_FABS))
3661     return {};
3662   return {{
3663       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3664   }};
3665 }
3666 
3667 std::pair<Register, unsigned>
3668 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3669   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3670   unsigned Mods = 0;
3671   MachineInstr *MI = MRI.getVRegDef(Src);
3672 
3673   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3674       // It's possible to see an f32 fneg here, but unlikely.
3675       // TODO: Treat f32 fneg as only high bit.
3676       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3677     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3678     Src = MI->getOperand(1).getReg();
3679     MI = MRI.getVRegDef(Src);
3680   }
3681 
3682   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3683   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3684 
3685   // Packed instructions do not have abs modifiers.
3686   Mods |= SISrcMods::OP_SEL_1;
3687 
3688   return std::make_pair(Src, Mods);
3689 }
3690 
3691 InstructionSelector::ComplexRendererFns
3692 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3693   MachineRegisterInfo &MRI
3694     = Root.getParent()->getParent()->getParent()->getRegInfo();
3695 
3696   Register Src;
3697   unsigned Mods;
3698   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3699 
3700   return {{
3701       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3702       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3703   }};
3704 }
3705 
3706 InstructionSelector::ComplexRendererFns
3707 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3708   MachineRegisterInfo &MRI
3709     = Root.getParent()->getParent()->getParent()->getRegInfo();
3710 
3711   Register Src;
3712   unsigned Mods;
3713   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3714 
3715   return {{
3716       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3717       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3718   }};
3719 }
3720 
3721 InstructionSelector::ComplexRendererFns
3722 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3723   // Literal i1 value set in intrinsic, represents SrcMods for the next operand.
3724   // Value is in Imm operand as i1 sign extended to int64_t.
3725   // 1(-1) promotes packed values to signed, 0 treats them as unsigned.
3726   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3727          "expected i1 value");
3728   unsigned Mods = SISrcMods::OP_SEL_1;
3729   if (Root.getImm() == -1)
3730     Mods ^= SISrcMods::NEG;
3731   return {{
3732       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3733   }};
3734 }
3735 
3736 InstructionSelector::ComplexRendererFns
3737 AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3738     MachineOperand &Root) const {
3739   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3740          "expected i1 value");
3741   unsigned Mods = SISrcMods::OP_SEL_1;
3742   if (Root.getImm() != 0)
3743     Mods |= SISrcMods::OP_SEL_0;
3744 
3745   return {{
3746       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3747   }};
3748 }
3749 
3750 InstructionSelector::ComplexRendererFns
3751 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3752   Register Src;
3753   unsigned Mods;
3754   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3755   if (!isKnownNeverNaN(Src, *MRI))
3756     return None;
3757 
3758   return {{
3759       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3760       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3761   }};
3762 }
3763 
3764 InstructionSelector::ComplexRendererFns
3765 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3766   // FIXME: Handle op_sel
3767   return {{
3768       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3769       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3770   }};
3771 }
3772 
3773 InstructionSelector::ComplexRendererFns
3774 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3775   Register Src;
3776   unsigned Mods;
3777   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3778                                            /* AllowAbs */ false,
3779                                            /* OpSel */ false,
3780                                            /* ForceVGPR */ true);
3781 
3782   return {{
3783       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3784       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3785   }};
3786 }
3787 
3788 InstructionSelector::ComplexRendererFns
3789 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3790   Register Src;
3791   unsigned Mods;
3792   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3793                                            /* AllowAbs */ false,
3794                                            /* OpSel */ true,
3795                                            /* ForceVGPR */ true);
3796 
3797   return {{
3798       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3799       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3800   }};
3801 }
3802 
3803 InstructionSelector::ComplexRendererFns
3804 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3805   SmallVector<GEPInfo, 4> AddrInfo;
3806   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3807 
3808   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3809     return None;
3810 
3811   const GEPInfo &GEPInfo = AddrInfo[0];
3812   Optional<int64_t> EncodedImm =
3813       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3814   if (!EncodedImm)
3815     return None;
3816 
3817   unsigned PtrReg = GEPInfo.SgprParts[0];
3818   return {{
3819     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3820     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3821   }};
3822 }
3823 
3824 InstructionSelector::ComplexRendererFns
3825 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3826   SmallVector<GEPInfo, 4> AddrInfo;
3827   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3828 
3829   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3830     return None;
3831 
3832   const GEPInfo &GEPInfo = AddrInfo[0];
3833   Register PtrReg = GEPInfo.SgprParts[0];
3834   Optional<int64_t> EncodedImm =
3835       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3836   if (!EncodedImm)
3837     return None;
3838 
3839   return {{
3840     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3841     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3842   }};
3843 }
3844 
3845 InstructionSelector::ComplexRendererFns
3846 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3847   MachineInstr *MI = Root.getParent();
3848   MachineBasicBlock *MBB = MI->getParent();
3849 
3850   SmallVector<GEPInfo, 4> AddrInfo;
3851   getAddrModeInfo(*MI, *MRI, AddrInfo);
3852 
3853   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3854   // then we can select all ptr + 32-bit offsets.
3855   if (AddrInfo.empty())
3856     return None;
3857 
3858   const GEPInfo &GEPInfo = AddrInfo[0];
3859   Register PtrReg = GEPInfo.SgprParts[0];
3860 
3861   // SGPR offset is unsigned.
3862   if (AddrInfo[0].SgprParts.size() == 1 && isUInt<32>(GEPInfo.Imm) &&
3863       GEPInfo.Imm != 0) {
3864     // If we make it this far we have a load with an 32-bit immediate offset.
3865     // It is OK to select this using a sgpr offset, because we have already
3866     // failed trying to select this load into one of the _IMM variants since
3867     // the _IMM Patterns are considered before the _SGPR patterns.
3868     Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3869     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3870         .addImm(GEPInfo.Imm);
3871     return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3872              [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }}};
3873   }
3874 
3875   if (AddrInfo[0].SgprParts.size() == 2 && GEPInfo.Imm == 0) {
3876     if (Register OffsetReg =
3877             matchZeroExtendFromS32(*MRI, GEPInfo.SgprParts[1])) {
3878       return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3879                [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }}};
3880     }
3881   }
3882 
3883   return None;
3884 }
3885 
3886 std::pair<Register, int>
3887 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3888                                                 uint64_t FlatVariant) const {
3889   MachineInstr *MI = Root.getParent();
3890 
3891   auto Default = std::make_pair(Root.getReg(), 0);
3892 
3893   if (!STI.hasFlatInstOffsets())
3894     return Default;
3895 
3896   Register PtrBase;
3897   int64_t ConstOffset;
3898   std::tie(PtrBase, ConstOffset) =
3899       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3900   if (ConstOffset == 0)
3901     return Default;
3902 
3903   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3904   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3905     return Default;
3906 
3907   return std::make_pair(PtrBase, ConstOffset);
3908 }
3909 
3910 InstructionSelector::ComplexRendererFns
3911 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3912   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3913 
3914   return {{
3915       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3916       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3917     }};
3918 }
3919 
3920 InstructionSelector::ComplexRendererFns
3921 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3922   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3923 
3924   return {{
3925       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3926       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3927   }};
3928 }
3929 
3930 InstructionSelector::ComplexRendererFns
3931 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3932   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3933 
3934   return {{
3935       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3936       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3937     }};
3938 }
3939 
3940 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3941 InstructionSelector::ComplexRendererFns
3942 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3943   Register Addr = Root.getReg();
3944   Register PtrBase;
3945   int64_t ConstOffset;
3946   int64_t ImmOffset = 0;
3947 
3948   // Match the immediate offset first, which canonically is moved as low as
3949   // possible.
3950   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3951 
3952   if (ConstOffset != 0) {
3953     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3954                               SIInstrFlags::FlatGlobal)) {
3955       Addr = PtrBase;
3956       ImmOffset = ConstOffset;
3957     } else {
3958       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3959       if (isSGPR(PtrBaseDef->Reg)) {
3960         if (ConstOffset > 0) {
3961           // Offset is too large.
3962           //
3963           // saddr + large_offset -> saddr +
3964           //                         (voffset = large_offset & ~MaxOffset) +
3965           //                         (large_offset & MaxOffset);
3966           int64_t SplitImmOffset, RemainderOffset;
3967           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3968               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3969 
3970           if (isUInt<32>(RemainderOffset)) {
3971             MachineInstr *MI = Root.getParent();
3972             MachineBasicBlock *MBB = MI->getParent();
3973             Register HighBits =
3974                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3975 
3976             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3977                     HighBits)
3978                 .addImm(RemainderOffset);
3979 
3980             return {{
3981                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3982                 [=](MachineInstrBuilder &MIB) {
3983                   MIB.addReg(HighBits);
3984                 }, // voffset
3985                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3986             }};
3987           }
3988         }
3989 
3990         // We are adding a 64 bit SGPR and a constant. If constant bus limit
3991         // is 1 we would need to perform 1 or 2 extra moves for each half of
3992         // the constant and it is better to do a scalar add and then issue a
3993         // single VALU instruction to materialize zero. Otherwise it is less
3994         // instructions to perform VALU adds with immediates or inline literals.
3995         unsigned NumLiterals =
3996             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3997             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3998         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3999           return None;
4000       }
4001     }
4002   }
4003 
4004   // Match the variable offset.
4005   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4006   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4007     // Look through the SGPR->VGPR copy.
4008     Register SAddr =
4009         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4010 
4011     if (SAddr && isSGPR(SAddr)) {
4012       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4013 
4014       // It's possible voffset is an SGPR here, but the copy to VGPR will be
4015       // inserted later.
4016       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4017         return {{[=](MachineInstrBuilder &MIB) { // saddr
4018                    MIB.addReg(SAddr);
4019                  },
4020                  [=](MachineInstrBuilder &MIB) { // voffset
4021                    MIB.addReg(VOffset);
4022                  },
4023                  [=](MachineInstrBuilder &MIB) { // offset
4024                    MIB.addImm(ImmOffset);
4025                  }}};
4026       }
4027     }
4028   }
4029 
4030   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4031   // drop this.
4032   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4033       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4034     return None;
4035 
4036   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4037   // moves required to copy a 64-bit SGPR to VGPR.
4038   MachineInstr *MI = Root.getParent();
4039   MachineBasicBlock *MBB = MI->getParent();
4040   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4041 
4042   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4043       .addImm(0);
4044 
4045   return {{
4046       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4047       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
4048       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
4049   }};
4050 }
4051 
4052 InstructionSelector::ComplexRendererFns
4053 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4054   Register Addr = Root.getReg();
4055   Register PtrBase;
4056   int64_t ConstOffset;
4057   int64_t ImmOffset = 0;
4058 
4059   // Match the immediate offset first, which canonically is moved as low as
4060   // possible.
4061   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4062 
4063   if (ConstOffset != 0 &&
4064       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4065                             SIInstrFlags::FlatScratch)) {
4066     Addr = PtrBase;
4067     ImmOffset = ConstOffset;
4068   }
4069 
4070   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4071   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4072     int FI = AddrDef->MI->getOperand(1).getIndex();
4073     return {{
4074         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4075         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4076     }};
4077   }
4078 
4079   Register SAddr = AddrDef->Reg;
4080 
4081   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4082     Register LHS = AddrDef->MI->getOperand(1).getReg();
4083     Register RHS = AddrDef->MI->getOperand(2).getReg();
4084     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4085     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4086 
4087     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4088         isSGPR(RHSDef->Reg)) {
4089       int FI = LHSDef->MI->getOperand(1).getIndex();
4090       MachineInstr &I = *Root.getParent();
4091       MachineBasicBlock *BB = I.getParent();
4092       const DebugLoc &DL = I.getDebugLoc();
4093       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4094 
4095       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4096           .addFrameIndex(FI)
4097           .addReg(RHSDef->Reg);
4098     }
4099   }
4100 
4101   if (!isSGPR(SAddr))
4102     return None;
4103 
4104   return {{
4105       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4106       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4107   }};
4108 }
4109 
4110 // Check whether the flat scratch SVS swizzle bug affects this access.
4111 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4112     Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4113   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4114     return false;
4115 
4116   // The bug affects the swizzling of SVS accesses if there is any carry out
4117   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4118   // voffset to (soffset + inst_offset).
4119   auto VKnown = KnownBits->getKnownBits(VAddr);
4120   auto SKnown = KnownBits::computeForAddSub(
4121       true, false, KnownBits->getKnownBits(SAddr),
4122       KnownBits::makeConstant(APInt(32, ImmOffset)));
4123   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4124   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4125   return (VMax & 3) + (SMax & 3) >= 4;
4126 }
4127 
4128 InstructionSelector::ComplexRendererFns
4129 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4130   Register Addr = Root.getReg();
4131   Register PtrBase;
4132   int64_t ConstOffset;
4133   int64_t ImmOffset = 0;
4134 
4135   // Match the immediate offset first, which canonically is moved as low as
4136   // possible.
4137   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4138 
4139   if (ConstOffset != 0 &&
4140       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4141     Addr = PtrBase;
4142     ImmOffset = ConstOffset;
4143   }
4144 
4145   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4146   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4147     return None;
4148 
4149   Register RHS = AddrDef->MI->getOperand(2).getReg();
4150   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4151     return None;
4152 
4153   Register LHS = AddrDef->MI->getOperand(1).getReg();
4154   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4155 
4156   if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4157     return None;
4158 
4159   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4160     int FI = LHSDef->MI->getOperand(1).getIndex();
4161     return {{
4162         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4163         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4164         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4165     }};
4166   }
4167 
4168   if (!isSGPR(LHS))
4169     return None;
4170 
4171   return {{
4172       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4173       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4174       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4175   }};
4176 }
4177 
4178 InstructionSelector::ComplexRendererFns
4179 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4180   MachineInstr *MI = Root.getParent();
4181   MachineBasicBlock *MBB = MI->getParent();
4182   MachineFunction *MF = MBB->getParent();
4183   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4184 
4185   int64_t Offset = 0;
4186   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4187       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4188     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4189 
4190     // TODO: Should this be inside the render function? The iterator seems to
4191     // move.
4192     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4193             HighBits)
4194       .addImm(Offset & ~4095);
4195 
4196     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4197                MIB.addReg(Info->getScratchRSrcReg());
4198              },
4199              [=](MachineInstrBuilder &MIB) { // vaddr
4200                MIB.addReg(HighBits);
4201              },
4202              [=](MachineInstrBuilder &MIB) { // soffset
4203                // Use constant zero for soffset and rely on eliminateFrameIndex
4204                // to choose the appropriate frame register if need be.
4205                MIB.addImm(0);
4206              },
4207              [=](MachineInstrBuilder &MIB) { // offset
4208                MIB.addImm(Offset & 4095);
4209              }}};
4210   }
4211 
4212   assert(Offset == 0 || Offset == -1);
4213 
4214   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4215   // offsets.
4216   Optional<int> FI;
4217   Register VAddr = Root.getReg();
4218   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4219     Register PtrBase;
4220     int64_t ConstOffset;
4221     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4222     if (ConstOffset != 0) {
4223       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4224           (!STI.privateMemoryResourceIsRangeChecked() ||
4225            KnownBits->signBitIsZero(PtrBase))) {
4226         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4227         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4228           FI = PtrBaseDef->getOperand(1).getIndex();
4229         else
4230           VAddr = PtrBase;
4231         Offset = ConstOffset;
4232       }
4233     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4234       FI = RootDef->getOperand(1).getIndex();
4235     }
4236   }
4237 
4238   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4239              MIB.addReg(Info->getScratchRSrcReg());
4240            },
4241            [=](MachineInstrBuilder &MIB) { // vaddr
4242              if (FI)
4243                MIB.addFrameIndex(FI.getValue());
4244              else
4245                MIB.addReg(VAddr);
4246            },
4247            [=](MachineInstrBuilder &MIB) { // soffset
4248              // Use constant zero for soffset and rely on eliminateFrameIndex
4249              // to choose the appropriate frame register if need be.
4250              MIB.addImm(0);
4251            },
4252            [=](MachineInstrBuilder &MIB) { // offset
4253              MIB.addImm(Offset);
4254            }}};
4255 }
4256 
4257 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4258                                                 int64_t Offset) const {
4259   if (!isUInt<16>(Offset))
4260     return false;
4261 
4262   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4263     return true;
4264 
4265   // On Southern Islands instruction with a negative base value and an offset
4266   // don't seem to work.
4267   return KnownBits->signBitIsZero(Base);
4268 }
4269 
4270 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4271                                                  int64_t Offset1,
4272                                                  unsigned Size) const {
4273   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4274     return false;
4275   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4276     return false;
4277 
4278   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4279     return true;
4280 
4281   // On Southern Islands instruction with a negative base value and an offset
4282   // don't seem to work.
4283   return KnownBits->signBitIsZero(Base);
4284 }
4285 
4286 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4287                                                     unsigned ShAmtBits) const {
4288   assert(MI.getOpcode() == TargetOpcode::G_AND);
4289 
4290   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4291   if (!RHS)
4292     return false;
4293 
4294   if (RHS->countTrailingOnes() >= ShAmtBits)
4295     return true;
4296 
4297   const APInt &LHSKnownZeros =
4298       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4299   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4300 }
4301 
4302 // Return the wave level SGPR base address if this is a wave address.
4303 static Register getWaveAddress(const MachineInstr *Def) {
4304   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4305              ? Def->getOperand(1).getReg()
4306              : Register();
4307 }
4308 
4309 InstructionSelector::ComplexRendererFns
4310 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4311     MachineOperand &Root) const {
4312   Register Reg = Root.getReg();
4313   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4314 
4315   const MachineInstr *Def = MRI->getVRegDef(Reg);
4316   if (Register WaveBase = getWaveAddress(Def)) {
4317     return {{
4318         [=](MachineInstrBuilder &MIB) { // rsrc
4319           MIB.addReg(Info->getScratchRSrcReg());
4320         },
4321         [=](MachineInstrBuilder &MIB) { // soffset
4322           MIB.addReg(WaveBase);
4323         },
4324         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4325     }};
4326   }
4327 
4328   int64_t Offset = 0;
4329 
4330   // FIXME: Copy check is a hack
4331   Register BasePtr;
4332   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4333     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4334       return {};
4335     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4336     Register WaveBase = getWaveAddress(BasePtrDef);
4337     if (!WaveBase)
4338       return {};
4339 
4340     return {{
4341         [=](MachineInstrBuilder &MIB) { // rsrc
4342           MIB.addReg(Info->getScratchRSrcReg());
4343         },
4344         [=](MachineInstrBuilder &MIB) { // soffset
4345           MIB.addReg(WaveBase);
4346         },
4347         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4348     }};
4349   }
4350 
4351   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4352       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4353     return {};
4354 
4355   return {{
4356       [=](MachineInstrBuilder &MIB) { // rsrc
4357         MIB.addReg(Info->getScratchRSrcReg());
4358       },
4359       [=](MachineInstrBuilder &MIB) { // soffset
4360         MIB.addImm(0);
4361       },
4362       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4363   }};
4364 }
4365 
4366 std::pair<Register, unsigned>
4367 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4368   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4369   if (!RootDef)
4370     return std::make_pair(Root.getReg(), 0);
4371 
4372   int64_t ConstAddr = 0;
4373 
4374   Register PtrBase;
4375   int64_t Offset;
4376   std::tie(PtrBase, Offset) =
4377     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4378 
4379   if (Offset) {
4380     if (isDSOffsetLegal(PtrBase, Offset)) {
4381       // (add n0, c0)
4382       return std::make_pair(PtrBase, Offset);
4383     }
4384   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4385     // TODO
4386 
4387 
4388   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4389     // TODO
4390 
4391   }
4392 
4393   return std::make_pair(Root.getReg(), 0);
4394 }
4395 
4396 InstructionSelector::ComplexRendererFns
4397 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4398   Register Reg;
4399   unsigned Offset;
4400   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4401   return {{
4402       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4403       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4404     }};
4405 }
4406 
4407 InstructionSelector::ComplexRendererFns
4408 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4409   return selectDSReadWrite2(Root, 4);
4410 }
4411 
4412 InstructionSelector::ComplexRendererFns
4413 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4414   return selectDSReadWrite2(Root, 8);
4415 }
4416 
4417 InstructionSelector::ComplexRendererFns
4418 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4419                                               unsigned Size) const {
4420   Register Reg;
4421   unsigned Offset;
4422   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4423   return {{
4424       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4425       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4426       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4427     }};
4428 }
4429 
4430 std::pair<Register, unsigned>
4431 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4432                                                   unsigned Size) const {
4433   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4434   if (!RootDef)
4435     return std::make_pair(Root.getReg(), 0);
4436 
4437   int64_t ConstAddr = 0;
4438 
4439   Register PtrBase;
4440   int64_t Offset;
4441   std::tie(PtrBase, Offset) =
4442     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4443 
4444   if (Offset) {
4445     int64_t OffsetValue0 = Offset;
4446     int64_t OffsetValue1 = Offset + Size;
4447     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4448       // (add n0, c0)
4449       return std::make_pair(PtrBase, OffsetValue0 / Size);
4450     }
4451   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4452     // TODO
4453 
4454   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4455     // TODO
4456 
4457   }
4458 
4459   return std::make_pair(Root.getReg(), 0);
4460 }
4461 
4462 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4463 /// the base value with the constant offset. There may be intervening copies
4464 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4465 /// not match the pattern.
4466 std::pair<Register, int64_t>
4467 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4468   Register Root, const MachineRegisterInfo &MRI) const {
4469   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4470   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4471     return {Root, 0};
4472 
4473   MachineOperand &RHS = RootI->getOperand(2);
4474   Optional<ValueAndVReg> MaybeOffset =
4475       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4476   if (!MaybeOffset)
4477     return {Root, 0};
4478   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4479 }
4480 
4481 static void addZeroImm(MachineInstrBuilder &MIB) {
4482   MIB.addImm(0);
4483 }
4484 
4485 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4486 /// BasePtr is not valid, a null base pointer will be used.
4487 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4488                           uint32_t FormatLo, uint32_t FormatHi,
4489                           Register BasePtr) {
4490   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4491   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4492   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4493   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4494 
4495   B.buildInstr(AMDGPU::S_MOV_B32)
4496     .addDef(RSrc2)
4497     .addImm(FormatLo);
4498   B.buildInstr(AMDGPU::S_MOV_B32)
4499     .addDef(RSrc3)
4500     .addImm(FormatHi);
4501 
4502   // Build the half of the subregister with the constants before building the
4503   // full 128-bit register. If we are building multiple resource descriptors,
4504   // this will allow CSEing of the 2-component register.
4505   B.buildInstr(AMDGPU::REG_SEQUENCE)
4506     .addDef(RSrcHi)
4507     .addReg(RSrc2)
4508     .addImm(AMDGPU::sub0)
4509     .addReg(RSrc3)
4510     .addImm(AMDGPU::sub1);
4511 
4512   Register RSrcLo = BasePtr;
4513   if (!BasePtr) {
4514     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4515     B.buildInstr(AMDGPU::S_MOV_B64)
4516       .addDef(RSrcLo)
4517       .addImm(0);
4518   }
4519 
4520   B.buildInstr(AMDGPU::REG_SEQUENCE)
4521     .addDef(RSrc)
4522     .addReg(RSrcLo)
4523     .addImm(AMDGPU::sub0_sub1)
4524     .addReg(RSrcHi)
4525     .addImm(AMDGPU::sub2_sub3);
4526 
4527   return RSrc;
4528 }
4529 
4530 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4531                                 const SIInstrInfo &TII, Register BasePtr) {
4532   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4533 
4534   // FIXME: Why are half the "default" bits ignored based on the addressing
4535   // mode?
4536   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4537 }
4538 
4539 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4540                                const SIInstrInfo &TII, Register BasePtr) {
4541   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4542 
4543   // FIXME: Why are half the "default" bits ignored based on the addressing
4544   // mode?
4545   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4546 }
4547 
4548 AMDGPUInstructionSelector::MUBUFAddressData
4549 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4550   MUBUFAddressData Data;
4551   Data.N0 = Src;
4552 
4553   Register PtrBase;
4554   int64_t Offset;
4555 
4556   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4557   if (isUInt<32>(Offset)) {
4558     Data.N0 = PtrBase;
4559     Data.Offset = Offset;
4560   }
4561 
4562   if (MachineInstr *InputAdd
4563       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4564     Data.N2 = InputAdd->getOperand(1).getReg();
4565     Data.N3 = InputAdd->getOperand(2).getReg();
4566 
4567     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4568     // FIXME: Don't know this was defined by operand 0
4569     //
4570     // TODO: Remove this when we have copy folding optimizations after
4571     // RegBankSelect.
4572     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4573     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4574   }
4575 
4576   return Data;
4577 }
4578 
4579 /// Return if the addr64 mubuf mode should be used for the given address.
4580 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4581   // (ptr_add N2, N3) -> addr64, or
4582   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4583   if (Addr.N2)
4584     return true;
4585 
4586   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4587   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4588 }
4589 
4590 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4591 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4592 /// component.
4593 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4594   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4595   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4596     return;
4597 
4598   // Illegal offset, store it in soffset.
4599   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4600   B.buildInstr(AMDGPU::S_MOV_B32)
4601     .addDef(SOffset)
4602     .addImm(ImmOffset);
4603   ImmOffset = 0;
4604 }
4605 
4606 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4607   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4608   Register &SOffset, int64_t &Offset) const {
4609   // FIXME: Predicates should stop this from reaching here.
4610   // addr64 bit was removed for volcanic islands.
4611   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4612     return false;
4613 
4614   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4615   if (!shouldUseAddr64(AddrData))
4616     return false;
4617 
4618   Register N0 = AddrData.N0;
4619   Register N2 = AddrData.N2;
4620   Register N3 = AddrData.N3;
4621   Offset = AddrData.Offset;
4622 
4623   // Base pointer for the SRD.
4624   Register SRDPtr;
4625 
4626   if (N2) {
4627     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4628       assert(N3);
4629       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4630         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4631         // addr64, and construct the default resource from a 0 address.
4632         VAddr = N0;
4633       } else {
4634         SRDPtr = N3;
4635         VAddr = N2;
4636       }
4637     } else {
4638       // N2 is not divergent.
4639       SRDPtr = N2;
4640       VAddr = N3;
4641     }
4642   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4643     // Use the default null pointer in the resource
4644     VAddr = N0;
4645   } else {
4646     // N0 -> offset, or
4647     // (N0 + C1) -> offset
4648     SRDPtr = N0;
4649   }
4650 
4651   MachineIRBuilder B(*Root.getParent());
4652   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4653   splitIllegalMUBUFOffset(B, SOffset, Offset);
4654   return true;
4655 }
4656 
4657 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4658   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4659   int64_t &Offset) const {
4660 
4661   // FIXME: Pattern should not reach here.
4662   if (STI.useFlatForGlobal())
4663     return false;
4664 
4665   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4666   if (shouldUseAddr64(AddrData))
4667     return false;
4668 
4669   // N0 -> offset, or
4670   // (N0 + C1) -> offset
4671   Register SRDPtr = AddrData.N0;
4672   Offset = AddrData.Offset;
4673 
4674   // TODO: Look through extensions for 32-bit soffset.
4675   MachineIRBuilder B(*Root.getParent());
4676 
4677   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4678   splitIllegalMUBUFOffset(B, SOffset, Offset);
4679   return true;
4680 }
4681 
4682 InstructionSelector::ComplexRendererFns
4683 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4684   Register VAddr;
4685   Register RSrcReg;
4686   Register SOffset;
4687   int64_t Offset = 0;
4688 
4689   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4690     return {};
4691 
4692   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4693   // pattern.
4694   return {{
4695       [=](MachineInstrBuilder &MIB) {  // rsrc
4696         MIB.addReg(RSrcReg);
4697       },
4698       [=](MachineInstrBuilder &MIB) { // vaddr
4699         MIB.addReg(VAddr);
4700       },
4701       [=](MachineInstrBuilder &MIB) { // soffset
4702         if (SOffset)
4703           MIB.addReg(SOffset);
4704         else
4705           MIB.addImm(0);
4706       },
4707       [=](MachineInstrBuilder &MIB) { // offset
4708         MIB.addImm(Offset);
4709       },
4710       addZeroImm, //  cpol
4711       addZeroImm, //  tfe
4712       addZeroImm  //  swz
4713     }};
4714 }
4715 
4716 InstructionSelector::ComplexRendererFns
4717 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4718   Register RSrcReg;
4719   Register SOffset;
4720   int64_t Offset = 0;
4721 
4722   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4723     return {};
4724 
4725   return {{
4726       [=](MachineInstrBuilder &MIB) {  // rsrc
4727         MIB.addReg(RSrcReg);
4728       },
4729       [=](MachineInstrBuilder &MIB) { // soffset
4730         if (SOffset)
4731           MIB.addReg(SOffset);
4732         else
4733           MIB.addImm(0);
4734       },
4735       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4736       addZeroImm, //  cpol
4737       addZeroImm, //  tfe
4738       addZeroImm, //  swz
4739     }};
4740 }
4741 
4742 InstructionSelector::ComplexRendererFns
4743 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4744   Register VAddr;
4745   Register RSrcReg;
4746   Register SOffset;
4747   int64_t Offset = 0;
4748 
4749   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4750     return {};
4751 
4752   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4753   // pattern.
4754   return {{
4755       [=](MachineInstrBuilder &MIB) {  // rsrc
4756         MIB.addReg(RSrcReg);
4757       },
4758       [=](MachineInstrBuilder &MIB) { // vaddr
4759         MIB.addReg(VAddr);
4760       },
4761       [=](MachineInstrBuilder &MIB) { // soffset
4762         if (SOffset)
4763           MIB.addReg(SOffset);
4764         else
4765           MIB.addImm(0);
4766       },
4767       [=](MachineInstrBuilder &MIB) { // offset
4768         MIB.addImm(Offset);
4769       },
4770       [=](MachineInstrBuilder &MIB) {
4771         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4772       }
4773     }};
4774 }
4775 
4776 InstructionSelector::ComplexRendererFns
4777 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4778   Register RSrcReg;
4779   Register SOffset;
4780   int64_t Offset = 0;
4781 
4782   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4783     return {};
4784 
4785   return {{
4786       [=](MachineInstrBuilder &MIB) {  // rsrc
4787         MIB.addReg(RSrcReg);
4788       },
4789       [=](MachineInstrBuilder &MIB) { // soffset
4790         if (SOffset)
4791           MIB.addReg(SOffset);
4792         else
4793           MIB.addImm(0);
4794       },
4795       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4796       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4797     }};
4798 }
4799 
4800 /// Get an immediate that must be 32-bits, and treated as zero extended.
4801 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4802                                                const MachineRegisterInfo &MRI) {
4803   // getIConstantVRegVal sexts any values, so see if that matters.
4804   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4805   if (!OffsetVal || !isInt<32>(*OffsetVal))
4806     return None;
4807   return Lo_32(*OffsetVal);
4808 }
4809 
4810 InstructionSelector::ComplexRendererFns
4811 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4812   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4813   if (!OffsetVal)
4814     return {};
4815 
4816   Optional<int64_t> EncodedImm =
4817       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4818   if (!EncodedImm)
4819     return {};
4820 
4821   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4822 }
4823 
4824 InstructionSelector::ComplexRendererFns
4825 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4826   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4827 
4828   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4829   if (!OffsetVal)
4830     return {};
4831 
4832   Optional<int64_t> EncodedImm
4833     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4834   if (!EncodedImm)
4835     return {};
4836 
4837   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4838 }
4839 
4840 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4841                                                  const MachineInstr &MI,
4842                                                  int OpIdx) const {
4843   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4844          "Expected G_CONSTANT");
4845   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4846 }
4847 
4848 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4849                                                 const MachineInstr &MI,
4850                                                 int OpIdx) const {
4851   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4852          "Expected G_CONSTANT");
4853   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4854 }
4855 
4856 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4857                                                  const MachineInstr &MI,
4858                                                  int OpIdx) const {
4859   assert(OpIdx == -1);
4860 
4861   const MachineOperand &Op = MI.getOperand(1);
4862   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4863     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4864   else {
4865     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4866     MIB.addImm(Op.getCImm()->getSExtValue());
4867   }
4868 }
4869 
4870 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4871                                                 const MachineInstr &MI,
4872                                                 int OpIdx) const {
4873   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4874          "Expected G_CONSTANT");
4875   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4876 }
4877 
4878 /// This only really exists to satisfy DAG type checking machinery, so is a
4879 /// no-op here.
4880 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4881                                                 const MachineInstr &MI,
4882                                                 int OpIdx) const {
4883   MIB.addImm(MI.getOperand(OpIdx).getImm());
4884 }
4885 
4886 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4887                                                   const MachineInstr &MI,
4888                                                   int OpIdx) const {
4889   assert(OpIdx >= 0 && "expected to match an immediate operand");
4890   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4891 }
4892 
4893 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4894                                                  const MachineInstr &MI,
4895                                                  int OpIdx) const {
4896   assert(OpIdx >= 0 && "expected to match an immediate operand");
4897   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4898 }
4899 
4900 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4901                                              const MachineInstr &MI,
4902                                              int OpIdx) const {
4903   assert(OpIdx >= 0 && "expected to match an immediate operand");
4904   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4905 }
4906 
4907 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4908                                                  const MachineInstr &MI,
4909                                                  int OpIdx) const {
4910   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4911 }
4912 
4913 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4914   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4915 }
4916 
4917 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4918   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4919 }
4920 
4921 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4922   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4923 }
4924 
4925 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4926   return TII.isInlineConstant(Imm);
4927 }
4928