1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
156         // We can't trust the high bits at this point, so clear them.
157 
158         // TODO: Skip masking high bits if def is known boolean.
159 
160         unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
209   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
259 
260   switch (SubIdx) {
261   default:
262     llvm_unreachable("do not know to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
337     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
380     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
395     .addImm(AMDGPU::sub1);
396 
397 
398   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   unsigned Opc;
468   if (Subtarget->getGeneration() == AMDGPUSubtarget::GFX11)
469     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_gfx11_e64
470                      : AMDGPU::V_MAD_I64_I32_gfx11_e64;
471   else
472     Opc = IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64 : AMDGPU::V_MAD_I64_I32_e64;
473   I.setDesc(TII.get(Opc));
474   I.addOperand(*MF, MachineOperand::CreateImm(0));
475   I.addImplicitDefUseOperands(*MF);
476   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
477 }
478 
479 // TODO: We should probably legalize these to only using 32-bit results.
480 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
481   MachineBasicBlock *BB = I.getParent();
482   Register DstReg = I.getOperand(0).getReg();
483   Register SrcReg = I.getOperand(1).getReg();
484   LLT DstTy = MRI->getType(DstReg);
485   LLT SrcTy = MRI->getType(SrcReg);
486   const unsigned SrcSize = SrcTy.getSizeInBits();
487   unsigned DstSize = DstTy.getSizeInBits();
488 
489   // TODO: Should handle any multiple of 32 offset.
490   unsigned Offset = I.getOperand(2).getImm();
491   if (Offset % 32 != 0 || DstSize > 128)
492     return false;
493 
494   // 16-bit operations really use 32-bit registers.
495   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
496   if (DstSize == 16)
497     DstSize = 32;
498 
499   const TargetRegisterClass *DstRC =
500     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
501   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
502     return false;
503 
504   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
505   const TargetRegisterClass *SrcRC =
506       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
507   if (!SrcRC)
508     return false;
509   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
510                                                          DstSize / 32);
511   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
512   if (!SrcRC)
513     return false;
514 
515   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
516                                     *SrcRC, I.getOperand(1));
517   const DebugLoc &DL = I.getDebugLoc();
518   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
519     .addReg(SrcReg, 0, SubReg);
520 
521   I.eraseFromParent();
522   return true;
523 }
524 
525 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
526   MachineBasicBlock *BB = MI.getParent();
527   Register DstReg = MI.getOperand(0).getReg();
528   LLT DstTy = MRI->getType(DstReg);
529   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
530 
531   const unsigned SrcSize = SrcTy.getSizeInBits();
532   if (SrcSize < 32)
533     return selectImpl(MI, *CoverageInfo);
534 
535   const DebugLoc &DL = MI.getDebugLoc();
536   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
537   const unsigned DstSize = DstTy.getSizeInBits();
538   const TargetRegisterClass *DstRC =
539       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
540   if (!DstRC)
541     return false;
542 
543   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
544   MachineInstrBuilder MIB =
545     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
546   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
547     MachineOperand &Src = MI.getOperand(I + 1);
548     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
549     MIB.addImm(SubRegs[I]);
550 
551     const TargetRegisterClass *SrcRC
552       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
553     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
554       return false;
555   }
556 
557   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
558     return false;
559 
560   MI.eraseFromParent();
561   return true;
562 }
563 
564 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
565   MachineBasicBlock *BB = MI.getParent();
566   const int NumDst = MI.getNumOperands() - 1;
567 
568   MachineOperand &Src = MI.getOperand(NumDst);
569 
570   Register SrcReg = Src.getReg();
571   Register DstReg0 = MI.getOperand(0).getReg();
572   LLT DstTy = MRI->getType(DstReg0);
573   LLT SrcTy = MRI->getType(SrcReg);
574 
575   const unsigned DstSize = DstTy.getSizeInBits();
576   const unsigned SrcSize = SrcTy.getSizeInBits();
577   const DebugLoc &DL = MI.getDebugLoc();
578   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
579 
580   const TargetRegisterClass *SrcRC =
581       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
582   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
583     return false;
584 
585   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
586   // source, and this relies on the fact that the same subregister indices are
587   // used for both.
588   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
589   for (int I = 0, E = NumDst; I != E; ++I) {
590     MachineOperand &Dst = MI.getOperand(I);
591     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
592       .addReg(SrcReg, 0, SubRegs[I]);
593 
594     // Make sure the subregister index is valid for the source register.
595     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
596     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
597       return false;
598 
599     const TargetRegisterClass *DstRC =
600       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
601     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
602       return false;
603   }
604 
605   MI.eraseFromParent();
606   return true;
607 }
608 
609 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
610   MachineInstr &MI) const {
611   if (selectImpl(MI, *CoverageInfo))
612     return true;
613 
614   const LLT S32 = LLT::scalar(32);
615   const LLT V2S16 = LLT::fixed_vector(2, 16);
616 
617   Register Dst = MI.getOperand(0).getReg();
618   if (MRI->getType(Dst) != V2S16)
619     return false;
620 
621   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
622   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
623     return false;
624 
625   Register Src0 = MI.getOperand(1).getReg();
626   Register Src1 = MI.getOperand(2).getReg();
627   if (MRI->getType(Src0) != S32)
628     return false;
629 
630   const DebugLoc &DL = MI.getDebugLoc();
631   MachineBasicBlock *BB = MI.getParent();
632 
633   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
634   if (ConstSrc1) {
635     auto ConstSrc0 =
636         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
637     if (ConstSrc0) {
638       const int64_t K0 = ConstSrc0->Value.getSExtValue();
639       const int64_t K1 = ConstSrc1->Value.getSExtValue();
640       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
641       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
642 
643       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
644         .addImm(Lo16 | (Hi16 << 16));
645       MI.eraseFromParent();
646       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
647     }
648   }
649 
650   // TODO: This should probably be a combine somewhere
651   // (build_vector_trunc $src0, undef -> copy $src0
652   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
653   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
654     MI.setDesc(TII.get(AMDGPU::COPY));
655     MI.removeOperand(2);
656     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
657            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
658   }
659 
660   Register ShiftSrc0;
661   Register ShiftSrc1;
662 
663   // With multiple uses of the shift, this will duplicate the shift and
664   // increase register pressure.
665   //
666   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
667   //  => (S_PACK_HH_B32_B16 $src0, $src1)
668   // (build_vector_trunc (lshr_oneuse SReg_32:$src0, 16), $src1)
669   //  => (S_PACK_HL_B32_B16 $src0, $src1)
670   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
671   //  => (S_PACK_LH_B32_B16 $src0, $src1)
672   // (build_vector_trunc $src0, $src1)
673   //  => (S_PACK_LL_B32_B16 $src0, $src1)
674 
675   bool Shift0 = mi_match(
676       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
677 
678   bool Shift1 = mi_match(
679       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
680 
681   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
682   if (Shift0 && Shift1) {
683     Opc = AMDGPU::S_PACK_HH_B32_B16;
684     MI.getOperand(1).setReg(ShiftSrc0);
685     MI.getOperand(2).setReg(ShiftSrc1);
686   } else if (Shift1) {
687     Opc = AMDGPU::S_PACK_LH_B32_B16;
688     MI.getOperand(2).setReg(ShiftSrc1);
689   } else if (Shift0) {
690     if (ConstSrc1 && ConstSrc1->Value == 0) {
691       // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
692       auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
693                      .addReg(ShiftSrc0)
694                      .addImm(16);
695 
696       MI.eraseFromParent();
697       return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
698     }
699     if (STI.hasSPackHL()) {
700       Opc = AMDGPU::S_PACK_HL_B32_B16;
701       MI.getOperand(1).setReg(ShiftSrc0);
702     }
703   }
704 
705   MI.setDesc(TII.get(Opc));
706   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
707 }
708 
709 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
710   return selectG_ADD_SUB(I);
711 }
712 
713 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
714   const MachineOperand &MO = I.getOperand(0);
715 
716   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
717   // regbank check here is to know why getConstrainedRegClassForOperand failed.
718   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
719   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
720       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
721     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
722     return true;
723   }
724 
725   return false;
726 }
727 
728 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
729   MachineBasicBlock *BB = I.getParent();
730 
731   Register DstReg = I.getOperand(0).getReg();
732   Register Src0Reg = I.getOperand(1).getReg();
733   Register Src1Reg = I.getOperand(2).getReg();
734   LLT Src1Ty = MRI->getType(Src1Reg);
735 
736   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
737   unsigned InsSize = Src1Ty.getSizeInBits();
738 
739   int64_t Offset = I.getOperand(3).getImm();
740 
741   // FIXME: These cases should have been illegal and unnecessary to check here.
742   if (Offset % 32 != 0 || InsSize % 32 != 0)
743     return false;
744 
745   // Currently not handled by getSubRegFromChannel.
746   if (InsSize > 128)
747     return false;
748 
749   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
750   if (SubReg == AMDGPU::NoSubRegister)
751     return false;
752 
753   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
754   const TargetRegisterClass *DstRC =
755       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
756   if (!DstRC)
757     return false;
758 
759   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
760   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
761   const TargetRegisterClass *Src0RC =
762       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
763   const TargetRegisterClass *Src1RC =
764       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
765 
766   // Deal with weird cases where the class only partially supports the subreg
767   // index.
768   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
769   if (!Src0RC || !Src1RC)
770     return false;
771 
772   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
773       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
774       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
775     return false;
776 
777   const DebugLoc &DL = I.getDebugLoc();
778   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
779     .addReg(Src0Reg)
780     .addReg(Src1Reg)
781     .addImm(SubReg);
782 
783   I.eraseFromParent();
784   return true;
785 }
786 
787 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
788   Register DstReg = MI.getOperand(0).getReg();
789   Register SrcReg = MI.getOperand(1).getReg();
790   Register OffsetReg = MI.getOperand(2).getReg();
791   Register WidthReg = MI.getOperand(3).getReg();
792 
793   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
794          "scalar BFX instructions are expanded in regbankselect");
795   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
796          "64-bit vector BFX instructions are expanded in regbankselect");
797 
798   const DebugLoc &DL = MI.getDebugLoc();
799   MachineBasicBlock *MBB = MI.getParent();
800 
801   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
802   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
803   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
804                  .addReg(SrcReg)
805                  .addReg(OffsetReg)
806                  .addReg(WidthReg);
807   MI.eraseFromParent();
808   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
809 }
810 
811 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
812   if (STI.getLDSBankCount() != 16)
813     return selectImpl(MI, *CoverageInfo);
814 
815   Register Dst = MI.getOperand(0).getReg();
816   Register Src0 = MI.getOperand(2).getReg();
817   Register M0Val = MI.getOperand(6).getReg();
818   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
819       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
820       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
821     return false;
822 
823   // This requires 2 instructions. It is possible to write a pattern to support
824   // this, but the generated isel emitter doesn't correctly deal with multiple
825   // output instructions using the same physical register input. The copy to m0
826   // is incorrectly placed before the second instruction.
827   //
828   // TODO: Match source modifiers.
829 
830   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
831   const DebugLoc &DL = MI.getDebugLoc();
832   MachineBasicBlock *MBB = MI.getParent();
833 
834   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
835     .addReg(M0Val);
836   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
837     .addImm(2)
838     .addImm(MI.getOperand(4).getImm())  // $attr
839     .addImm(MI.getOperand(3).getImm()); // $attrchan
840 
841   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
842     .addImm(0)                          // $src0_modifiers
843     .addReg(Src0)                       // $src0
844     .addImm(MI.getOperand(4).getImm())  // $attr
845     .addImm(MI.getOperand(3).getImm())  // $attrchan
846     .addImm(0)                          // $src2_modifiers
847     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
848     .addImm(MI.getOperand(5).getImm())  // $high
849     .addImm(0)                          // $clamp
850     .addImm(0);                         // $omod
851 
852   MI.eraseFromParent();
853   return true;
854 }
855 
856 // Writelane is special in that it can use SGPR and M0 (which would normally
857 // count as using the constant bus twice - but in this case it is allowed since
858 // the lane selector doesn't count as a use of the constant bus). However, it is
859 // still required to abide by the 1 SGPR rule. Fix this up if we might have
860 // multiple SGPRs.
861 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
862   // With a constant bus limit of at least 2, there's no issue.
863   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
864     return selectImpl(MI, *CoverageInfo);
865 
866   MachineBasicBlock *MBB = MI.getParent();
867   const DebugLoc &DL = MI.getDebugLoc();
868   Register VDst = MI.getOperand(0).getReg();
869   Register Val = MI.getOperand(2).getReg();
870   Register LaneSelect = MI.getOperand(3).getReg();
871   Register VDstIn = MI.getOperand(4).getReg();
872 
873   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
874 
875   Optional<ValueAndVReg> ConstSelect =
876       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
877   if (ConstSelect) {
878     // The selector has to be an inline immediate, so we can use whatever for
879     // the other operands.
880     MIB.addReg(Val);
881     MIB.addImm(ConstSelect->Value.getSExtValue() &
882                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
883   } else {
884     Optional<ValueAndVReg> ConstVal =
885         getIConstantVRegValWithLookThrough(Val, *MRI);
886 
887     // If the value written is an inline immediate, we can get away without a
888     // copy to m0.
889     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
890                                                  STI.hasInv2PiInlineImm())) {
891       MIB.addImm(ConstVal->Value.getSExtValue());
892       MIB.addReg(LaneSelect);
893     } else {
894       MIB.addReg(Val);
895 
896       // If the lane selector was originally in a VGPR and copied with
897       // readfirstlane, there's a hazard to read the same SGPR from the
898       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
899       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
900 
901       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
902         .addReg(LaneSelect);
903       MIB.addReg(AMDGPU::M0);
904     }
905   }
906 
907   MIB.addReg(VDstIn);
908 
909   MI.eraseFromParent();
910   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
911 }
912 
913 // We need to handle this here because tablegen doesn't support matching
914 // instructions with multiple outputs.
915 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
916   Register Dst0 = MI.getOperand(0).getReg();
917   Register Dst1 = MI.getOperand(1).getReg();
918 
919   LLT Ty = MRI->getType(Dst0);
920   unsigned Opc;
921   if (Ty == LLT::scalar(32))
922     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
923   else if (Ty == LLT::scalar(64))
924     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
925   else
926     return false;
927 
928   // TODO: Match source modifiers.
929 
930   const DebugLoc &DL = MI.getDebugLoc();
931   MachineBasicBlock *MBB = MI.getParent();
932 
933   Register Numer = MI.getOperand(3).getReg();
934   Register Denom = MI.getOperand(4).getReg();
935   unsigned ChooseDenom = MI.getOperand(5).getImm();
936 
937   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
938 
939   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
940     .addDef(Dst1)
941     .addImm(0)     // $src0_modifiers
942     .addUse(Src0)  // $src0
943     .addImm(0)     // $src1_modifiers
944     .addUse(Denom) // $src1
945     .addImm(0)     // $src2_modifiers
946     .addUse(Numer) // $src2
947     .addImm(0)     // $clamp
948     .addImm(0);    // $omod
949 
950   MI.eraseFromParent();
951   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
952 }
953 
954 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
955   unsigned IntrinsicID = I.getIntrinsicID();
956   switch (IntrinsicID) {
957   case Intrinsic::amdgcn_if_break: {
958     MachineBasicBlock *BB = I.getParent();
959 
960     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
961     // SelectionDAG uses for wave32 vs wave64.
962     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
963       .add(I.getOperand(0))
964       .add(I.getOperand(2))
965       .add(I.getOperand(3));
966 
967     Register DstReg = I.getOperand(0).getReg();
968     Register Src0Reg = I.getOperand(2).getReg();
969     Register Src1Reg = I.getOperand(3).getReg();
970 
971     I.eraseFromParent();
972 
973     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
974       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
975 
976     return true;
977   }
978   case Intrinsic::amdgcn_interp_p1_f16:
979     return selectInterpP1F16(I);
980   case Intrinsic::amdgcn_wqm:
981     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
982   case Intrinsic::amdgcn_softwqm:
983     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
984   case Intrinsic::amdgcn_strict_wwm:
985   case Intrinsic::amdgcn_wwm:
986     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
987   case Intrinsic::amdgcn_strict_wqm:
988     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
989   case Intrinsic::amdgcn_writelane:
990     return selectWritelane(I);
991   case Intrinsic::amdgcn_div_scale:
992     return selectDivScale(I);
993   case Intrinsic::amdgcn_icmp:
994     return selectIntrinsicIcmp(I);
995   case Intrinsic::amdgcn_ballot:
996     return selectBallot(I);
997   case Intrinsic::amdgcn_reloc_constant:
998     return selectRelocConstant(I);
999   case Intrinsic::amdgcn_groupstaticsize:
1000     return selectGroupStaticSize(I);
1001   case Intrinsic::returnaddress:
1002     return selectReturnAddress(I);
1003   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
1004   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
1005   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
1006   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
1007   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
1008   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
1009   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
1010   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
1011   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
1012   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
1013   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
1014   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
1015   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
1016   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
1017     return selectSMFMACIntrin(I);
1018   default:
1019     return selectImpl(I, *CoverageInfo);
1020   }
1021 }
1022 
1023 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1024   if (Size != 32 && Size != 64)
1025     return -1;
1026   switch (P) {
1027   default:
1028     llvm_unreachable("Unknown condition code!");
1029   case CmpInst::ICMP_NE:
1030     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1031   case CmpInst::ICMP_EQ:
1032     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1033   case CmpInst::ICMP_SGT:
1034     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1035   case CmpInst::ICMP_SGE:
1036     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1037   case CmpInst::ICMP_SLT:
1038     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1039   case CmpInst::ICMP_SLE:
1040     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1041   case CmpInst::ICMP_UGT:
1042     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1043   case CmpInst::ICMP_UGE:
1044     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1045   case CmpInst::ICMP_ULT:
1046     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1047   case CmpInst::ICMP_ULE:
1048     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1049   }
1050 }
1051 
1052 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1053                                               unsigned Size) const {
1054   if (Size == 64) {
1055     if (!STI.hasScalarCompareEq64())
1056       return -1;
1057 
1058     switch (P) {
1059     case CmpInst::ICMP_NE:
1060       return AMDGPU::S_CMP_LG_U64;
1061     case CmpInst::ICMP_EQ:
1062       return AMDGPU::S_CMP_EQ_U64;
1063     default:
1064       return -1;
1065     }
1066   }
1067 
1068   if (Size != 32)
1069     return -1;
1070 
1071   switch (P) {
1072   case CmpInst::ICMP_NE:
1073     return AMDGPU::S_CMP_LG_U32;
1074   case CmpInst::ICMP_EQ:
1075     return AMDGPU::S_CMP_EQ_U32;
1076   case CmpInst::ICMP_SGT:
1077     return AMDGPU::S_CMP_GT_I32;
1078   case CmpInst::ICMP_SGE:
1079     return AMDGPU::S_CMP_GE_I32;
1080   case CmpInst::ICMP_SLT:
1081     return AMDGPU::S_CMP_LT_I32;
1082   case CmpInst::ICMP_SLE:
1083     return AMDGPU::S_CMP_LE_I32;
1084   case CmpInst::ICMP_UGT:
1085     return AMDGPU::S_CMP_GT_U32;
1086   case CmpInst::ICMP_UGE:
1087     return AMDGPU::S_CMP_GE_U32;
1088   case CmpInst::ICMP_ULT:
1089     return AMDGPU::S_CMP_LT_U32;
1090   case CmpInst::ICMP_ULE:
1091     return AMDGPU::S_CMP_LE_U32;
1092   default:
1093     llvm_unreachable("Unknown condition code!");
1094   }
1095 }
1096 
1097 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1098   MachineBasicBlock *BB = I.getParent();
1099   const DebugLoc &DL = I.getDebugLoc();
1100 
1101   Register SrcReg = I.getOperand(2).getReg();
1102   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1103 
1104   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1105 
1106   Register CCReg = I.getOperand(0).getReg();
1107   if (!isVCC(CCReg, *MRI)) {
1108     int Opcode = getS_CMPOpcode(Pred, Size);
1109     if (Opcode == -1)
1110       return false;
1111     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1112             .add(I.getOperand(2))
1113             .add(I.getOperand(3));
1114     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1115       .addReg(AMDGPU::SCC);
1116     bool Ret =
1117         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1118         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1119     I.eraseFromParent();
1120     return Ret;
1121   }
1122 
1123   int Opcode = getV_CMPOpcode(Pred, Size);
1124   if (Opcode == -1)
1125     return false;
1126 
1127   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1128             I.getOperand(0).getReg())
1129             .add(I.getOperand(2))
1130             .add(I.getOperand(3));
1131   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1132                                *TRI.getBoolRC(), *MRI);
1133   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1134   I.eraseFromParent();
1135   return Ret;
1136 }
1137 
1138 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1139   Register Dst = I.getOperand(0).getReg();
1140   if (isVCC(Dst, *MRI))
1141     return false;
1142 
1143   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1144     return false;
1145 
1146   MachineBasicBlock *BB = I.getParent();
1147   const DebugLoc &DL = I.getDebugLoc();
1148   Register SrcReg = I.getOperand(2).getReg();
1149   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1150 
1151   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1152   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1153     MachineInstr *ICmp =
1154         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1155 
1156     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1157                                       *TRI.getBoolRC(), *MRI))
1158       return false;
1159     I.eraseFromParent();
1160     return true;
1161   }
1162 
1163   int Opcode = getV_CMPOpcode(Pred, Size);
1164   if (Opcode == -1)
1165     return false;
1166 
1167   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1168                            .add(I.getOperand(2))
1169                            .add(I.getOperand(3));
1170   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1171                                *MRI);
1172   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1173   I.eraseFromParent();
1174   return Ret;
1175 }
1176 
1177 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1178   MachineBasicBlock *BB = I.getParent();
1179   const DebugLoc &DL = I.getDebugLoc();
1180   Register DstReg = I.getOperand(0).getReg();
1181   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1182   const bool Is64 = Size == 64;
1183 
1184   if (Size != STI.getWavefrontSize())
1185     return false;
1186 
1187   Optional<ValueAndVReg> Arg =
1188       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1189 
1190   if (Arg) {
1191     const int64_t Value = Arg.value().Value.getSExtValue();
1192     if (Value == 0) {
1193       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1194       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1195     } else if (Value == -1) { // all ones
1196       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1197       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1198     } else
1199       return false;
1200   } else {
1201     Register SrcReg = I.getOperand(2).getReg();
1202     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1203   }
1204 
1205   I.eraseFromParent();
1206   return true;
1207 }
1208 
1209 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1210   Register DstReg = I.getOperand(0).getReg();
1211   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1212   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1213   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1214     return false;
1215 
1216   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1217 
1218   Module *M = MF->getFunction().getParent();
1219   const MDNode *Metadata = I.getOperand(2).getMetadata();
1220   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1221   auto RelocSymbol = cast<GlobalVariable>(
1222     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1223 
1224   MachineBasicBlock *BB = I.getParent();
1225   BuildMI(*BB, &I, I.getDebugLoc(),
1226           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1227     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1228 
1229   I.eraseFromParent();
1230   return true;
1231 }
1232 
1233 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1234   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1235 
1236   Register DstReg = I.getOperand(0).getReg();
1237   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1238   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1239     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1240 
1241   MachineBasicBlock *MBB = I.getParent();
1242   const DebugLoc &DL = I.getDebugLoc();
1243 
1244   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1245 
1246   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1247     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1248     MIB.addImm(MFI->getLDSSize());
1249   } else {
1250     Module *M = MF->getFunction().getParent();
1251     const GlobalValue *GV
1252       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1253     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1254   }
1255 
1256   I.eraseFromParent();
1257   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1258 }
1259 
1260 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1261   MachineBasicBlock *MBB = I.getParent();
1262   MachineFunction &MF = *MBB->getParent();
1263   const DebugLoc &DL = I.getDebugLoc();
1264 
1265   MachineOperand &Dst = I.getOperand(0);
1266   Register DstReg = Dst.getReg();
1267   unsigned Depth = I.getOperand(2).getImm();
1268 
1269   const TargetRegisterClass *RC
1270     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1271   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1272       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1273     return false;
1274 
1275   // Check for kernel and shader functions
1276   if (Depth != 0 ||
1277       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1278     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1279       .addImm(0);
1280     I.eraseFromParent();
1281     return true;
1282   }
1283 
1284   MachineFrameInfo &MFI = MF.getFrameInfo();
1285   // There is a call to @llvm.returnaddress in this function
1286   MFI.setReturnAddressIsTaken(true);
1287 
1288   // Get the return address reg and mark it as an implicit live-in
1289   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1290   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1291                                              AMDGPU::SReg_64RegClass, DL);
1292   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1293     .addReg(LiveIn);
1294   I.eraseFromParent();
1295   return true;
1296 }
1297 
1298 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1299   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1300   // SelectionDAG uses for wave32 vs wave64.
1301   MachineBasicBlock *BB = MI.getParent();
1302   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1303       .add(MI.getOperand(1));
1304 
1305   Register Reg = MI.getOperand(1).getReg();
1306   MI.eraseFromParent();
1307 
1308   if (!MRI->getRegClassOrNull(Reg))
1309     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1310   return true;
1311 }
1312 
1313 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1314   MachineInstr &MI, Intrinsic::ID IntrID) const {
1315   MachineBasicBlock *MBB = MI.getParent();
1316   MachineFunction *MF = MBB->getParent();
1317   const DebugLoc &DL = MI.getDebugLoc();
1318 
1319   unsigned IndexOperand = MI.getOperand(7).getImm();
1320   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1321   bool WaveDone = MI.getOperand(9).getImm() != 0;
1322 
1323   if (WaveDone && !WaveRelease)
1324     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1325 
1326   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1327   IndexOperand &= ~0x3f;
1328   unsigned CountDw = 0;
1329 
1330   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1331     CountDw = (IndexOperand >> 24) & 0xf;
1332     IndexOperand &= ~(0xf << 24);
1333 
1334     if (CountDw < 1 || CountDw > 4) {
1335       report_fatal_error(
1336         "ds_ordered_count: dword count must be between 1 and 4");
1337     }
1338   }
1339 
1340   if (IndexOperand)
1341     report_fatal_error("ds_ordered_count: bad index operand");
1342 
1343   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1344   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1345 
1346   unsigned Offset0 = OrderedCountIndex << 2;
1347   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (Instruction << 4);
1348 
1349   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1350     Offset1 |= (CountDw - 1) << 6;
1351 
1352   if (STI.getGeneration() < AMDGPUSubtarget::GFX11)
1353     Offset1 |= ShaderType << 2;
1354 
1355   unsigned Offset = Offset0 | (Offset1 << 8);
1356 
1357   Register M0Val = MI.getOperand(2).getReg();
1358   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1359     .addReg(M0Val);
1360 
1361   Register DstReg = MI.getOperand(0).getReg();
1362   Register ValReg = MI.getOperand(3).getReg();
1363   MachineInstrBuilder DS =
1364     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1365       .addReg(ValReg)
1366       .addImm(Offset)
1367       .cloneMemRefs(MI);
1368 
1369   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1370     return false;
1371 
1372   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1373   MI.eraseFromParent();
1374   return Ret;
1375 }
1376 
1377 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1378   switch (IntrID) {
1379   case Intrinsic::amdgcn_ds_gws_init:
1380     return AMDGPU::DS_GWS_INIT;
1381   case Intrinsic::amdgcn_ds_gws_barrier:
1382     return AMDGPU::DS_GWS_BARRIER;
1383   case Intrinsic::amdgcn_ds_gws_sema_v:
1384     return AMDGPU::DS_GWS_SEMA_V;
1385   case Intrinsic::amdgcn_ds_gws_sema_br:
1386     return AMDGPU::DS_GWS_SEMA_BR;
1387   case Intrinsic::amdgcn_ds_gws_sema_p:
1388     return AMDGPU::DS_GWS_SEMA_P;
1389   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1390     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1391   default:
1392     llvm_unreachable("not a gws intrinsic");
1393   }
1394 }
1395 
1396 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1397                                                      Intrinsic::ID IID) const {
1398   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1399       !STI.hasGWSSemaReleaseAll())
1400     return false;
1401 
1402   // intrinsic ID, vsrc, offset
1403   const bool HasVSrc = MI.getNumOperands() == 3;
1404   assert(HasVSrc || MI.getNumOperands() == 2);
1405 
1406   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1407   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1408   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1409     return false;
1410 
1411   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1412   assert(OffsetDef);
1413 
1414   unsigned ImmOffset;
1415 
1416   MachineBasicBlock *MBB = MI.getParent();
1417   const DebugLoc &DL = MI.getDebugLoc();
1418 
1419   MachineInstr *Readfirstlane = nullptr;
1420 
1421   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1422   // incoming offset, in case there's an add of a constant. We'll have to put it
1423   // back later.
1424   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1425     Readfirstlane = OffsetDef;
1426     BaseOffset = OffsetDef->getOperand(1).getReg();
1427     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1428   }
1429 
1430   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1431     // If we have a constant offset, try to use the 0 in m0 as the base.
1432     // TODO: Look into changing the default m0 initialization value. If the
1433     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1434     // the immediate offset.
1435 
1436     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1437     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1438       .addImm(0);
1439   } else {
1440     std::tie(BaseOffset, ImmOffset) =
1441         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1442 
1443     if (Readfirstlane) {
1444       // We have the constant offset now, so put the readfirstlane back on the
1445       // variable component.
1446       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1447         return false;
1448 
1449       Readfirstlane->getOperand(1).setReg(BaseOffset);
1450       BaseOffset = Readfirstlane->getOperand(0).getReg();
1451     } else {
1452       if (!RBI.constrainGenericRegister(BaseOffset,
1453                                         AMDGPU::SReg_32RegClass, *MRI))
1454         return false;
1455     }
1456 
1457     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1458     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1459       .addReg(BaseOffset)
1460       .addImm(16);
1461 
1462     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1463       .addReg(M0Base);
1464   }
1465 
1466   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1467   // offset field) % 64. Some versions of the programming guide omit the m0
1468   // part, or claim it's from offset 0.
1469   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1470 
1471   if (HasVSrc) {
1472     Register VSrc = MI.getOperand(1).getReg();
1473     MIB.addReg(VSrc);
1474 
1475     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1476       return false;
1477   }
1478 
1479   MIB.addImm(ImmOffset)
1480      .cloneMemRefs(MI);
1481 
1482   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1483 
1484   MI.eraseFromParent();
1485   return true;
1486 }
1487 
1488 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1489                                                       bool IsAppend) const {
1490   Register PtrBase = MI.getOperand(2).getReg();
1491   LLT PtrTy = MRI->getType(PtrBase);
1492   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1493 
1494   unsigned Offset;
1495   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1496 
1497   // TODO: Should this try to look through readfirstlane like GWS?
1498   if (!isDSOffsetLegal(PtrBase, Offset)) {
1499     PtrBase = MI.getOperand(2).getReg();
1500     Offset = 0;
1501   }
1502 
1503   MachineBasicBlock *MBB = MI.getParent();
1504   const DebugLoc &DL = MI.getDebugLoc();
1505   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1506 
1507   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1508     .addReg(PtrBase);
1509   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1510     return false;
1511 
1512   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1513     .addImm(Offset)
1514     .addImm(IsGDS ? -1 : 0)
1515     .cloneMemRefs(MI);
1516   MI.eraseFromParent();
1517   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1518 }
1519 
1520 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1521   if (TM.getOptLevel() > CodeGenOpt::None) {
1522     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1523     if (WGSize <= STI.getWavefrontSize()) {
1524       MachineBasicBlock *MBB = MI.getParent();
1525       const DebugLoc &DL = MI.getDebugLoc();
1526       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1527       MI.eraseFromParent();
1528       return true;
1529     }
1530   }
1531   return selectImpl(MI, *CoverageInfo);
1532 }
1533 
1534 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1535                          bool &IsTexFail) {
1536   if (TexFailCtrl)
1537     IsTexFail = true;
1538 
1539   TFE = (TexFailCtrl & 0x1) ? true : false;
1540   TexFailCtrl &= ~(uint64_t)0x1;
1541   LWE = (TexFailCtrl & 0x2) ? true : false;
1542   TexFailCtrl &= ~(uint64_t)0x2;
1543 
1544   return TexFailCtrl == 0;
1545 }
1546 
1547 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1548   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1549   MachineBasicBlock *MBB = MI.getParent();
1550   const DebugLoc &DL = MI.getDebugLoc();
1551 
1552   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1553     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1554 
1555   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1556   unsigned IntrOpcode = Intr->BaseOpcode;
1557   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1558   const bool IsGFX11Plus = AMDGPU::isGFX11Plus(STI);
1559 
1560   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1561 
1562   Register VDataIn, VDataOut;
1563   LLT VDataTy;
1564   int NumVDataDwords = -1;
1565   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1566                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1567 
1568   bool Unorm;
1569   if (!BaseOpcode->Sampler)
1570     Unorm = true;
1571   else
1572     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1573 
1574   bool TFE;
1575   bool LWE;
1576   bool IsTexFail = false;
1577   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1578                     TFE, LWE, IsTexFail))
1579     return false;
1580 
1581   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1582   const bool IsA16 = (Flags & 1) != 0;
1583   const bool IsG16 = (Flags & 2) != 0;
1584 
1585   // A16 implies 16 bit gradients if subtarget doesn't support G16
1586   if (IsA16 && !STI.hasG16() && !IsG16)
1587     return false;
1588 
1589   unsigned DMask = 0;
1590   unsigned DMaskLanes = 0;
1591 
1592   if (BaseOpcode->Atomic) {
1593     VDataOut = MI.getOperand(0).getReg();
1594     VDataIn = MI.getOperand(2).getReg();
1595     LLT Ty = MRI->getType(VDataIn);
1596 
1597     // Be careful to allow atomic swap on 16-bit element vectors.
1598     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1599       Ty.getSizeInBits() == 128 :
1600       Ty.getSizeInBits() == 64;
1601 
1602     if (BaseOpcode->AtomicX2) {
1603       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1604 
1605       DMask = Is64Bit ? 0xf : 0x3;
1606       NumVDataDwords = Is64Bit ? 4 : 2;
1607     } else {
1608       DMask = Is64Bit ? 0x3 : 0x1;
1609       NumVDataDwords = Is64Bit ? 2 : 1;
1610     }
1611   } else {
1612     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1613     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1614 
1615     if (BaseOpcode->Store) {
1616       VDataIn = MI.getOperand(1).getReg();
1617       VDataTy = MRI->getType(VDataIn);
1618       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1619     } else {
1620       VDataOut = MI.getOperand(0).getReg();
1621       VDataTy = MRI->getType(VDataOut);
1622       NumVDataDwords = DMaskLanes;
1623 
1624       if (IsD16 && !STI.hasUnpackedD16VMem())
1625         NumVDataDwords = (DMaskLanes + 1) / 2;
1626     }
1627   }
1628 
1629   // Set G16 opcode
1630   if (IsG16 && !IsA16) {
1631     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1632         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1633     assert(G16MappingInfo);
1634     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1635   }
1636 
1637   // TODO: Check this in verifier.
1638   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1639 
1640   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1641   if (BaseOpcode->Atomic)
1642     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1643   if (CPol & ~AMDGPU::CPol::ALL)
1644     return false;
1645 
1646   int NumVAddrRegs = 0;
1647   int NumVAddrDwords = 0;
1648   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1649     // Skip the $noregs and 0s inserted during legalization.
1650     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1651     if (!AddrOp.isReg())
1652       continue; // XXX - Break?
1653 
1654     Register Addr = AddrOp.getReg();
1655     if (!Addr)
1656       break;
1657 
1658     ++NumVAddrRegs;
1659     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1660   }
1661 
1662   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1663   // NSA, these should have been packed into a single value in the first
1664   // address register
1665   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1666   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1667     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1668     return false;
1669   }
1670 
1671   if (IsTexFail)
1672     ++NumVDataDwords;
1673 
1674   int Opcode = -1;
1675   if (IsGFX11Plus) {
1676     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1677                                    UseNSA ? AMDGPU::MIMGEncGfx11NSA
1678                                           : AMDGPU::MIMGEncGfx11Default,
1679                                    NumVDataDwords, NumVAddrDwords);
1680   } else if (IsGFX10Plus) {
1681     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1682                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1683                                           : AMDGPU::MIMGEncGfx10Default,
1684                                    NumVDataDwords, NumVAddrDwords);
1685   } else {
1686     if (Subtarget->hasGFX90AInsts()) {
1687       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1688                                      NumVDataDwords, NumVAddrDwords);
1689       if (Opcode == -1) {
1690         LLVM_DEBUG(
1691             dbgs()
1692             << "requested image instruction is not supported on this GPU\n");
1693         return false;
1694       }
1695     }
1696     if (Opcode == -1 &&
1697         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1698       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1699                                      NumVDataDwords, NumVAddrDwords);
1700     if (Opcode == -1)
1701       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1702                                      NumVDataDwords, NumVAddrDwords);
1703   }
1704   assert(Opcode != -1);
1705 
1706   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1707     .cloneMemRefs(MI);
1708 
1709   if (VDataOut) {
1710     if (BaseOpcode->AtomicX2) {
1711       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1712 
1713       Register TmpReg = MRI->createVirtualRegister(
1714         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1715       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1716 
1717       MIB.addDef(TmpReg);
1718       if (!MRI->use_empty(VDataOut)) {
1719         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1720             .addReg(TmpReg, RegState::Kill, SubReg);
1721       }
1722 
1723     } else {
1724       MIB.addDef(VDataOut); // vdata output
1725     }
1726   }
1727 
1728   if (VDataIn)
1729     MIB.addReg(VDataIn); // vdata input
1730 
1731   for (int I = 0; I != NumVAddrRegs; ++I) {
1732     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1733     if (SrcOp.isReg()) {
1734       assert(SrcOp.getReg() != 0);
1735       MIB.addReg(SrcOp.getReg());
1736     }
1737   }
1738 
1739   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1740   if (BaseOpcode->Sampler)
1741     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1742 
1743   MIB.addImm(DMask); // dmask
1744 
1745   if (IsGFX10Plus)
1746     MIB.addImm(DimInfo->Encoding);
1747   MIB.addImm(Unorm);
1748 
1749   MIB.addImm(CPol);
1750   MIB.addImm(IsA16 &&  // a16 or r128
1751              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1752   if (IsGFX10Plus)
1753     MIB.addImm(IsA16 ? -1 : 0);
1754 
1755   if (!Subtarget->hasGFX90AInsts()) {
1756     MIB.addImm(TFE); // tfe
1757   } else if (TFE) {
1758     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1759     return false;
1760   }
1761 
1762   MIB.addImm(LWE); // lwe
1763   if (!IsGFX10Plus)
1764     MIB.addImm(DimInfo->DA ? -1 : 0);
1765   if (BaseOpcode->HasD16)
1766     MIB.addImm(IsD16 ? -1 : 0);
1767 
1768   if (IsTexFail) {
1769     // An image load instruction with TFE/LWE only conditionally writes to its
1770     // result registers. Initialize them to zero so that we always get well
1771     // defined result values.
1772     assert(VDataOut && !VDataIn);
1773     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1774     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1775     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1776       .addImm(0);
1777     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1778     if (STI.usePRTStrictNull()) {
1779       // With enable-prt-strict-null enabled, initialize all result registers to
1780       // zero.
1781       auto RegSeq =
1782           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1783       for (auto Sub : Parts)
1784         RegSeq.addReg(Zero).addImm(Sub);
1785     } else {
1786       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1787       // result register.
1788       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1789       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1790       auto RegSeq =
1791           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1792       for (auto Sub : Parts.drop_back(1))
1793         RegSeq.addReg(Undef).addImm(Sub);
1794       RegSeq.addReg(Zero).addImm(Parts.back());
1795     }
1796     MIB.addReg(Tied, RegState::Implicit);
1797     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1798   }
1799 
1800   MI.eraseFromParent();
1801   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1802   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1803   return true;
1804 }
1805 
1806 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1807     MachineInstr &I) const {
1808   unsigned IntrinsicID = I.getIntrinsicID();
1809   switch (IntrinsicID) {
1810   case Intrinsic::amdgcn_end_cf:
1811     return selectEndCfIntrinsic(I);
1812   case Intrinsic::amdgcn_ds_ordered_add:
1813   case Intrinsic::amdgcn_ds_ordered_swap:
1814     return selectDSOrderedIntrinsic(I, IntrinsicID);
1815   case Intrinsic::amdgcn_ds_gws_init:
1816   case Intrinsic::amdgcn_ds_gws_barrier:
1817   case Intrinsic::amdgcn_ds_gws_sema_v:
1818   case Intrinsic::amdgcn_ds_gws_sema_br:
1819   case Intrinsic::amdgcn_ds_gws_sema_p:
1820   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1821     return selectDSGWSIntrinsic(I, IntrinsicID);
1822   case Intrinsic::amdgcn_ds_append:
1823     return selectDSAppendConsume(I, true);
1824   case Intrinsic::amdgcn_ds_consume:
1825     return selectDSAppendConsume(I, false);
1826   case Intrinsic::amdgcn_s_barrier:
1827     return selectSBarrier(I);
1828   case Intrinsic::amdgcn_global_atomic_fadd:
1829     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1830   case Intrinsic::amdgcn_raw_buffer_load_lds:
1831   case Intrinsic::amdgcn_struct_buffer_load_lds:
1832     return selectBufferLoadLds(I);
1833   case Intrinsic::amdgcn_global_load_lds:
1834     return selectGlobalLoadLds(I);
1835   case Intrinsic::amdgcn_exp_compr:
1836     if (!STI.hasCompressedExport()) {
1837       Function &F = I.getMF()->getFunction();
1838       DiagnosticInfoUnsupported NoFpRet(
1839           F, "intrinsic not supported on subtarget", I.getDebugLoc(), DS_Error);
1840       F.getContext().diagnose(NoFpRet);
1841       return false;
1842     }
1843     break;
1844   }
1845   return selectImpl(I, *CoverageInfo);
1846 }
1847 
1848 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1849   if (selectImpl(I, *CoverageInfo))
1850     return true;
1851 
1852   MachineBasicBlock *BB = I.getParent();
1853   const DebugLoc &DL = I.getDebugLoc();
1854 
1855   Register DstReg = I.getOperand(0).getReg();
1856   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1857   assert(Size <= 32 || Size == 64);
1858   const MachineOperand &CCOp = I.getOperand(1);
1859   Register CCReg = CCOp.getReg();
1860   if (!isVCC(CCReg, *MRI)) {
1861     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1862                                          AMDGPU::S_CSELECT_B32;
1863     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1864             .addReg(CCReg);
1865 
1866     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1867     // bank, because it does not cover the register class that we used to represent
1868     // for it.  So we need to manually set the register class here.
1869     if (!MRI->getRegClassOrNull(CCReg))
1870         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1871     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1872             .add(I.getOperand(2))
1873             .add(I.getOperand(3));
1874 
1875     bool Ret = false;
1876     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1877     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1878     I.eraseFromParent();
1879     return Ret;
1880   }
1881 
1882   // Wide VGPR select should have been split in RegBankSelect.
1883   if (Size > 32)
1884     return false;
1885 
1886   MachineInstr *Select =
1887       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1888               .addImm(0)
1889               .add(I.getOperand(3))
1890               .addImm(0)
1891               .add(I.getOperand(2))
1892               .add(I.getOperand(1));
1893 
1894   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1895   I.eraseFromParent();
1896   return Ret;
1897 }
1898 
1899 static int sizeToSubRegIndex(unsigned Size) {
1900   switch (Size) {
1901   case 32:
1902     return AMDGPU::sub0;
1903   case 64:
1904     return AMDGPU::sub0_sub1;
1905   case 96:
1906     return AMDGPU::sub0_sub1_sub2;
1907   case 128:
1908     return AMDGPU::sub0_sub1_sub2_sub3;
1909   case 256:
1910     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1911   default:
1912     if (Size < 32)
1913       return AMDGPU::sub0;
1914     if (Size > 256)
1915       return -1;
1916     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1917   }
1918 }
1919 
1920 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1921   Register DstReg = I.getOperand(0).getReg();
1922   Register SrcReg = I.getOperand(1).getReg();
1923   const LLT DstTy = MRI->getType(DstReg);
1924   const LLT SrcTy = MRI->getType(SrcReg);
1925   const LLT S1 = LLT::scalar(1);
1926 
1927   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1928   const RegisterBank *DstRB;
1929   if (DstTy == S1) {
1930     // This is a special case. We don't treat s1 for legalization artifacts as
1931     // vcc booleans.
1932     DstRB = SrcRB;
1933   } else {
1934     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1935     if (SrcRB != DstRB)
1936       return false;
1937   }
1938 
1939   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1940 
1941   unsigned DstSize = DstTy.getSizeInBits();
1942   unsigned SrcSize = SrcTy.getSizeInBits();
1943 
1944   const TargetRegisterClass *SrcRC =
1945       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1946   const TargetRegisterClass *DstRC =
1947       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1948   if (!SrcRC || !DstRC)
1949     return false;
1950 
1951   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1952       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1953     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1954     return false;
1955   }
1956 
1957   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1958     MachineBasicBlock *MBB = I.getParent();
1959     const DebugLoc &DL = I.getDebugLoc();
1960 
1961     Register LoReg = MRI->createVirtualRegister(DstRC);
1962     Register HiReg = MRI->createVirtualRegister(DstRC);
1963     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1964       .addReg(SrcReg, 0, AMDGPU::sub0);
1965     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1966       .addReg(SrcReg, 0, AMDGPU::sub1);
1967 
1968     if (IsVALU && STI.hasSDWA()) {
1969       // Write the low 16-bits of the high element into the high 16-bits of the
1970       // low element.
1971       MachineInstr *MovSDWA =
1972         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1973         .addImm(0)                             // $src0_modifiers
1974         .addReg(HiReg)                         // $src0
1975         .addImm(0)                             // $clamp
1976         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1977         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1978         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1979         .addReg(LoReg, RegState::Implicit);
1980       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1981     } else {
1982       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1983       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1984       Register ImmReg = MRI->createVirtualRegister(DstRC);
1985       if (IsVALU) {
1986         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1987           .addImm(16)
1988           .addReg(HiReg);
1989       } else {
1990         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1991           .addReg(HiReg)
1992           .addImm(16);
1993       }
1994 
1995       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1996       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1997       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1998 
1999       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
2000         .addImm(0xffff);
2001       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
2002         .addReg(LoReg)
2003         .addReg(ImmReg);
2004       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
2005         .addReg(TmpReg0)
2006         .addReg(TmpReg1);
2007     }
2008 
2009     I.eraseFromParent();
2010     return true;
2011   }
2012 
2013   if (!DstTy.isScalar())
2014     return false;
2015 
2016   if (SrcSize > 32) {
2017     int SubRegIdx = sizeToSubRegIndex(DstSize);
2018     if (SubRegIdx == -1)
2019       return false;
2020 
2021     // Deal with weird cases where the class only partially supports the subreg
2022     // index.
2023     const TargetRegisterClass *SrcWithSubRC
2024       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
2025     if (!SrcWithSubRC)
2026       return false;
2027 
2028     if (SrcWithSubRC != SrcRC) {
2029       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
2030         return false;
2031     }
2032 
2033     I.getOperand(1).setSubReg(SubRegIdx);
2034   }
2035 
2036   I.setDesc(TII.get(TargetOpcode::COPY));
2037   return true;
2038 }
2039 
2040 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2041 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2042   Mask = maskTrailingOnes<unsigned>(Size);
2043   int SignedMask = static_cast<int>(Mask);
2044   return SignedMask >= -16 && SignedMask <= 64;
2045 }
2046 
2047 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2048 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2049   Register Reg, const MachineRegisterInfo &MRI,
2050   const TargetRegisterInfo &TRI) const {
2051   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2052   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2053     return RB;
2054 
2055   // Ignore the type, since we don't use vcc in artifacts.
2056   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2057     return &RBI.getRegBankFromRegClass(*RC, LLT());
2058   return nullptr;
2059 }
2060 
2061 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2062   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2063   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2064   const DebugLoc &DL = I.getDebugLoc();
2065   MachineBasicBlock &MBB = *I.getParent();
2066   const Register DstReg = I.getOperand(0).getReg();
2067   const Register SrcReg = I.getOperand(1).getReg();
2068 
2069   const LLT DstTy = MRI->getType(DstReg);
2070   const LLT SrcTy = MRI->getType(SrcReg);
2071   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2072     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2073   const unsigned DstSize = DstTy.getSizeInBits();
2074   if (!DstTy.isScalar())
2075     return false;
2076 
2077   // Artifact casts should never use vcc.
2078   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2079 
2080   // FIXME: This should probably be illegal and split earlier.
2081   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2082     if (DstSize <= 32)
2083       return selectCOPY(I);
2084 
2085     const TargetRegisterClass *SrcRC =
2086         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2087     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2088     const TargetRegisterClass *DstRC =
2089         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2090 
2091     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2092     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2093     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2094       .addReg(SrcReg)
2095       .addImm(AMDGPU::sub0)
2096       .addReg(UndefReg)
2097       .addImm(AMDGPU::sub1);
2098     I.eraseFromParent();
2099 
2100     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2101            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2102   }
2103 
2104   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2105     // 64-bit should have been split up in RegBankSelect
2106 
2107     // Try to use an and with a mask if it will save code size.
2108     unsigned Mask;
2109     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2110       MachineInstr *ExtI =
2111       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2112         .addImm(Mask)
2113         .addReg(SrcReg);
2114       I.eraseFromParent();
2115       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2116     }
2117 
2118     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2119     MachineInstr *ExtI =
2120       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2121       .addReg(SrcReg)
2122       .addImm(0) // Offset
2123       .addImm(SrcSize); // Width
2124     I.eraseFromParent();
2125     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2126   }
2127 
2128   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2129     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2130       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2131     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2132       return false;
2133 
2134     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2135       const unsigned SextOpc = SrcSize == 8 ?
2136         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2137       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2138         .addReg(SrcReg);
2139       I.eraseFromParent();
2140       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2141     }
2142 
2143     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2144     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2145 
2146     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2147     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2148       // We need a 64-bit register source, but the high bits don't matter.
2149       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2150       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2151       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2152 
2153       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2154       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2155         .addReg(SrcReg, 0, SubReg)
2156         .addImm(AMDGPU::sub0)
2157         .addReg(UndefReg)
2158         .addImm(AMDGPU::sub1);
2159 
2160       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2161         .addReg(ExtReg)
2162         .addImm(SrcSize << 16);
2163 
2164       I.eraseFromParent();
2165       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2166     }
2167 
2168     unsigned Mask;
2169     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2170       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2171         .addReg(SrcReg)
2172         .addImm(Mask);
2173     } else {
2174       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2175         .addReg(SrcReg)
2176         .addImm(SrcSize << 16);
2177     }
2178 
2179     I.eraseFromParent();
2180     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2181   }
2182 
2183   return false;
2184 }
2185 
2186 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2187   MachineBasicBlock *BB = I.getParent();
2188   MachineOperand &ImmOp = I.getOperand(1);
2189   Register DstReg = I.getOperand(0).getReg();
2190   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2191 
2192   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2193   if (ImmOp.isFPImm()) {
2194     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2195     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2196   } else if (ImmOp.isCImm()) {
2197     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2198   } else {
2199     llvm_unreachable("Not supported by g_constants");
2200   }
2201 
2202   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2203   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2204 
2205   unsigned Opcode;
2206   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2207     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2208   } else {
2209     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2210 
2211     // We should never produce s1 values on banks other than VCC. If the user of
2212     // this already constrained the register, we may incorrectly think it's VCC
2213     // if it wasn't originally.
2214     if (Size == 1)
2215       return false;
2216   }
2217 
2218   if (Size != 64) {
2219     I.setDesc(TII.get(Opcode));
2220     I.addImplicitDefUseOperands(*MF);
2221     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2222   }
2223 
2224   const DebugLoc &DL = I.getDebugLoc();
2225 
2226   APInt Imm(Size, I.getOperand(1).getImm());
2227 
2228   MachineInstr *ResInst;
2229   if (IsSgpr && TII.isInlineConstant(Imm)) {
2230     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2231       .addImm(I.getOperand(1).getImm());
2232   } else {
2233     const TargetRegisterClass *RC = IsSgpr ?
2234       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2235     Register LoReg = MRI->createVirtualRegister(RC);
2236     Register HiReg = MRI->createVirtualRegister(RC);
2237 
2238     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2239       .addImm(Imm.trunc(32).getZExtValue());
2240 
2241     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2242       .addImm(Imm.ashr(32).getZExtValue());
2243 
2244     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2245       .addReg(LoReg)
2246       .addImm(AMDGPU::sub0)
2247       .addReg(HiReg)
2248       .addImm(AMDGPU::sub1);
2249   }
2250 
2251   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2252   // work for target independent opcodes
2253   I.eraseFromParent();
2254   const TargetRegisterClass *DstRC =
2255     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2256   if (!DstRC)
2257     return true;
2258   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2259 }
2260 
2261 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2262   // Only manually handle the f64 SGPR case.
2263   //
2264   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2265   // the bit ops theoretically have a second result due to the implicit def of
2266   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2267   // that is easy by disabling the check. The result works, but uses a
2268   // nonsensical sreg32orlds_and_sreg_1 regclass.
2269   //
2270   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2271   // the variadic REG_SEQUENCE operands.
2272 
2273   Register Dst = MI.getOperand(0).getReg();
2274   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2275   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2276       MRI->getType(Dst) != LLT::scalar(64))
2277     return false;
2278 
2279   Register Src = MI.getOperand(1).getReg();
2280   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2281   if (Fabs)
2282     Src = Fabs->getOperand(1).getReg();
2283 
2284   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2285       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2286     return false;
2287 
2288   MachineBasicBlock *BB = MI.getParent();
2289   const DebugLoc &DL = MI.getDebugLoc();
2290   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2291   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2292   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2293   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2294 
2295   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2296     .addReg(Src, 0, AMDGPU::sub0);
2297   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2298     .addReg(Src, 0, AMDGPU::sub1);
2299   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2300     .addImm(0x80000000);
2301 
2302   // Set or toggle sign bit.
2303   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2304   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2305     .addReg(HiReg)
2306     .addReg(ConstReg);
2307   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2308     .addReg(LoReg)
2309     .addImm(AMDGPU::sub0)
2310     .addReg(OpReg)
2311     .addImm(AMDGPU::sub1);
2312   MI.eraseFromParent();
2313   return true;
2314 }
2315 
2316 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2317 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2318   Register Dst = MI.getOperand(0).getReg();
2319   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2320   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2321       MRI->getType(Dst) != LLT::scalar(64))
2322     return false;
2323 
2324   Register Src = MI.getOperand(1).getReg();
2325   MachineBasicBlock *BB = MI.getParent();
2326   const DebugLoc &DL = MI.getDebugLoc();
2327   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2328   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2329   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2330   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2331 
2332   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2333       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2334     return false;
2335 
2336   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2337     .addReg(Src, 0, AMDGPU::sub0);
2338   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2339     .addReg(Src, 0, AMDGPU::sub1);
2340   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2341     .addImm(0x7fffffff);
2342 
2343   // Clear sign bit.
2344   // TODO: Should this used S_BITSET0_*?
2345   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2346     .addReg(HiReg)
2347     .addReg(ConstReg);
2348   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2349     .addReg(LoReg)
2350     .addImm(AMDGPU::sub0)
2351     .addReg(OpReg)
2352     .addImm(AMDGPU::sub1);
2353 
2354   MI.eraseFromParent();
2355   return true;
2356 }
2357 
2358 static bool isConstant(const MachineInstr &MI) {
2359   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2360 }
2361 
2362 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2363     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2364 
2365   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2366 
2367   assert(PtrMI);
2368 
2369   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2370     return;
2371 
2372   GEPInfo GEPInfo;
2373 
2374   for (unsigned i = 1; i != 3; ++i) {
2375     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2376     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2377     assert(OpDef);
2378     if (i == 2 && isConstant(*OpDef)) {
2379       // TODO: Could handle constant base + variable offset, but a combine
2380       // probably should have commuted it.
2381       assert(GEPInfo.Imm == 0);
2382       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2383       continue;
2384     }
2385     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2386     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2387       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2388     else
2389       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2390   }
2391 
2392   AddrInfo.push_back(GEPInfo);
2393   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2394 }
2395 
2396 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2397   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2398 }
2399 
2400 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2401   if (!MI.hasOneMemOperand())
2402     return false;
2403 
2404   const MachineMemOperand *MMO = *MI.memoperands_begin();
2405   const Value *Ptr = MMO->getValue();
2406 
2407   // UndefValue means this is a load of a kernel input.  These are uniform.
2408   // Sometimes LDS instructions have constant pointers.
2409   // If Ptr is null, then that means this mem operand contains a
2410   // PseudoSourceValue like GOT.
2411   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2412       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2413     return true;
2414 
2415   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2416     return true;
2417 
2418   const Instruction *I = dyn_cast<Instruction>(Ptr);
2419   return I && I->getMetadata("amdgpu.uniform");
2420 }
2421 
2422 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2423   for (const GEPInfo &GEPInfo : AddrInfo) {
2424     if (!GEPInfo.VgprParts.empty())
2425       return true;
2426   }
2427   return false;
2428 }
2429 
2430 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2431   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2432   unsigned AS = PtrTy.getAddressSpace();
2433   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2434       STI.ldsRequiresM0Init()) {
2435     MachineBasicBlock *BB = I.getParent();
2436 
2437     // If DS instructions require M0 initialization, insert it before selecting.
2438     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2439       .addImm(-1);
2440   }
2441 }
2442 
2443 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2444   MachineInstr &I) const {
2445   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2446     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2447     unsigned AS = PtrTy.getAddressSpace();
2448     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2449       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2450   }
2451 
2452   initM0(I);
2453   return selectImpl(I, *CoverageInfo);
2454 }
2455 
2456 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2457   if (Reg.isPhysical())
2458     return false;
2459 
2460   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2461   const unsigned Opcode = MI.getOpcode();
2462 
2463   if (Opcode == AMDGPU::COPY)
2464     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2465 
2466   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2467       Opcode == AMDGPU::G_XOR)
2468     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2469            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2470 
2471   if (Opcode == TargetOpcode::G_INTRINSIC)
2472     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2473 
2474   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2475 }
2476 
2477 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2478   MachineBasicBlock *BB = I.getParent();
2479   MachineOperand &CondOp = I.getOperand(0);
2480   Register CondReg = CondOp.getReg();
2481   const DebugLoc &DL = I.getDebugLoc();
2482 
2483   unsigned BrOpcode;
2484   Register CondPhysReg;
2485   const TargetRegisterClass *ConstrainRC;
2486 
2487   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2488   // whether the branch is uniform when selecting the instruction. In
2489   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2490   // RegBankSelect knows what it's doing if the branch condition is scc, even
2491   // though it currently does not.
2492   if (!isVCC(CondReg, *MRI)) {
2493     if (MRI->getType(CondReg) != LLT::scalar(32))
2494       return false;
2495 
2496     CondPhysReg = AMDGPU::SCC;
2497     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2498     ConstrainRC = &AMDGPU::SReg_32RegClass;
2499   } else {
2500     // FIXME: Should scc->vcc copies and with exec?
2501 
2502     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2503     // need to insert an and with exec.
2504     if (!isVCmpResult(CondReg, *MRI)) {
2505       const bool Is64 = STI.isWave64();
2506       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2507       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2508 
2509       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2510       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2511           .addReg(CondReg)
2512           .addReg(Exec);
2513       CondReg = TmpReg;
2514     }
2515 
2516     CondPhysReg = TRI.getVCC();
2517     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2518     ConstrainRC = TRI.getBoolRC();
2519   }
2520 
2521   if (!MRI->getRegClassOrNull(CondReg))
2522     MRI->setRegClass(CondReg, ConstrainRC);
2523 
2524   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2525     .addReg(CondReg);
2526   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2527     .addMBB(I.getOperand(1).getMBB());
2528 
2529   I.eraseFromParent();
2530   return true;
2531 }
2532 
2533 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2534   MachineInstr &I) const {
2535   Register DstReg = I.getOperand(0).getReg();
2536   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2537   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2538   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2539   if (IsVGPR)
2540     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2541 
2542   return RBI.constrainGenericRegister(
2543     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2544 }
2545 
2546 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2547   Register DstReg = I.getOperand(0).getReg();
2548   Register SrcReg = I.getOperand(1).getReg();
2549   Register MaskReg = I.getOperand(2).getReg();
2550   LLT Ty = MRI->getType(DstReg);
2551   LLT MaskTy = MRI->getType(MaskReg);
2552   MachineBasicBlock *BB = I.getParent();
2553   const DebugLoc &DL = I.getDebugLoc();
2554 
2555   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2556   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2557   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2558   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2559   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2560     return false;
2561 
2562   // Try to avoid emitting a bit operation when we only need to touch half of
2563   // the 64-bit pointer.
2564   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2565   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2566   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2567 
2568   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2569   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2570 
2571   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2572       !CanCopyLow32 && !CanCopyHi32) {
2573     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2574       .addReg(SrcReg)
2575       .addReg(MaskReg);
2576     I.eraseFromParent();
2577     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2578   }
2579 
2580   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2581   const TargetRegisterClass &RegRC
2582     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2583 
2584   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2585   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2586   const TargetRegisterClass *MaskRC =
2587       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2588 
2589   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2590       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2591       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2592     return false;
2593 
2594   if (Ty.getSizeInBits() == 32) {
2595     assert(MaskTy.getSizeInBits() == 32 &&
2596            "ptrmask should have been narrowed during legalize");
2597 
2598     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2599       .addReg(SrcReg)
2600       .addReg(MaskReg);
2601     I.eraseFromParent();
2602     return true;
2603   }
2604 
2605   Register HiReg = MRI->createVirtualRegister(&RegRC);
2606   Register LoReg = MRI->createVirtualRegister(&RegRC);
2607 
2608   // Extract the subregisters from the source pointer.
2609   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2610     .addReg(SrcReg, 0, AMDGPU::sub0);
2611   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2612     .addReg(SrcReg, 0, AMDGPU::sub1);
2613 
2614   Register MaskedLo, MaskedHi;
2615 
2616   if (CanCopyLow32) {
2617     // If all the bits in the low half are 1, we only need a copy for it.
2618     MaskedLo = LoReg;
2619   } else {
2620     // Extract the mask subregister and apply the and.
2621     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2622     MaskedLo = MRI->createVirtualRegister(&RegRC);
2623 
2624     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2625       .addReg(MaskReg, 0, AMDGPU::sub0);
2626     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2627       .addReg(LoReg)
2628       .addReg(MaskLo);
2629   }
2630 
2631   if (CanCopyHi32) {
2632     // If all the bits in the high half are 1, we only need a copy for it.
2633     MaskedHi = HiReg;
2634   } else {
2635     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2636     MaskedHi = MRI->createVirtualRegister(&RegRC);
2637 
2638     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2639       .addReg(MaskReg, 0, AMDGPU::sub1);
2640     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2641       .addReg(HiReg)
2642       .addReg(MaskHi);
2643   }
2644 
2645   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2646     .addReg(MaskedLo)
2647     .addImm(AMDGPU::sub0)
2648     .addReg(MaskedHi)
2649     .addImm(AMDGPU::sub1);
2650   I.eraseFromParent();
2651   return true;
2652 }
2653 
2654 /// Return the register to use for the index value, and the subregister to use
2655 /// for the indirectly accessed register.
2656 static std::pair<Register, unsigned>
2657 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2658                         const SIRegisterInfo &TRI,
2659                         const TargetRegisterClass *SuperRC,
2660                         Register IdxReg,
2661                         unsigned EltSize) {
2662   Register IdxBaseReg;
2663   int Offset;
2664 
2665   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2666   if (IdxBaseReg == AMDGPU::NoRegister) {
2667     // This will happen if the index is a known constant. This should ordinarily
2668     // be legalized out, but handle it as a register just in case.
2669     assert(Offset == 0);
2670     IdxBaseReg = IdxReg;
2671   }
2672 
2673   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2674 
2675   // Skip out of bounds offsets, or else we would end up using an undefined
2676   // register.
2677   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2678     return std::make_pair(IdxReg, SubRegs[0]);
2679   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2680 }
2681 
2682 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2683   MachineInstr &MI) const {
2684   Register DstReg = MI.getOperand(0).getReg();
2685   Register SrcReg = MI.getOperand(1).getReg();
2686   Register IdxReg = MI.getOperand(2).getReg();
2687 
2688   LLT DstTy = MRI->getType(DstReg);
2689   LLT SrcTy = MRI->getType(SrcReg);
2690 
2691   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2692   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2693   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2694 
2695   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2696   // into a waterfall loop.
2697   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2698     return false;
2699 
2700   const TargetRegisterClass *SrcRC =
2701       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2702   const TargetRegisterClass *DstRC =
2703       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2704   if (!SrcRC || !DstRC)
2705     return false;
2706   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2707       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2708       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2709     return false;
2710 
2711   MachineBasicBlock *BB = MI.getParent();
2712   const DebugLoc &DL = MI.getDebugLoc();
2713   const bool Is64 = DstTy.getSizeInBits() == 64;
2714 
2715   unsigned SubReg;
2716   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2717                                                      DstTy.getSizeInBits() / 8);
2718 
2719   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2720     if (DstTy.getSizeInBits() != 32 && !Is64)
2721       return false;
2722 
2723     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2724       .addReg(IdxReg);
2725 
2726     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2727     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2728       .addReg(SrcReg, 0, SubReg)
2729       .addReg(SrcReg, RegState::Implicit);
2730     MI.eraseFromParent();
2731     return true;
2732   }
2733 
2734   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2735     return false;
2736 
2737   if (!STI.useVGPRIndexMode()) {
2738     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2739       .addReg(IdxReg);
2740     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2741       .addReg(SrcReg, 0, SubReg)
2742       .addReg(SrcReg, RegState::Implicit);
2743     MI.eraseFromParent();
2744     return true;
2745   }
2746 
2747   const MCInstrDesc &GPRIDXDesc =
2748       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2749   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2750       .addReg(SrcReg)
2751       .addReg(IdxReg)
2752       .addImm(SubReg);
2753 
2754   MI.eraseFromParent();
2755   return true;
2756 }
2757 
2758 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2759 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2760   MachineInstr &MI) const {
2761   Register DstReg = MI.getOperand(0).getReg();
2762   Register VecReg = MI.getOperand(1).getReg();
2763   Register ValReg = MI.getOperand(2).getReg();
2764   Register IdxReg = MI.getOperand(3).getReg();
2765 
2766   LLT VecTy = MRI->getType(DstReg);
2767   LLT ValTy = MRI->getType(ValReg);
2768   unsigned VecSize = VecTy.getSizeInBits();
2769   unsigned ValSize = ValTy.getSizeInBits();
2770 
2771   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2772   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2773   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2774 
2775   assert(VecTy.getElementType() == ValTy);
2776 
2777   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2778   // into a waterfall loop.
2779   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2780     return false;
2781 
2782   const TargetRegisterClass *VecRC =
2783       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2784   const TargetRegisterClass *ValRC =
2785       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2786 
2787   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2788       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2789       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2790       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2791     return false;
2792 
2793   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2794     return false;
2795 
2796   unsigned SubReg;
2797   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2798                                                      ValSize / 8);
2799 
2800   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2801                          STI.useVGPRIndexMode();
2802 
2803   MachineBasicBlock *BB = MI.getParent();
2804   const DebugLoc &DL = MI.getDebugLoc();
2805 
2806   if (!IndexMode) {
2807     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2808       .addReg(IdxReg);
2809 
2810     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2811         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2812     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2813         .addReg(VecReg)
2814         .addReg(ValReg)
2815         .addImm(SubReg);
2816     MI.eraseFromParent();
2817     return true;
2818   }
2819 
2820   const MCInstrDesc &GPRIDXDesc =
2821       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2822   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2823       .addReg(VecReg)
2824       .addReg(ValReg)
2825       .addReg(IdxReg)
2826       .addImm(SubReg);
2827 
2828   MI.eraseFromParent();
2829   return true;
2830 }
2831 
2832 static bool isZeroOrUndef(int X) {
2833   return X == 0 || X == -1;
2834 }
2835 
2836 static bool isOneOrUndef(int X) {
2837   return X == 1 || X == -1;
2838 }
2839 
2840 static bool isZeroOrOneOrUndef(int X) {
2841   return X == 0 || X == 1 || X == -1;
2842 }
2843 
2844 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2845 // 32-bit register.
2846 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2847                                    ArrayRef<int> Mask) {
2848   NewMask[0] = Mask[0];
2849   NewMask[1] = Mask[1];
2850   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2851     return Src0;
2852 
2853   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2854   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2855 
2856   // Shift the mask inputs to be 0/1;
2857   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2858   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2859   return Src1;
2860 }
2861 
2862 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2863 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2864   MachineInstr &MI) const {
2865   Register DstReg = MI.getOperand(0).getReg();
2866   Register Src0Reg = MI.getOperand(1).getReg();
2867   Register Src1Reg = MI.getOperand(2).getReg();
2868   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2869 
2870   const LLT V2S16 = LLT::fixed_vector(2, 16);
2871   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2872     return false;
2873 
2874   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2875     return false;
2876 
2877   assert(ShufMask.size() == 2);
2878 
2879   MachineBasicBlock *MBB = MI.getParent();
2880   const DebugLoc &DL = MI.getDebugLoc();
2881 
2882   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2883   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2884   const TargetRegisterClass &RC = IsVALU ?
2885     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2886 
2887   // Handle the degenerate case which should have folded out.
2888   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2889     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2890 
2891     MI.eraseFromParent();
2892     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2893   }
2894 
2895   // A legal VOP3P mask only reads one of the sources.
2896   int Mask[2];
2897   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2898 
2899   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2900       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2901     return false;
2902 
2903   // TODO: This also should have been folded out
2904   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2905     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2906       .addReg(SrcVec);
2907 
2908     MI.eraseFromParent();
2909     return true;
2910   }
2911 
2912   if (Mask[0] == 1 && Mask[1] == -1) {
2913     if (IsVALU) {
2914       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2915         .addImm(16)
2916         .addReg(SrcVec);
2917     } else {
2918       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2919         .addReg(SrcVec)
2920         .addImm(16);
2921     }
2922   } else if (Mask[0] == -1 && Mask[1] == 0) {
2923     if (IsVALU) {
2924       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2925         .addImm(16)
2926         .addReg(SrcVec);
2927     } else {
2928       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2929         .addReg(SrcVec)
2930         .addImm(16);
2931     }
2932   } else if (Mask[0] == 0 && Mask[1] == 0) {
2933     if (IsVALU) {
2934       if (STI.hasSDWA()) {
2935         // Write low half of the register into the high half.
2936         MachineInstr *MovSDWA =
2937             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2938                 .addImm(0)                             // $src0_modifiers
2939                 .addReg(SrcVec)                        // $src0
2940                 .addImm(0)                             // $clamp
2941                 .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2942                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2943                 .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2944                 .addReg(SrcVec, RegState::Implicit);
2945         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2946       } else {
2947         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2948         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_AND_B32_e32), TmpReg)
2949             .addImm(0xFFFF)
2950             .addReg(SrcVec);
2951         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2952             .addReg(TmpReg)
2953             .addImm(16)
2954             .addReg(TmpReg);
2955       }
2956     } else {
2957       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2958         .addReg(SrcVec)
2959         .addReg(SrcVec);
2960     }
2961   } else if (Mask[0] == 1 && Mask[1] == 1) {
2962     if (IsVALU) {
2963       if (STI.hasSDWA()) {
2964         // Write high half of the register into the low half.
2965         MachineInstr *MovSDWA =
2966             BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2967                 .addImm(0)                             // $src0_modifiers
2968                 .addReg(SrcVec)                        // $src0
2969                 .addImm(0)                             // $clamp
2970                 .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2971                 .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2972                 .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2973                 .addReg(SrcVec, RegState::Implicit);
2974         MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2975       } else {
2976         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
2977         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), TmpReg)
2978             .addImm(16)
2979             .addReg(SrcVec);
2980         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHL_OR_B32_e64), DstReg)
2981             .addReg(TmpReg)
2982             .addImm(16)
2983             .addReg(TmpReg);
2984       }
2985     } else {
2986       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2987         .addReg(SrcVec)
2988         .addReg(SrcVec);
2989     }
2990   } else if (Mask[0] == 1 && Mask[1] == 0) {
2991     if (IsVALU) {
2992       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2993         .addReg(SrcVec)
2994         .addReg(SrcVec)
2995         .addImm(16);
2996     } else {
2997       if (STI.hasSPackHL()) {
2998         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HL_B32_B16), DstReg)
2999             .addReg(SrcVec)
3000             .addReg(SrcVec);
3001       } else {
3002         Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3003         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
3004             .addReg(SrcVec)
3005             .addImm(16);
3006         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
3007             .addReg(TmpReg)
3008             .addReg(SrcVec);
3009       }
3010     }
3011   } else
3012     llvm_unreachable("all shuffle masks should be handled");
3013 
3014   MI.eraseFromParent();
3015   return true;
3016 }
3017 
3018 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
3019   MachineInstr &MI) const {
3020   const Register DefReg = MI.getOperand(0).getReg();
3021   LLT DefTy = MRI->getType(DefReg);
3022   if (AMDGPU::hasAtomicFaddRtnForTy(STI, DefTy))
3023     return selectImpl(MI, *CoverageInfo);
3024 
3025   MachineBasicBlock *MBB = MI.getParent();
3026   const DebugLoc &DL = MI.getDebugLoc();
3027 
3028   if (!MRI->use_nodbg_empty(DefReg)) {
3029     Function &F = MBB->getParent()->getFunction();
3030     DiagnosticInfoUnsupported
3031       NoFpRet(F, "return versions of fp atomics not supported",
3032               MI.getDebugLoc(), DS_Error);
3033     F.getContext().diagnose(NoFpRet);
3034     return false;
3035   }
3036 
3037   // FIXME: This is only needed because tablegen requires number of dst operands
3038   // in match and replace pattern to be the same. Otherwise patterns can be
3039   // exported from SDag path.
3040   MachineOperand &VDataIn = MI.getOperand(1);
3041   MachineOperand &VIndex = MI.getOperand(3);
3042   MachineOperand &VOffset = MI.getOperand(4);
3043   MachineOperand &SOffset = MI.getOperand(5);
3044   int16_t Offset = MI.getOperand(6).getImm();
3045 
3046   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
3047   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
3048 
3049   unsigned Opcode;
3050   if (HasVOffset) {
3051     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
3052                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
3053   } else {
3054     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
3055                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
3056   }
3057 
3058   if (MRI->getType(VDataIn.getReg()).isVector()) {
3059     switch (Opcode) {
3060     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
3061       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
3062       break;
3063     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
3064       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3065       break;
3066     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3067       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3068       break;
3069     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3070       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3071       break;
3072     }
3073   }
3074 
3075   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3076   I.add(VDataIn);
3077 
3078   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3079       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3080     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3081     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3082       .addReg(VIndex.getReg())
3083       .addImm(AMDGPU::sub0)
3084       .addReg(VOffset.getReg())
3085       .addImm(AMDGPU::sub1);
3086 
3087     I.addReg(IdxReg);
3088   } else if (HasVIndex) {
3089     I.add(VIndex);
3090   } else if (HasVOffset) {
3091     I.add(VOffset);
3092   }
3093 
3094   I.add(MI.getOperand(2)); // rsrc
3095   I.add(SOffset);
3096   I.addImm(Offset);
3097   I.addImm(MI.getOperand(7).getImm()); // cpol
3098   I.cloneMemRefs(MI);
3099 
3100   MI.eraseFromParent();
3101 
3102   return true;
3103 }
3104 
3105 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3106   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3107 
3108   if (STI.hasGFX90AInsts()) {
3109     // gfx90a adds return versions of the global atomic fadd instructions so no
3110     // special handling is required.
3111     return selectImpl(MI, *CoverageInfo);
3112   }
3113 
3114   MachineBasicBlock *MBB = MI.getParent();
3115   const DebugLoc &DL = MI.getDebugLoc();
3116 
3117   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3118     Function &F = MBB->getParent()->getFunction();
3119     DiagnosticInfoUnsupported
3120       NoFpRet(F, "return versions of fp atomics not supported",
3121               MI.getDebugLoc(), DS_Error);
3122     F.getContext().diagnose(NoFpRet);
3123     return false;
3124   }
3125 
3126   // FIXME: This is only needed because tablegen requires number of dst operands
3127   // in match and replace pattern to be the same. Otherwise patterns can be
3128   // exported from SDag path.
3129   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3130 
3131   Register Data = DataOp.getReg();
3132   const unsigned Opc = MRI->getType(Data).isVector() ?
3133     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3134   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3135     .addReg(Addr.first)
3136     .addReg(Data)
3137     .addImm(Addr.second)
3138     .addImm(0) // cpol
3139     .cloneMemRefs(MI);
3140 
3141   MI.eraseFromParent();
3142   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3143 }
3144 
3145 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3146   unsigned Opc;
3147   unsigned Size = MI.getOperand(3).getImm();
3148 
3149   // The struct intrinsic variants add one additional operand over raw.
3150   const bool HasVIndex = MI.getNumOperands() == 9;
3151   Register VIndex;
3152   int OpOffset = 0;
3153   if (HasVIndex) {
3154     VIndex = MI.getOperand(4).getReg();
3155     OpOffset = 1;
3156   }
3157 
3158   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3159   Optional<ValueAndVReg> MaybeVOffset =
3160       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3161   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3162 
3163   switch (Size) {
3164   default:
3165     return false;
3166   case 1:
3167     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3168                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3169                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3170                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3171     break;
3172   case 2:
3173     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3174                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3175                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3176                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3177     break;
3178   case 4:
3179     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3180                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3181                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3182                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3183     break;
3184   }
3185 
3186   MachineBasicBlock *MBB = MI.getParent();
3187   const DebugLoc &DL = MI.getDebugLoc();
3188   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3189     .add(MI.getOperand(2));
3190 
3191   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3192 
3193   if (HasVIndex && HasVOffset) {
3194     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3195     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3196       .addReg(VIndex)
3197       .addImm(AMDGPU::sub0)
3198       .addReg(VOffset)
3199       .addImm(AMDGPU::sub1);
3200 
3201     MIB.addReg(IdxReg);
3202   } else if (HasVIndex) {
3203     MIB.addReg(VIndex);
3204   } else if (HasVOffset) {
3205     MIB.addReg(VOffset);
3206   }
3207 
3208   MIB.add(MI.getOperand(1));            // rsrc
3209   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3210   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3211   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3212   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3213   MIB.addImm((Aux >> 3) & 1);           // swz
3214 
3215   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3216   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3217   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3218   MachinePointerInfo StorePtrI = LoadPtrI;
3219   StorePtrI.V = nullptr;
3220   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3221 
3222   auto F = LoadMMO->getFlags() &
3223            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3224   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3225                                      Size, LoadMMO->getBaseAlign());
3226 
3227   MachineMemOperand *StoreMMO =
3228       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3229                                sizeof(int32_t), LoadMMO->getBaseAlign());
3230 
3231   MIB.setMemRefs({LoadMMO, StoreMMO});
3232 
3233   MI.eraseFromParent();
3234   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3235 }
3236 
3237 /// Match a zero extend from a 32-bit value to 64-bits.
3238 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3239   Register ZExtSrc;
3240   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3241     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3242 
3243   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3244   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3245   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3246     return Register();
3247 
3248   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3249     return Def->getOperand(1).getReg();
3250   }
3251 
3252   return Register();
3253 }
3254 
3255 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
3256   unsigned Opc;
3257   unsigned Size = MI.getOperand(3).getImm();
3258 
3259   switch (Size) {
3260   default:
3261     return false;
3262   case 1:
3263     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3264     break;
3265   case 2:
3266     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3267     break;
3268   case 4:
3269     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3270     break;
3271   }
3272 
3273   MachineBasicBlock *MBB = MI.getParent();
3274   const DebugLoc &DL = MI.getDebugLoc();
3275   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3276     .add(MI.getOperand(2));
3277 
3278   Register Addr = MI.getOperand(1).getReg();
3279   Register VOffset;
3280   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3281   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3282   if (!isSGPR(Addr)) {
3283     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3284     if (isSGPR(AddrDef->Reg)) {
3285       Addr = AddrDef->Reg;
3286     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3287       Register SAddr =
3288           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3289       if (SAddr && isSGPR(SAddr)) {
3290         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3291         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3292           Addr = SAddr;
3293           VOffset = Off;
3294         }
3295       }
3296     }
3297   }
3298 
3299   if (isSGPR(Addr)) {
3300     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3301     if (!VOffset) {
3302       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3303       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3304         .addImm(0);
3305     }
3306   }
3307 
3308   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3309     .addReg(Addr);
3310 
3311   if (isSGPR(Addr))
3312     MIB.addReg(VOffset);
3313 
3314   MIB.add(MI.getOperand(4))  // offset
3315      .add(MI.getOperand(5)); // cpol
3316 
3317   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3318   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3319   LoadPtrI.Offset = MI.getOperand(4).getImm();
3320   MachinePointerInfo StorePtrI = LoadPtrI;
3321   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3322   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3323   auto F = LoadMMO->getFlags() &
3324            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3325   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3326                                      Size, LoadMMO->getBaseAlign());
3327   MachineMemOperand *StoreMMO =
3328       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3329                                sizeof(int32_t), Align(4));
3330 
3331   MIB.setMemRefs({LoadMMO, StoreMMO});
3332 
3333   MI.eraseFromParent();
3334   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3335 }
3336 
3337 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3338   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3339   MI.removeOperand(1);
3340   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3341   return true;
3342 }
3343 
3344 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3345   unsigned Opc;
3346   switch (MI.getIntrinsicID()) {
3347   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3348     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3349     break;
3350   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3351     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3352     break;
3353   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3354     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3355     break;
3356   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3357     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3358     break;
3359   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3360     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3361     break;
3362   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3363     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3364     break;
3365   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_bf8:
3366     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_BF8_e64;
3367     break;
3368   case Intrinsic::amdgcn_smfmac_f32_16x16x64_bf8_fp8:
3369     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_BF8_FP8_e64;
3370     break;
3371   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_bf8:
3372     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_BF8_e64;
3373     break;
3374   case Intrinsic::amdgcn_smfmac_f32_16x16x64_fp8_fp8:
3375     Opc = AMDGPU::V_SMFMAC_F32_16X16X64_FP8_FP8_e64;
3376     break;
3377   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_bf8:
3378     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_BF8_e64;
3379     break;
3380   case Intrinsic::amdgcn_smfmac_f32_32x32x32_bf8_fp8:
3381     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_BF8_FP8_e64;
3382     break;
3383   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_bf8:
3384     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_BF8_e64;
3385     break;
3386   case Intrinsic::amdgcn_smfmac_f32_32x32x32_fp8_fp8:
3387     Opc = AMDGPU::V_SMFMAC_F32_32X32X32_FP8_FP8_e64;
3388     break;
3389   default:
3390     llvm_unreachable("unhandled smfmac intrinsic");
3391   }
3392 
3393   auto VDst_In = MI.getOperand(4);
3394 
3395   MI.setDesc(TII.get(Opc));
3396   MI.removeOperand(4); // VDst_In
3397   MI.removeOperand(1); // Intrinsic ID
3398   MI.addOperand(VDst_In); // Readd VDst_In to the end
3399   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3400   return true;
3401 }
3402 
3403 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3404   Register DstReg = MI.getOperand(0).getReg();
3405   Register SrcReg = MI.getOperand(1).getReg();
3406   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3407   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3408   MachineBasicBlock *MBB = MI.getParent();
3409   const DebugLoc &DL = MI.getDebugLoc();
3410 
3411   if (IsVALU) {
3412     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3413       .addImm(Subtarget->getWavefrontSizeLog2())
3414       .addReg(SrcReg);
3415   } else {
3416     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3417       .addReg(SrcReg)
3418       .addImm(Subtarget->getWavefrontSizeLog2());
3419   }
3420 
3421   const TargetRegisterClass &RC =
3422       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3423   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3424     return false;
3425 
3426   MI.eraseFromParent();
3427   return true;
3428 }
3429 
3430 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3431   if (I.isPHI())
3432     return selectPHI(I);
3433 
3434   if (!I.isPreISelOpcode()) {
3435     if (I.isCopy())
3436       return selectCOPY(I);
3437     return true;
3438   }
3439 
3440   switch (I.getOpcode()) {
3441   case TargetOpcode::G_AND:
3442   case TargetOpcode::G_OR:
3443   case TargetOpcode::G_XOR:
3444     if (selectImpl(I, *CoverageInfo))
3445       return true;
3446     return selectG_AND_OR_XOR(I);
3447   case TargetOpcode::G_ADD:
3448   case TargetOpcode::G_SUB:
3449     if (selectImpl(I, *CoverageInfo))
3450       return true;
3451     return selectG_ADD_SUB(I);
3452   case TargetOpcode::G_UADDO:
3453   case TargetOpcode::G_USUBO:
3454   case TargetOpcode::G_UADDE:
3455   case TargetOpcode::G_USUBE:
3456     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3457   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3458   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3459     return selectG_AMDGPU_MAD_64_32(I);
3460   case TargetOpcode::G_INTTOPTR:
3461   case TargetOpcode::G_BITCAST:
3462   case TargetOpcode::G_PTRTOINT:
3463     return selectCOPY(I);
3464   case TargetOpcode::G_CONSTANT:
3465   case TargetOpcode::G_FCONSTANT:
3466     return selectG_CONSTANT(I);
3467   case TargetOpcode::G_FNEG:
3468     if (selectImpl(I, *CoverageInfo))
3469       return true;
3470     return selectG_FNEG(I);
3471   case TargetOpcode::G_FABS:
3472     if (selectImpl(I, *CoverageInfo))
3473       return true;
3474     return selectG_FABS(I);
3475   case TargetOpcode::G_EXTRACT:
3476     return selectG_EXTRACT(I);
3477   case TargetOpcode::G_MERGE_VALUES:
3478   case TargetOpcode::G_BUILD_VECTOR:
3479   case TargetOpcode::G_CONCAT_VECTORS:
3480     return selectG_MERGE_VALUES(I);
3481   case TargetOpcode::G_UNMERGE_VALUES:
3482     return selectG_UNMERGE_VALUES(I);
3483   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3484     return selectG_BUILD_VECTOR_TRUNC(I);
3485   case TargetOpcode::G_PTR_ADD:
3486     return selectG_PTR_ADD(I);
3487   case TargetOpcode::G_IMPLICIT_DEF:
3488     return selectG_IMPLICIT_DEF(I);
3489   case TargetOpcode::G_FREEZE:
3490     return selectCOPY(I);
3491   case TargetOpcode::G_INSERT:
3492     return selectG_INSERT(I);
3493   case TargetOpcode::G_INTRINSIC:
3494     return selectG_INTRINSIC(I);
3495   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3496     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3497   case TargetOpcode::G_ICMP:
3498     if (selectG_ICMP(I))
3499       return true;
3500     return selectImpl(I, *CoverageInfo);
3501   case TargetOpcode::G_LOAD:
3502   case TargetOpcode::G_STORE:
3503   case TargetOpcode::G_ATOMIC_CMPXCHG:
3504   case TargetOpcode::G_ATOMICRMW_XCHG:
3505   case TargetOpcode::G_ATOMICRMW_ADD:
3506   case TargetOpcode::G_ATOMICRMW_SUB:
3507   case TargetOpcode::G_ATOMICRMW_AND:
3508   case TargetOpcode::G_ATOMICRMW_OR:
3509   case TargetOpcode::G_ATOMICRMW_XOR:
3510   case TargetOpcode::G_ATOMICRMW_MIN:
3511   case TargetOpcode::G_ATOMICRMW_MAX:
3512   case TargetOpcode::G_ATOMICRMW_UMIN:
3513   case TargetOpcode::G_ATOMICRMW_UMAX:
3514   case TargetOpcode::G_ATOMICRMW_FADD:
3515   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3516   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3517   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3518   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3519     return selectG_LOAD_STORE_ATOMICRMW(I);
3520   case TargetOpcode::G_SELECT:
3521     return selectG_SELECT(I);
3522   case TargetOpcode::G_TRUNC:
3523     return selectG_TRUNC(I);
3524   case TargetOpcode::G_SEXT:
3525   case TargetOpcode::G_ZEXT:
3526   case TargetOpcode::G_ANYEXT:
3527   case TargetOpcode::G_SEXT_INREG:
3528     if (selectImpl(I, *CoverageInfo))
3529       return true;
3530     return selectG_SZA_EXT(I);
3531   case TargetOpcode::G_BRCOND:
3532     return selectG_BRCOND(I);
3533   case TargetOpcode::G_GLOBAL_VALUE:
3534     return selectG_GLOBAL_VALUE(I);
3535   case TargetOpcode::G_PTRMASK:
3536     return selectG_PTRMASK(I);
3537   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3538     return selectG_EXTRACT_VECTOR_ELT(I);
3539   case TargetOpcode::G_INSERT_VECTOR_ELT:
3540     return selectG_INSERT_VECTOR_ELT(I);
3541   case TargetOpcode::G_SHUFFLE_VECTOR:
3542     return selectG_SHUFFLE_VECTOR(I);
3543   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3544   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3545   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3546   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3547     const AMDGPU::ImageDimIntrinsicInfo *Intr
3548       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3549     assert(Intr && "not an image intrinsic with image pseudo");
3550     return selectImageIntrinsic(I, Intr);
3551   }
3552   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3553     return selectBVHIntrinsic(I);
3554   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3555     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3556   case AMDGPU::G_SBFX:
3557   case AMDGPU::G_UBFX:
3558     return selectG_SBFX_UBFX(I);
3559   case AMDGPU::G_SI_CALL:
3560     I.setDesc(TII.get(AMDGPU::SI_CALL));
3561     return true;
3562   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3563     return selectWaveAddress(I);
3564   default:
3565     return selectImpl(I, *CoverageInfo);
3566   }
3567   return false;
3568 }
3569 
3570 InstructionSelector::ComplexRendererFns
3571 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3572   return {{
3573       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3574   }};
3575 
3576 }
3577 
3578 std::pair<Register, unsigned> AMDGPUInstructionSelector::selectVOP3ModsImpl(
3579     MachineOperand &Root, bool AllowAbs, bool OpSel, bool ForceVGPR) const {
3580   Register Src = Root.getReg();
3581   Register OrigSrc = Src;
3582   unsigned Mods = 0;
3583   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3584 
3585   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3586     Src = MI->getOperand(1).getReg();
3587     Mods |= SISrcMods::NEG;
3588     MI = getDefIgnoringCopies(Src, *MRI);
3589   }
3590 
3591   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3592     Src = MI->getOperand(1).getReg();
3593     Mods |= SISrcMods::ABS;
3594   }
3595 
3596   if (OpSel)
3597     Mods |= SISrcMods::OP_SEL_0;
3598 
3599   if ((Mods != 0 || ForceVGPR) &&
3600       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3601     MachineInstr *UseMI = Root.getParent();
3602 
3603     // If we looked through copies to find source modifiers on an SGPR operand,
3604     // we now have an SGPR register source. To avoid potentially violating the
3605     // constant bus restriction, we need to insert a copy to a VGPR.
3606     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3607     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3608             TII.get(AMDGPU::COPY), VGPRSrc)
3609       .addReg(Src);
3610     Src = VGPRSrc;
3611   }
3612 
3613   return std::make_pair(Src, Mods);
3614 }
3615 
3616 ///
3617 /// This will select either an SGPR or VGPR operand and will save us from
3618 /// having to write an extra tablegen pattern.
3619 InstructionSelector::ComplexRendererFns
3620 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3621   return {{
3622       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3623   }};
3624 }
3625 
3626 InstructionSelector::ComplexRendererFns
3627 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3628   Register Src;
3629   unsigned Mods;
3630   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3631 
3632   return {{
3633       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3634       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3635       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3636       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3637   }};
3638 }
3639 
3640 InstructionSelector::ComplexRendererFns
3641 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3642   Register Src;
3643   unsigned Mods;
3644   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3645 
3646   return {{
3647       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3648       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3649       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3650       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3651   }};
3652 }
3653 
3654 InstructionSelector::ComplexRendererFns
3655 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3656   return {{
3657       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3658       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3659       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3660   }};
3661 }
3662 
3663 InstructionSelector::ComplexRendererFns
3664 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3665   Register Src;
3666   unsigned Mods;
3667   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3668 
3669   return {{
3670       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3671       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3672   }};
3673 }
3674 
3675 InstructionSelector::ComplexRendererFns
3676 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3677   Register Src;
3678   unsigned Mods;
3679   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3680 
3681   return {{
3682       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3683       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3684   }};
3685 }
3686 
3687 InstructionSelector::ComplexRendererFns
3688 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3689   Register Reg = Root.getReg();
3690   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3691   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3692               Def->getOpcode() == AMDGPU::G_FABS))
3693     return {};
3694   return {{
3695       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3696   }};
3697 }
3698 
3699 std::pair<Register, unsigned>
3700 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3701   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3702   unsigned Mods = 0;
3703   MachineInstr *MI = MRI.getVRegDef(Src);
3704 
3705   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3706       // It's possible to see an f32 fneg here, but unlikely.
3707       // TODO: Treat f32 fneg as only high bit.
3708       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3709     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3710     Src = MI->getOperand(1).getReg();
3711     MI = MRI.getVRegDef(Src);
3712   }
3713 
3714   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3715   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3716 
3717   // Packed instructions do not have abs modifiers.
3718   Mods |= SISrcMods::OP_SEL_1;
3719 
3720   return std::make_pair(Src, Mods);
3721 }
3722 
3723 InstructionSelector::ComplexRendererFns
3724 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3725   MachineRegisterInfo &MRI
3726     = Root.getParent()->getParent()->getParent()->getRegInfo();
3727 
3728   Register Src;
3729   unsigned Mods;
3730   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3731 
3732   return {{
3733       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3734       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3735   }};
3736 }
3737 
3738 InstructionSelector::ComplexRendererFns
3739 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3740   MachineRegisterInfo &MRI
3741     = Root.getParent()->getParent()->getParent()->getRegInfo();
3742 
3743   Register Src;
3744   unsigned Mods;
3745   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3746 
3747   return {{
3748       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3749       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3750   }};
3751 }
3752 
3753 InstructionSelector::ComplexRendererFns
3754 AMDGPUInstructionSelector::selectDotIUVOP3PMods(MachineOperand &Root) const {
3755   // Literal i1 value set in intrinsic, represents SrcMods for the next operand.
3756   // Value is in Imm operand as i1 sign extended to int64_t.
3757   // 1(-1) promotes packed values to signed, 0 treats them as unsigned.
3758   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3759          "expected i1 value");
3760   unsigned Mods = SISrcMods::OP_SEL_1;
3761   if (Root.getImm() == -1)
3762     Mods ^= SISrcMods::NEG;
3763   return {{
3764       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3765   }};
3766 }
3767 
3768 InstructionSelector::ComplexRendererFns
3769 AMDGPUInstructionSelector::selectWMMAOpSelVOP3PMods(
3770     MachineOperand &Root) const {
3771   assert((Root.isImm() && (Root.getImm() == -1 || Root.getImm() == 0)) &&
3772          "expected i1 value");
3773   unsigned Mods = SISrcMods::OP_SEL_1;
3774   if (Root.getImm() != 0)
3775     Mods |= SISrcMods::OP_SEL_0;
3776 
3777   return {{
3778       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3779   }};
3780 }
3781 
3782 InstructionSelector::ComplexRendererFns
3783 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3784   Register Src;
3785   unsigned Mods;
3786   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3787   if (!isKnownNeverNaN(Src, *MRI))
3788     return None;
3789 
3790   return {{
3791       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3792       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3793   }};
3794 }
3795 
3796 InstructionSelector::ComplexRendererFns
3797 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3798   // FIXME: Handle op_sel
3799   return {{
3800       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3801       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3802   }};
3803 }
3804 
3805 InstructionSelector::ComplexRendererFns
3806 AMDGPUInstructionSelector::selectVINTERPMods(MachineOperand &Root) const {
3807   Register Src;
3808   unsigned Mods;
3809   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3810                                            /* AllowAbs */ false,
3811                                            /* OpSel */ false,
3812                                            /* ForceVGPR */ true);
3813 
3814   return {{
3815       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3816       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3817   }};
3818 }
3819 
3820 InstructionSelector::ComplexRendererFns
3821 AMDGPUInstructionSelector::selectVINTERPModsHi(MachineOperand &Root) const {
3822   Register Src;
3823   unsigned Mods;
3824   std::tie(Src, Mods) = selectVOP3ModsImpl(Root,
3825                                            /* AllowAbs */ false,
3826                                            /* OpSel */ true,
3827                                            /* ForceVGPR */ true);
3828 
3829   return {{
3830       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3831       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3832   }};
3833 }
3834 
3835 bool AMDGPUInstructionSelector::selectSmrdOffset(MachineOperand &Root,
3836                                                  Register &Base,
3837                                                  Register *SOffset,
3838                                                  int64_t *Offset) const {
3839   MachineInstr *MI = Root.getParent();
3840   MachineBasicBlock *MBB = MI->getParent();
3841 
3842   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3843   // then we can select all ptr + 32-bit offsets.
3844   SmallVector<GEPInfo, 4> AddrInfo;
3845   getAddrModeInfo(*MI, *MRI, AddrInfo);
3846 
3847   if (AddrInfo.empty())
3848     return false;
3849 
3850   const GEPInfo &GEPI = AddrInfo[0];
3851   Optional<int64_t> EncodedImm =
3852       AMDGPU::getSMRDEncodedOffset(STI, GEPI.Imm, false);
3853 
3854   if (SOffset && Offset) {
3855     if (GEPI.SgprParts.size() == 1 && GEPI.Imm != 0 && EncodedImm &&
3856         AddrInfo.size() > 1) {
3857       const GEPInfo &GEPI2 = AddrInfo[1];
3858       if (GEPI2.SgprParts.size() == 2 && GEPI2.Imm == 0) {
3859         if (Register OffsetReg =
3860                 matchZeroExtendFromS32(*MRI, GEPI2.SgprParts[1])) {
3861           Base = GEPI2.SgprParts[0];
3862           *SOffset = OffsetReg;
3863           *Offset = *EncodedImm;
3864           return true;
3865         }
3866       }
3867     }
3868     return false;
3869   }
3870 
3871   if (Offset && GEPI.SgprParts.size() == 1 && EncodedImm) {
3872     Base = GEPI.SgprParts[0];
3873     *Offset = *EncodedImm;
3874     return true;
3875   }
3876 
3877   // SGPR offset is unsigned.
3878   if (SOffset && GEPI.SgprParts.size() == 1 && isUInt<32>(GEPI.Imm) &&
3879       GEPI.Imm != 0) {
3880     // If we make it this far we have a load with an 32-bit immediate offset.
3881     // It is OK to select this using a sgpr offset, because we have already
3882     // failed trying to select this load into one of the _IMM variants since
3883     // the _IMM Patterns are considered before the _SGPR patterns.
3884     Base = GEPI.SgprParts[0];
3885     *SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3886     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), *SOffset)
3887         .addImm(GEPI.Imm);
3888     return true;
3889   }
3890 
3891   if (SOffset && GEPI.SgprParts.size() && GEPI.Imm == 0) {
3892     if (Register OffsetReg = matchZeroExtendFromS32(*MRI, GEPI.SgprParts[1])) {
3893       Base = GEPI.SgprParts[0];
3894       *SOffset = OffsetReg;
3895       return true;
3896     }
3897   }
3898 
3899   return false;
3900 }
3901 
3902 InstructionSelector::ComplexRendererFns
3903 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3904   Register Base;
3905   int64_t Offset;
3906   if (!selectSmrdOffset(Root, Base, /* SOffset= */ nullptr, &Offset))
3907     return None;
3908 
3909   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3910            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3911 }
3912 
3913 InstructionSelector::ComplexRendererFns
3914 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3915   SmallVector<GEPInfo, 4> AddrInfo;
3916   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3917 
3918   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3919     return None;
3920 
3921   const GEPInfo &GEPInfo = AddrInfo[0];
3922   Register PtrReg = GEPInfo.SgprParts[0];
3923   Optional<int64_t> EncodedImm =
3924       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3925   if (!EncodedImm)
3926     return None;
3927 
3928   return {{
3929     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3930     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3931   }};
3932 }
3933 
3934 InstructionSelector::ComplexRendererFns
3935 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3936   Register Base, SOffset;
3937   if (!selectSmrdOffset(Root, Base, &SOffset, /* Offset= */ nullptr))
3938     return None;
3939 
3940   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3941            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); }}};
3942 }
3943 
3944 InstructionSelector::ComplexRendererFns
3945 AMDGPUInstructionSelector::selectSmrdSgprImm(MachineOperand &Root) const {
3946   Register Base, SOffset;
3947   int64_t Offset;
3948   if (!selectSmrdOffset(Root, Base, &SOffset, &Offset))
3949     return None;
3950 
3951   return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Base); },
3952            [=](MachineInstrBuilder &MIB) { MIB.addReg(SOffset); },
3953            [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }}};
3954 }
3955 
3956 std::pair<Register, int>
3957 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3958                                                 uint64_t FlatVariant) const {
3959   MachineInstr *MI = Root.getParent();
3960 
3961   auto Default = std::make_pair(Root.getReg(), 0);
3962 
3963   if (!STI.hasFlatInstOffsets())
3964     return Default;
3965 
3966   Register PtrBase;
3967   int64_t ConstOffset;
3968   std::tie(PtrBase, ConstOffset) =
3969       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3970   if (ConstOffset == 0)
3971     return Default;
3972 
3973   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3974   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3975     return Default;
3976 
3977   return std::make_pair(PtrBase, ConstOffset);
3978 }
3979 
3980 InstructionSelector::ComplexRendererFns
3981 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3982   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3983 
3984   return {{
3985       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3986       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3987     }};
3988 }
3989 
3990 InstructionSelector::ComplexRendererFns
3991 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3992   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3993 
3994   return {{
3995       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3996       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3997   }};
3998 }
3999 
4000 InstructionSelector::ComplexRendererFns
4001 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
4002   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
4003 
4004   return {{
4005       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
4006       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
4007     }};
4008 }
4009 
4010 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
4011 InstructionSelector::ComplexRendererFns
4012 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
4013   Register Addr = Root.getReg();
4014   Register PtrBase;
4015   int64_t ConstOffset;
4016   int64_t ImmOffset = 0;
4017 
4018   // Match the immediate offset first, which canonically is moved as low as
4019   // possible.
4020   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4021 
4022   if (ConstOffset != 0) {
4023     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
4024                               SIInstrFlags::FlatGlobal)) {
4025       Addr = PtrBase;
4026       ImmOffset = ConstOffset;
4027     } else {
4028       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
4029       if (isSGPR(PtrBaseDef->Reg)) {
4030         if (ConstOffset > 0) {
4031           // Offset is too large.
4032           //
4033           // saddr + large_offset -> saddr +
4034           //                         (voffset = large_offset & ~MaxOffset) +
4035           //                         (large_offset & MaxOffset);
4036           int64_t SplitImmOffset, RemainderOffset;
4037           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
4038               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
4039 
4040           if (isUInt<32>(RemainderOffset)) {
4041             MachineInstr *MI = Root.getParent();
4042             MachineBasicBlock *MBB = MI->getParent();
4043             Register HighBits =
4044                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4045 
4046             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4047                     HighBits)
4048                 .addImm(RemainderOffset);
4049 
4050             return {{
4051                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
4052                 [=](MachineInstrBuilder &MIB) {
4053                   MIB.addReg(HighBits);
4054                 }, // voffset
4055                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
4056             }};
4057           }
4058         }
4059 
4060         // We are adding a 64 bit SGPR and a constant. If constant bus limit
4061         // is 1 we would need to perform 1 or 2 extra moves for each half of
4062         // the constant and it is better to do a scalar add and then issue a
4063         // single VALU instruction to materialize zero. Otherwise it is less
4064         // instructions to perform VALU adds with immediates or inline literals.
4065         unsigned NumLiterals =
4066             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
4067             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
4068         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
4069           return None;
4070       }
4071     }
4072   }
4073 
4074   // Match the variable offset.
4075   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4076   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4077     // Look through the SGPR->VGPR copy.
4078     Register SAddr =
4079         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
4080 
4081     if (SAddr && isSGPR(SAddr)) {
4082       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
4083 
4084       // It's possible voffset is an SGPR here, but the copy to VGPR will be
4085       // inserted later.
4086       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
4087         return {{[=](MachineInstrBuilder &MIB) { // saddr
4088                    MIB.addReg(SAddr);
4089                  },
4090                  [=](MachineInstrBuilder &MIB) { // voffset
4091                    MIB.addReg(VOffset);
4092                  },
4093                  [=](MachineInstrBuilder &MIB) { // offset
4094                    MIB.addImm(ImmOffset);
4095                  }}};
4096       }
4097     }
4098   }
4099 
4100   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
4101   // drop this.
4102   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
4103       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
4104     return None;
4105 
4106   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
4107   // moves required to copy a 64-bit SGPR to VGPR.
4108   MachineInstr *MI = Root.getParent();
4109   MachineBasicBlock *MBB = MI->getParent();
4110   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4111 
4112   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
4113       .addImm(0);
4114 
4115   return {{
4116       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
4117       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
4118       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
4119   }};
4120 }
4121 
4122 InstructionSelector::ComplexRendererFns
4123 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
4124   Register Addr = Root.getReg();
4125   Register PtrBase;
4126   int64_t ConstOffset;
4127   int64_t ImmOffset = 0;
4128 
4129   // Match the immediate offset first, which canonically is moved as low as
4130   // possible.
4131   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4132 
4133   if (ConstOffset != 0 &&
4134       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
4135                             SIInstrFlags::FlatScratch)) {
4136     Addr = PtrBase;
4137     ImmOffset = ConstOffset;
4138   }
4139 
4140   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4141   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4142     int FI = AddrDef->MI->getOperand(1).getIndex();
4143     return {{
4144         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4145         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4146     }};
4147   }
4148 
4149   Register SAddr = AddrDef->Reg;
4150 
4151   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
4152     Register LHS = AddrDef->MI->getOperand(1).getReg();
4153     Register RHS = AddrDef->MI->getOperand(2).getReg();
4154     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4155     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
4156 
4157     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
4158         isSGPR(RHSDef->Reg)) {
4159       int FI = LHSDef->MI->getOperand(1).getIndex();
4160       MachineInstr &I = *Root.getParent();
4161       MachineBasicBlock *BB = I.getParent();
4162       const DebugLoc &DL = I.getDebugLoc();
4163       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4164 
4165       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
4166           .addFrameIndex(FI)
4167           .addReg(RHSDef->Reg);
4168     }
4169   }
4170 
4171   if (!isSGPR(SAddr))
4172     return None;
4173 
4174   return {{
4175       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
4176       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4177   }};
4178 }
4179 
4180 // Check whether the flat scratch SVS swizzle bug affects this access.
4181 bool AMDGPUInstructionSelector::checkFlatScratchSVSSwizzleBug(
4182     Register VAddr, Register SAddr, uint64_t ImmOffset) const {
4183   if (!Subtarget->hasFlatScratchSVSSwizzleBug())
4184     return false;
4185 
4186   // The bug affects the swizzling of SVS accesses if there is any carry out
4187   // from the two low order bits (i.e. from bit 1 into bit 2) when adding
4188   // voffset to (soffset + inst_offset).
4189   auto VKnown = KnownBits->getKnownBits(VAddr);
4190   auto SKnown = KnownBits::computeForAddSub(
4191       true, false, KnownBits->getKnownBits(SAddr),
4192       KnownBits::makeConstant(APInt(32, ImmOffset)));
4193   uint64_t VMax = VKnown.getMaxValue().getZExtValue();
4194   uint64_t SMax = SKnown.getMaxValue().getZExtValue();
4195   return (VMax & 3) + (SMax & 3) >= 4;
4196 }
4197 
4198 InstructionSelector::ComplexRendererFns
4199 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
4200   Register Addr = Root.getReg();
4201   Register PtrBase;
4202   int64_t ConstOffset;
4203   int64_t ImmOffset = 0;
4204 
4205   // Match the immediate offset first, which canonically is moved as low as
4206   // possible.
4207   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
4208 
4209   if (ConstOffset != 0 &&
4210       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
4211     Addr = PtrBase;
4212     ImmOffset = ConstOffset;
4213   }
4214 
4215   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4216   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4217     return None;
4218 
4219   Register RHS = AddrDef->MI->getOperand(2).getReg();
4220   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4221     return None;
4222 
4223   Register LHS = AddrDef->MI->getOperand(1).getReg();
4224   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4225 
4226   if (checkFlatScratchSVSSwizzleBug(RHS, LHS, ImmOffset))
4227     return None;
4228 
4229   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4230     int FI = LHSDef->MI->getOperand(1).getIndex();
4231     return {{
4232         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4233         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4234         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4235     }};
4236   }
4237 
4238   if (!isSGPR(LHS))
4239     return None;
4240 
4241   return {{
4242       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4243       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4244       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4245   }};
4246 }
4247 
4248 InstructionSelector::ComplexRendererFns
4249 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4250   MachineInstr *MI = Root.getParent();
4251   MachineBasicBlock *MBB = MI->getParent();
4252   MachineFunction *MF = MBB->getParent();
4253   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4254 
4255   int64_t Offset = 0;
4256   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4257       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4258     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4259 
4260     // TODO: Should this be inside the render function? The iterator seems to
4261     // move.
4262     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4263             HighBits)
4264       .addImm(Offset & ~4095);
4265 
4266     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4267                MIB.addReg(Info->getScratchRSrcReg());
4268              },
4269              [=](MachineInstrBuilder &MIB) { // vaddr
4270                MIB.addReg(HighBits);
4271              },
4272              [=](MachineInstrBuilder &MIB) { // soffset
4273                // Use constant zero for soffset and rely on eliminateFrameIndex
4274                // to choose the appropriate frame register if need be.
4275                MIB.addImm(0);
4276              },
4277              [=](MachineInstrBuilder &MIB) { // offset
4278                MIB.addImm(Offset & 4095);
4279              }}};
4280   }
4281 
4282   assert(Offset == 0 || Offset == -1);
4283 
4284   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4285   // offsets.
4286   Optional<int> FI;
4287   Register VAddr = Root.getReg();
4288   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4289     Register PtrBase;
4290     int64_t ConstOffset;
4291     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4292     if (ConstOffset != 0) {
4293       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4294           (!STI.privateMemoryResourceIsRangeChecked() ||
4295            KnownBits->signBitIsZero(PtrBase))) {
4296         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4297         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4298           FI = PtrBaseDef->getOperand(1).getIndex();
4299         else
4300           VAddr = PtrBase;
4301         Offset = ConstOffset;
4302       }
4303     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4304       FI = RootDef->getOperand(1).getIndex();
4305     }
4306   }
4307 
4308   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4309              MIB.addReg(Info->getScratchRSrcReg());
4310            },
4311            [=](MachineInstrBuilder &MIB) { // vaddr
4312              if (FI)
4313                MIB.addFrameIndex(FI.value());
4314              else
4315                MIB.addReg(VAddr);
4316            },
4317            [=](MachineInstrBuilder &MIB) { // soffset
4318              // Use constant zero for soffset and rely on eliminateFrameIndex
4319              // to choose the appropriate frame register if need be.
4320              MIB.addImm(0);
4321            },
4322            [=](MachineInstrBuilder &MIB) { // offset
4323              MIB.addImm(Offset);
4324            }}};
4325 }
4326 
4327 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4328                                                 int64_t Offset) const {
4329   if (!isUInt<16>(Offset))
4330     return false;
4331 
4332   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4333     return true;
4334 
4335   // On Southern Islands instruction with a negative base value and an offset
4336   // don't seem to work.
4337   return KnownBits->signBitIsZero(Base);
4338 }
4339 
4340 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4341                                                  int64_t Offset1,
4342                                                  unsigned Size) const {
4343   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4344     return false;
4345   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4346     return false;
4347 
4348   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4349     return true;
4350 
4351   // On Southern Islands instruction with a negative base value and an offset
4352   // don't seem to work.
4353   return KnownBits->signBitIsZero(Base);
4354 }
4355 
4356 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4357                                                     unsigned ShAmtBits) const {
4358   assert(MI.getOpcode() == TargetOpcode::G_AND);
4359 
4360   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4361   if (!RHS)
4362     return false;
4363 
4364   if (RHS->countTrailingOnes() >= ShAmtBits)
4365     return true;
4366 
4367   const APInt &LHSKnownZeros =
4368       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4369   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4370 }
4371 
4372 // Return the wave level SGPR base address if this is a wave address.
4373 static Register getWaveAddress(const MachineInstr *Def) {
4374   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4375              ? Def->getOperand(1).getReg()
4376              : Register();
4377 }
4378 
4379 InstructionSelector::ComplexRendererFns
4380 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4381     MachineOperand &Root) const {
4382   Register Reg = Root.getReg();
4383   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4384 
4385   const MachineInstr *Def = MRI->getVRegDef(Reg);
4386   if (Register WaveBase = getWaveAddress(Def)) {
4387     return {{
4388         [=](MachineInstrBuilder &MIB) { // rsrc
4389           MIB.addReg(Info->getScratchRSrcReg());
4390         },
4391         [=](MachineInstrBuilder &MIB) { // soffset
4392           MIB.addReg(WaveBase);
4393         },
4394         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4395     }};
4396   }
4397 
4398   int64_t Offset = 0;
4399 
4400   // FIXME: Copy check is a hack
4401   Register BasePtr;
4402   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4403     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4404       return {};
4405     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4406     Register WaveBase = getWaveAddress(BasePtrDef);
4407     if (!WaveBase)
4408       return {};
4409 
4410     return {{
4411         [=](MachineInstrBuilder &MIB) { // rsrc
4412           MIB.addReg(Info->getScratchRSrcReg());
4413         },
4414         [=](MachineInstrBuilder &MIB) { // soffset
4415           MIB.addReg(WaveBase);
4416         },
4417         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4418     }};
4419   }
4420 
4421   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4422       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4423     return {};
4424 
4425   return {{
4426       [=](MachineInstrBuilder &MIB) { // rsrc
4427         MIB.addReg(Info->getScratchRSrcReg());
4428       },
4429       [=](MachineInstrBuilder &MIB) { // soffset
4430         MIB.addImm(0);
4431       },
4432       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4433   }};
4434 }
4435 
4436 std::pair<Register, unsigned>
4437 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4438   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4439   if (!RootDef)
4440     return std::make_pair(Root.getReg(), 0);
4441 
4442   int64_t ConstAddr = 0;
4443 
4444   Register PtrBase;
4445   int64_t Offset;
4446   std::tie(PtrBase, Offset) =
4447     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4448 
4449   if (Offset) {
4450     if (isDSOffsetLegal(PtrBase, Offset)) {
4451       // (add n0, c0)
4452       return std::make_pair(PtrBase, Offset);
4453     }
4454   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4455     // TODO
4456 
4457 
4458   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4459     // TODO
4460 
4461   }
4462 
4463   return std::make_pair(Root.getReg(), 0);
4464 }
4465 
4466 InstructionSelector::ComplexRendererFns
4467 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4468   Register Reg;
4469   unsigned Offset;
4470   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4471   return {{
4472       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4473       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4474     }};
4475 }
4476 
4477 InstructionSelector::ComplexRendererFns
4478 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4479   return selectDSReadWrite2(Root, 4);
4480 }
4481 
4482 InstructionSelector::ComplexRendererFns
4483 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4484   return selectDSReadWrite2(Root, 8);
4485 }
4486 
4487 InstructionSelector::ComplexRendererFns
4488 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4489                                               unsigned Size) const {
4490   Register Reg;
4491   unsigned Offset;
4492   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4493   return {{
4494       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4495       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4496       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4497     }};
4498 }
4499 
4500 std::pair<Register, unsigned>
4501 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4502                                                   unsigned Size) const {
4503   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4504   if (!RootDef)
4505     return std::make_pair(Root.getReg(), 0);
4506 
4507   int64_t ConstAddr = 0;
4508 
4509   Register PtrBase;
4510   int64_t Offset;
4511   std::tie(PtrBase, Offset) =
4512     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4513 
4514   if (Offset) {
4515     int64_t OffsetValue0 = Offset;
4516     int64_t OffsetValue1 = Offset + Size;
4517     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4518       // (add n0, c0)
4519       return std::make_pair(PtrBase, OffsetValue0 / Size);
4520     }
4521   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4522     // TODO
4523 
4524   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4525     // TODO
4526 
4527   }
4528 
4529   return std::make_pair(Root.getReg(), 0);
4530 }
4531 
4532 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4533 /// the base value with the constant offset. There may be intervening copies
4534 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4535 /// not match the pattern.
4536 std::pair<Register, int64_t>
4537 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4538   Register Root, const MachineRegisterInfo &MRI) const {
4539   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4540   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4541     return {Root, 0};
4542 
4543   MachineOperand &RHS = RootI->getOperand(2);
4544   Optional<ValueAndVReg> MaybeOffset =
4545       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4546   if (!MaybeOffset)
4547     return {Root, 0};
4548   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4549 }
4550 
4551 static void addZeroImm(MachineInstrBuilder &MIB) {
4552   MIB.addImm(0);
4553 }
4554 
4555 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4556 /// BasePtr is not valid, a null base pointer will be used.
4557 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4558                           uint32_t FormatLo, uint32_t FormatHi,
4559                           Register BasePtr) {
4560   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4561   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4562   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4563   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4564 
4565   B.buildInstr(AMDGPU::S_MOV_B32)
4566     .addDef(RSrc2)
4567     .addImm(FormatLo);
4568   B.buildInstr(AMDGPU::S_MOV_B32)
4569     .addDef(RSrc3)
4570     .addImm(FormatHi);
4571 
4572   // Build the half of the subregister with the constants before building the
4573   // full 128-bit register. If we are building multiple resource descriptors,
4574   // this will allow CSEing of the 2-component register.
4575   B.buildInstr(AMDGPU::REG_SEQUENCE)
4576     .addDef(RSrcHi)
4577     .addReg(RSrc2)
4578     .addImm(AMDGPU::sub0)
4579     .addReg(RSrc3)
4580     .addImm(AMDGPU::sub1);
4581 
4582   Register RSrcLo = BasePtr;
4583   if (!BasePtr) {
4584     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4585     B.buildInstr(AMDGPU::S_MOV_B64)
4586       .addDef(RSrcLo)
4587       .addImm(0);
4588   }
4589 
4590   B.buildInstr(AMDGPU::REG_SEQUENCE)
4591     .addDef(RSrc)
4592     .addReg(RSrcLo)
4593     .addImm(AMDGPU::sub0_sub1)
4594     .addReg(RSrcHi)
4595     .addImm(AMDGPU::sub2_sub3);
4596 
4597   return RSrc;
4598 }
4599 
4600 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4601                                 const SIInstrInfo &TII, Register BasePtr) {
4602   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4603 
4604   // FIXME: Why are half the "default" bits ignored based on the addressing
4605   // mode?
4606   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4607 }
4608 
4609 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4610                                const SIInstrInfo &TII, Register BasePtr) {
4611   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4612 
4613   // FIXME: Why are half the "default" bits ignored based on the addressing
4614   // mode?
4615   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4616 }
4617 
4618 AMDGPUInstructionSelector::MUBUFAddressData
4619 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4620   MUBUFAddressData Data;
4621   Data.N0 = Src;
4622 
4623   Register PtrBase;
4624   int64_t Offset;
4625 
4626   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4627   if (isUInt<32>(Offset)) {
4628     Data.N0 = PtrBase;
4629     Data.Offset = Offset;
4630   }
4631 
4632   if (MachineInstr *InputAdd
4633       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4634     Data.N2 = InputAdd->getOperand(1).getReg();
4635     Data.N3 = InputAdd->getOperand(2).getReg();
4636 
4637     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4638     // FIXME: Don't know this was defined by operand 0
4639     //
4640     // TODO: Remove this when we have copy folding optimizations after
4641     // RegBankSelect.
4642     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4643     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4644   }
4645 
4646   return Data;
4647 }
4648 
4649 /// Return if the addr64 mubuf mode should be used for the given address.
4650 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4651   // (ptr_add N2, N3) -> addr64, or
4652   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4653   if (Addr.N2)
4654     return true;
4655 
4656   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4657   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4658 }
4659 
4660 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4661 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4662 /// component.
4663 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4664   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4665   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4666     return;
4667 
4668   // Illegal offset, store it in soffset.
4669   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4670   B.buildInstr(AMDGPU::S_MOV_B32)
4671     .addDef(SOffset)
4672     .addImm(ImmOffset);
4673   ImmOffset = 0;
4674 }
4675 
4676 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4677   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4678   Register &SOffset, int64_t &Offset) const {
4679   // FIXME: Predicates should stop this from reaching here.
4680   // addr64 bit was removed for volcanic islands.
4681   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4682     return false;
4683 
4684   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4685   if (!shouldUseAddr64(AddrData))
4686     return false;
4687 
4688   Register N0 = AddrData.N0;
4689   Register N2 = AddrData.N2;
4690   Register N3 = AddrData.N3;
4691   Offset = AddrData.Offset;
4692 
4693   // Base pointer for the SRD.
4694   Register SRDPtr;
4695 
4696   if (N2) {
4697     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4698       assert(N3);
4699       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4700         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4701         // addr64, and construct the default resource from a 0 address.
4702         VAddr = N0;
4703       } else {
4704         SRDPtr = N3;
4705         VAddr = N2;
4706       }
4707     } else {
4708       // N2 is not divergent.
4709       SRDPtr = N2;
4710       VAddr = N3;
4711     }
4712   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4713     // Use the default null pointer in the resource
4714     VAddr = N0;
4715   } else {
4716     // N0 -> offset, or
4717     // (N0 + C1) -> offset
4718     SRDPtr = N0;
4719   }
4720 
4721   MachineIRBuilder B(*Root.getParent());
4722   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4723   splitIllegalMUBUFOffset(B, SOffset, Offset);
4724   return true;
4725 }
4726 
4727 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4728   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4729   int64_t &Offset) const {
4730 
4731   // FIXME: Pattern should not reach here.
4732   if (STI.useFlatForGlobal())
4733     return false;
4734 
4735   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4736   if (shouldUseAddr64(AddrData))
4737     return false;
4738 
4739   // N0 -> offset, or
4740   // (N0 + C1) -> offset
4741   Register SRDPtr = AddrData.N0;
4742   Offset = AddrData.Offset;
4743 
4744   // TODO: Look through extensions for 32-bit soffset.
4745   MachineIRBuilder B(*Root.getParent());
4746 
4747   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4748   splitIllegalMUBUFOffset(B, SOffset, Offset);
4749   return true;
4750 }
4751 
4752 InstructionSelector::ComplexRendererFns
4753 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4754   Register VAddr;
4755   Register RSrcReg;
4756   Register SOffset;
4757   int64_t Offset = 0;
4758 
4759   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4760     return {};
4761 
4762   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4763   // pattern.
4764   return {{
4765       [=](MachineInstrBuilder &MIB) {  // rsrc
4766         MIB.addReg(RSrcReg);
4767       },
4768       [=](MachineInstrBuilder &MIB) { // vaddr
4769         MIB.addReg(VAddr);
4770       },
4771       [=](MachineInstrBuilder &MIB) { // soffset
4772         if (SOffset)
4773           MIB.addReg(SOffset);
4774         else
4775           MIB.addImm(0);
4776       },
4777       [=](MachineInstrBuilder &MIB) { // offset
4778         MIB.addImm(Offset);
4779       },
4780       addZeroImm, //  cpol
4781       addZeroImm, //  tfe
4782       addZeroImm  //  swz
4783     }};
4784 }
4785 
4786 InstructionSelector::ComplexRendererFns
4787 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4788   Register RSrcReg;
4789   Register SOffset;
4790   int64_t Offset = 0;
4791 
4792   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4793     return {};
4794 
4795   return {{
4796       [=](MachineInstrBuilder &MIB) {  // rsrc
4797         MIB.addReg(RSrcReg);
4798       },
4799       [=](MachineInstrBuilder &MIB) { // soffset
4800         if (SOffset)
4801           MIB.addReg(SOffset);
4802         else
4803           MIB.addImm(0);
4804       },
4805       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4806       addZeroImm, //  cpol
4807       addZeroImm, //  tfe
4808       addZeroImm, //  swz
4809     }};
4810 }
4811 
4812 InstructionSelector::ComplexRendererFns
4813 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4814   Register VAddr;
4815   Register RSrcReg;
4816   Register SOffset;
4817   int64_t Offset = 0;
4818 
4819   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4820     return {};
4821 
4822   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4823   // pattern.
4824   return {{
4825       [=](MachineInstrBuilder &MIB) {  // rsrc
4826         MIB.addReg(RSrcReg);
4827       },
4828       [=](MachineInstrBuilder &MIB) { // vaddr
4829         MIB.addReg(VAddr);
4830       },
4831       [=](MachineInstrBuilder &MIB) { // soffset
4832         if (SOffset)
4833           MIB.addReg(SOffset);
4834         else
4835           MIB.addImm(0);
4836       },
4837       [=](MachineInstrBuilder &MIB) { // offset
4838         MIB.addImm(Offset);
4839       },
4840       [=](MachineInstrBuilder &MIB) {
4841         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4842       }
4843     }};
4844 }
4845 
4846 InstructionSelector::ComplexRendererFns
4847 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4848   Register RSrcReg;
4849   Register SOffset;
4850   int64_t Offset = 0;
4851 
4852   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4853     return {};
4854 
4855   return {{
4856       [=](MachineInstrBuilder &MIB) {  // rsrc
4857         MIB.addReg(RSrcReg);
4858       },
4859       [=](MachineInstrBuilder &MIB) { // soffset
4860         if (SOffset)
4861           MIB.addReg(SOffset);
4862         else
4863           MIB.addImm(0);
4864       },
4865       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4866       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4867     }};
4868 }
4869 
4870 /// Get an immediate that must be 32-bits, and treated as zero extended.
4871 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4872                                                const MachineRegisterInfo &MRI) {
4873   // getIConstantVRegVal sexts any values, so see if that matters.
4874   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4875   if (!OffsetVal || !isInt<32>(*OffsetVal))
4876     return None;
4877   return Lo_32(*OffsetVal);
4878 }
4879 
4880 InstructionSelector::ComplexRendererFns
4881 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4882   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4883   if (!OffsetVal)
4884     return {};
4885 
4886   Optional<int64_t> EncodedImm =
4887       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4888   if (!EncodedImm)
4889     return {};
4890 
4891   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4892 }
4893 
4894 InstructionSelector::ComplexRendererFns
4895 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4896   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4897 
4898   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4899   if (!OffsetVal)
4900     return {};
4901 
4902   Optional<int64_t> EncodedImm
4903     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4904   if (!EncodedImm)
4905     return {};
4906 
4907   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4908 }
4909 
4910 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4911                                                  const MachineInstr &MI,
4912                                                  int OpIdx) const {
4913   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4914          "Expected G_CONSTANT");
4915   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4916 }
4917 
4918 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4919                                                 const MachineInstr &MI,
4920                                                 int OpIdx) const {
4921   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4922          "Expected G_CONSTANT");
4923   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4924 }
4925 
4926 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4927                                                  const MachineInstr &MI,
4928                                                  int OpIdx) const {
4929   assert(OpIdx == -1);
4930 
4931   const MachineOperand &Op = MI.getOperand(1);
4932   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4933     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4934   else {
4935     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4936     MIB.addImm(Op.getCImm()->getSExtValue());
4937   }
4938 }
4939 
4940 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4941                                                 const MachineInstr &MI,
4942                                                 int OpIdx) const {
4943   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4944          "Expected G_CONSTANT");
4945   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4946 }
4947 
4948 /// This only really exists to satisfy DAG type checking machinery, so is a
4949 /// no-op here.
4950 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4951                                                 const MachineInstr &MI,
4952                                                 int OpIdx) const {
4953   MIB.addImm(MI.getOperand(OpIdx).getImm());
4954 }
4955 
4956 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4957                                                   const MachineInstr &MI,
4958                                                   int OpIdx) const {
4959   assert(OpIdx >= 0 && "expected to match an immediate operand");
4960   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4961 }
4962 
4963 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4964                                                  const MachineInstr &MI,
4965                                                  int OpIdx) const {
4966   assert(OpIdx >= 0 && "expected to match an immediate operand");
4967   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4968 }
4969 
4970 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4971                                              const MachineInstr &MI,
4972                                              int OpIdx) const {
4973   assert(OpIdx >= 0 && "expected to match an immediate operand");
4974   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4975 }
4976 
4977 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4978                                                  const MachineInstr &MI,
4979                                                  int OpIdx) const {
4980   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4981 }
4982 
4983 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4984   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4985 }
4986 
4987 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4988   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4989 }
4990 
4991 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4992   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4993 }
4994 
4995 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4996   return TII.isInlineConstant(Imm);
4997 }
4998