1 //===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the targeting of the InstructionSelector class for
10 /// AMDGPU.
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
13 
14 #include "AMDGPUInstructionSelector.h"
15 #include "AMDGPU.h"
16 #include "AMDGPUGlobalISelUtils.h"
17 #include "AMDGPUInstrInfo.h"
18 #include "AMDGPURegisterBankInfo.h"
19 #include "AMDGPUTargetMachine.h"
20 #include "SIMachineFunctionInfo.h"
21 #include "Utils/AMDGPUBaseInfo.h"
22 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
23 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
24 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
25 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
26 #include "llvm/CodeGen/MachineFrameInfo.h"
27 #include "llvm/IR/DiagnosticInfo.h"
28 #include "llvm/IR/IntrinsicsAMDGPU.h"
29 
30 #define DEBUG_TYPE "amdgpu-isel"
31 
32 using namespace llvm;
33 using namespace MIPatternMatch;
34 
35 static cl::opt<bool> AllowRiskySelect(
36   "amdgpu-global-isel-risky-select",
37   cl::desc("Allow GlobalISel to select cases that are likely to not work yet"),
38   cl::init(false),
39   cl::ReallyHidden);
40 
41 #define GET_GLOBALISEL_IMPL
42 #define AMDGPUSubtarget GCNSubtarget
43 #include "AMDGPUGenGlobalISel.inc"
44 #undef GET_GLOBALISEL_IMPL
45 #undef AMDGPUSubtarget
46 
47 AMDGPUInstructionSelector::AMDGPUInstructionSelector(
48     const GCNSubtarget &STI, const AMDGPURegisterBankInfo &RBI,
49     const AMDGPUTargetMachine &TM)
50     : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
51       STI(STI),
52       EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
53 #define GET_GLOBALISEL_PREDICATES_INIT
54 #include "AMDGPUGenGlobalISel.inc"
55 #undef GET_GLOBALISEL_PREDICATES_INIT
56 #define GET_GLOBALISEL_TEMPORARIES_INIT
57 #include "AMDGPUGenGlobalISel.inc"
58 #undef GET_GLOBALISEL_TEMPORARIES_INIT
59 {
60 }
61 
62 const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }
63 
64 void AMDGPUInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
65                                         CodeGenCoverage &CoverageInfo,
66                                         ProfileSummaryInfo *PSI,
67                                         BlockFrequencyInfo *BFI) {
68   MRI = &MF.getRegInfo();
69   Subtarget = &MF.getSubtarget<GCNSubtarget>();
70   InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
71 }
72 
73 bool AMDGPUInstructionSelector::isVCC(Register Reg,
74                                       const MachineRegisterInfo &MRI) const {
75   // The verifier is oblivious to s1 being a valid value for wavesize registers.
76   if (Reg.isPhysical())
77     return false;
78 
79   auto &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
80   const TargetRegisterClass *RC =
81       RegClassOrBank.dyn_cast<const TargetRegisterClass*>();
82   if (RC) {
83     const LLT Ty = MRI.getType(Reg);
84     if (!Ty.isValid() || Ty.getSizeInBits() != 1)
85       return false;
86     // G_TRUNC s1 result is never vcc.
87     return MRI.getVRegDef(Reg)->getOpcode() != AMDGPU::G_TRUNC &&
88            RC->hasSuperClassEq(TRI.getBoolRC());
89   }
90 
91   const RegisterBank *RB = RegClassOrBank.get<const RegisterBank *>();
92   return RB->getID() == AMDGPU::VCCRegBankID;
93 }
94 
95 bool AMDGPUInstructionSelector::constrainCopyLikeIntrin(MachineInstr &MI,
96                                                         unsigned NewOpc) const {
97   MI.setDesc(TII.get(NewOpc));
98   MI.removeOperand(1); // Remove intrinsic ID.
99   MI.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
100 
101   MachineOperand &Dst = MI.getOperand(0);
102   MachineOperand &Src = MI.getOperand(1);
103 
104   // TODO: This should be legalized to s32 if needed
105   if (MRI->getType(Dst.getReg()) == LLT::scalar(1))
106     return false;
107 
108   const TargetRegisterClass *DstRC
109     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
110   const TargetRegisterClass *SrcRC
111     = TRI.getConstrainedRegClassForOperand(Src, *MRI);
112   if (!DstRC || DstRC != SrcRC)
113     return false;
114 
115   return RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI) &&
116          RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI);
117 }
118 
119 bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
120   const DebugLoc &DL = I.getDebugLoc();
121   MachineBasicBlock *BB = I.getParent();
122   I.setDesc(TII.get(TargetOpcode::COPY));
123 
124   const MachineOperand &Src = I.getOperand(1);
125   MachineOperand &Dst = I.getOperand(0);
126   Register DstReg = Dst.getReg();
127   Register SrcReg = Src.getReg();
128 
129   if (isVCC(DstReg, *MRI)) {
130     if (SrcReg == AMDGPU::SCC) {
131       const TargetRegisterClass *RC
132         = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
133       if (!RC)
134         return true;
135       return RBI.constrainGenericRegister(DstReg, *RC, *MRI);
136     }
137 
138     if (!isVCC(SrcReg, *MRI)) {
139       // TODO: Should probably leave the copy and let copyPhysReg expand it.
140       if (!RBI.constrainGenericRegister(DstReg, *TRI.getBoolRC(), *MRI))
141         return false;
142 
143       const TargetRegisterClass *SrcRC
144         = TRI.getConstrainedRegClassForOperand(Src, *MRI);
145 
146       Optional<ValueAndVReg> ConstVal =
147           getIConstantVRegValWithLookThrough(SrcReg, *MRI, true);
148       if (ConstVal) {
149         unsigned MovOpc =
150             STI.isWave64() ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
151         BuildMI(*BB, &I, DL, TII.get(MovOpc), DstReg)
152             .addImm(ConstVal->Value.getBoolValue() ? -1 : 0);
153       } else {
154         Register MaskedReg = MRI->createVirtualRegister(SrcRC);
155 
156         // We can't trust the high bits at this point, so clear them.
157 
158         // TODO: Skip masking high bits if def is known boolean.
159 
160         unsigned AndOpc =
161             TRI.isSGPRClass(SrcRC) ? AMDGPU::S_AND_B32 : AMDGPU::V_AND_B32_e32;
162         BuildMI(*BB, &I, DL, TII.get(AndOpc), MaskedReg)
163             .addImm(1)
164             .addReg(SrcReg);
165         BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CMP_NE_U32_e64), DstReg)
166             .addImm(0)
167             .addReg(MaskedReg);
168       }
169 
170       if (!MRI->getRegClassOrNull(SrcReg))
171         MRI->setRegClass(SrcReg, SrcRC);
172       I.eraseFromParent();
173       return true;
174     }
175 
176     const TargetRegisterClass *RC =
177       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
178     if (RC && !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
179       return false;
180 
181     return true;
182   }
183 
184   for (const MachineOperand &MO : I.operands()) {
185     if (MO.getReg().isPhysical())
186       continue;
187 
188     const TargetRegisterClass *RC =
189             TRI.getConstrainedRegClassForOperand(MO, *MRI);
190     if (!RC)
191       continue;
192     RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI);
193   }
194   return true;
195 }
196 
197 bool AMDGPUInstructionSelector::selectPHI(MachineInstr &I) const {
198   const Register DefReg = I.getOperand(0).getReg();
199   const LLT DefTy = MRI->getType(DefReg);
200   if (DefTy == LLT::scalar(1)) {
201     if (!AllowRiskySelect) {
202       LLVM_DEBUG(dbgs() << "Skipping risky boolean phi\n");
203       return false;
204     }
205 
206     LLVM_DEBUG(dbgs() << "Selecting risky boolean phi\n");
207   }
208 
209   // TODO: Verify this doesn't have insane operands (i.e. VGPR to SGPR copy)
210 
211   const RegClassOrRegBank &RegClassOrBank =
212     MRI->getRegClassOrRegBank(DefReg);
213 
214   const TargetRegisterClass *DefRC
215     = RegClassOrBank.dyn_cast<const TargetRegisterClass *>();
216   if (!DefRC) {
217     if (!DefTy.isValid()) {
218       LLVM_DEBUG(dbgs() << "PHI operand has no type, not a gvreg?\n");
219       return false;
220     }
221 
222     const RegisterBank &RB = *RegClassOrBank.get<const RegisterBank *>();
223     DefRC = TRI.getRegClassForTypeOnBank(DefTy, RB);
224     if (!DefRC) {
225       LLVM_DEBUG(dbgs() << "PHI operand has unexpected size/bank\n");
226       return false;
227     }
228   }
229 
230   // TODO: Verify that all registers have the same bank
231   I.setDesc(TII.get(TargetOpcode::PHI));
232   return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
233 }
234 
235 MachineOperand
236 AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
237                                            const TargetRegisterClass &SubRC,
238                                            unsigned SubIdx) const {
239 
240   MachineInstr *MI = MO.getParent();
241   MachineBasicBlock *BB = MO.getParent()->getParent();
242   Register DstReg = MRI->createVirtualRegister(&SubRC);
243 
244   if (MO.isReg()) {
245     unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
246     Register Reg = MO.getReg();
247     BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
248             .addReg(Reg, 0, ComposedSubIdx);
249 
250     return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
251                                      MO.isKill(), MO.isDead(), MO.isUndef(),
252                                      MO.isEarlyClobber(), 0, MO.isDebug(),
253                                      MO.isInternalRead());
254   }
255 
256   assert(MO.isImm());
257 
258   APInt Imm(64, MO.getImm());
259 
260   switch (SubIdx) {
261   default:
262     llvm_unreachable("do not know to split immediate with this sub index.");
263   case AMDGPU::sub0:
264     return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
265   case AMDGPU::sub1:
266     return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
267   }
268 }
269 
270 static unsigned getLogicalBitOpcode(unsigned Opc, bool Is64) {
271   switch (Opc) {
272   case AMDGPU::G_AND:
273     return Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
274   case AMDGPU::G_OR:
275     return Is64 ? AMDGPU::S_OR_B64 : AMDGPU::S_OR_B32;
276   case AMDGPU::G_XOR:
277     return Is64 ? AMDGPU::S_XOR_B64 : AMDGPU::S_XOR_B32;
278   default:
279     llvm_unreachable("not a bit op");
280   }
281 }
282 
283 bool AMDGPUInstructionSelector::selectG_AND_OR_XOR(MachineInstr &I) const {
284   Register DstReg = I.getOperand(0).getReg();
285   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
286 
287   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
288   if (DstRB->getID() != AMDGPU::SGPRRegBankID &&
289       DstRB->getID() != AMDGPU::VCCRegBankID)
290     return false;
291 
292   bool Is64 = Size > 32 || (DstRB->getID() == AMDGPU::VCCRegBankID &&
293                             STI.isWave64());
294   I.setDesc(TII.get(getLogicalBitOpcode(I.getOpcode(), Is64)));
295 
296   // Dead implicit-def of scc
297   I.addOperand(MachineOperand::CreateReg(AMDGPU::SCC, true, // isDef
298                                          true, // isImp
299                                          false, // isKill
300                                          true)); // isDead
301   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
302 }
303 
304 bool AMDGPUInstructionSelector::selectG_ADD_SUB(MachineInstr &I) const {
305   MachineBasicBlock *BB = I.getParent();
306   MachineFunction *MF = BB->getParent();
307   Register DstReg = I.getOperand(0).getReg();
308   const DebugLoc &DL = I.getDebugLoc();
309   LLT Ty = MRI->getType(DstReg);
310   if (Ty.isVector())
311     return false;
312 
313   unsigned Size = Ty.getSizeInBits();
314   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
315   const bool IsSALU = DstRB->getID() == AMDGPU::SGPRRegBankID;
316   const bool Sub = I.getOpcode() == TargetOpcode::G_SUB;
317 
318   if (Size == 32) {
319     if (IsSALU) {
320       const unsigned Opc = Sub ? AMDGPU::S_SUB_U32 : AMDGPU::S_ADD_U32;
321       MachineInstr *Add =
322         BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
323         .add(I.getOperand(1))
324         .add(I.getOperand(2));
325       I.eraseFromParent();
326       return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
327     }
328 
329     if (STI.hasAddNoCarry()) {
330       const unsigned Opc = Sub ? AMDGPU::V_SUB_U32_e64 : AMDGPU::V_ADD_U32_e64;
331       I.setDesc(TII.get(Opc));
332       I.addOperand(*MF, MachineOperand::CreateImm(0));
333       I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
334       return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
335     }
336 
337     const unsigned Opc = Sub ? AMDGPU::V_SUB_CO_U32_e64 : AMDGPU::V_ADD_CO_U32_e64;
338 
339     Register UnusedCarry = MRI->createVirtualRegister(TRI.getWaveMaskRegClass());
340     MachineInstr *Add
341       = BuildMI(*BB, &I, DL, TII.get(Opc), DstReg)
342       .addDef(UnusedCarry, RegState::Dead)
343       .add(I.getOperand(1))
344       .add(I.getOperand(2))
345       .addImm(0);
346     I.eraseFromParent();
347     return constrainSelectedInstRegOperands(*Add, TII, TRI, RBI);
348   }
349 
350   assert(!Sub && "illegal sub should not reach here");
351 
352   const TargetRegisterClass &RC
353     = IsSALU ? AMDGPU::SReg_64_XEXECRegClass : AMDGPU::VReg_64RegClass;
354   const TargetRegisterClass &HalfRC
355     = IsSALU ? AMDGPU::SReg_32RegClass : AMDGPU::VGPR_32RegClass;
356 
357   MachineOperand Lo1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub0));
358   MachineOperand Lo2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub0));
359   MachineOperand Hi1(getSubOperand64(I.getOperand(1), HalfRC, AMDGPU::sub1));
360   MachineOperand Hi2(getSubOperand64(I.getOperand(2), HalfRC, AMDGPU::sub1));
361 
362   Register DstLo = MRI->createVirtualRegister(&HalfRC);
363   Register DstHi = MRI->createVirtualRegister(&HalfRC);
364 
365   if (IsSALU) {
366     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
367       .add(Lo1)
368       .add(Lo2);
369     BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
370       .add(Hi1)
371       .add(Hi2);
372   } else {
373     const TargetRegisterClass *CarryRC = TRI.getWaveMaskRegClass();
374     Register CarryReg = MRI->createVirtualRegister(CarryRC);
375     BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADD_CO_U32_e64), DstLo)
376       .addDef(CarryReg)
377       .add(Lo1)
378       .add(Lo2)
379       .addImm(0);
380     MachineInstr *Addc = BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_ADDC_U32_e64), DstHi)
381       .addDef(MRI->createVirtualRegister(CarryRC), RegState::Dead)
382       .add(Hi1)
383       .add(Hi2)
384       .addReg(CarryReg, RegState::Kill)
385       .addImm(0);
386 
387     if (!constrainSelectedInstRegOperands(*Addc, TII, TRI, RBI))
388       return false;
389   }
390 
391   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
392     .addReg(DstLo)
393     .addImm(AMDGPU::sub0)
394     .addReg(DstHi)
395     .addImm(AMDGPU::sub1);
396 
397 
398   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
399     return false;
400 
401   I.eraseFromParent();
402   return true;
403 }
404 
405 bool AMDGPUInstructionSelector::selectG_UADDO_USUBO_UADDE_USUBE(
406   MachineInstr &I) const {
407   MachineBasicBlock *BB = I.getParent();
408   MachineFunction *MF = BB->getParent();
409   const DebugLoc &DL = I.getDebugLoc();
410   Register Dst0Reg = I.getOperand(0).getReg();
411   Register Dst1Reg = I.getOperand(1).getReg();
412   const bool IsAdd = I.getOpcode() == AMDGPU::G_UADDO ||
413                      I.getOpcode() == AMDGPU::G_UADDE;
414   const bool HasCarryIn = I.getOpcode() == AMDGPU::G_UADDE ||
415                           I.getOpcode() == AMDGPU::G_USUBE;
416 
417   if (isVCC(Dst1Reg, *MRI)) {
418     unsigned NoCarryOpc =
419         IsAdd ? AMDGPU::V_ADD_CO_U32_e64 : AMDGPU::V_SUB_CO_U32_e64;
420     unsigned CarryOpc = IsAdd ? AMDGPU::V_ADDC_U32_e64 : AMDGPU::V_SUBB_U32_e64;
421     I.setDesc(TII.get(HasCarryIn ? CarryOpc : NoCarryOpc));
422     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
423     I.addOperand(*MF, MachineOperand::CreateImm(0));
424     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
425   }
426 
427   Register Src0Reg = I.getOperand(2).getReg();
428   Register Src1Reg = I.getOperand(3).getReg();
429 
430   if (HasCarryIn) {
431     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
432       .addReg(I.getOperand(4).getReg());
433   }
434 
435   unsigned NoCarryOpc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
436   unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;
437 
438   BuildMI(*BB, &I, DL, TII.get(HasCarryIn ? CarryOpc : NoCarryOpc), Dst0Reg)
439     .add(I.getOperand(2))
440     .add(I.getOperand(3));
441   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), Dst1Reg)
442     .addReg(AMDGPU::SCC);
443 
444   if (!MRI->getRegClassOrNull(Dst1Reg))
445     MRI->setRegClass(Dst1Reg, &AMDGPU::SReg_32RegClass);
446 
447   if (!RBI.constrainGenericRegister(Dst0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
448       !RBI.constrainGenericRegister(Src0Reg, AMDGPU::SReg_32RegClass, *MRI) ||
449       !RBI.constrainGenericRegister(Src1Reg, AMDGPU::SReg_32RegClass, *MRI))
450     return false;
451 
452   if (HasCarryIn &&
453       !RBI.constrainGenericRegister(I.getOperand(4).getReg(),
454                                     AMDGPU::SReg_32RegClass, *MRI))
455     return false;
456 
457   I.eraseFromParent();
458   return true;
459 }
460 
461 bool AMDGPUInstructionSelector::selectG_AMDGPU_MAD_64_32(
462     MachineInstr &I) const {
463   MachineBasicBlock *BB = I.getParent();
464   MachineFunction *MF = BB->getParent();
465   const bool IsUnsigned = I.getOpcode() == AMDGPU::G_AMDGPU_MAD_U64_U32;
466 
467   I.setDesc(TII.get(IsUnsigned ? AMDGPU::V_MAD_U64_U32_e64
468                                : AMDGPU::V_MAD_I64_I32_e64));
469   I.addOperand(*MF, MachineOperand::CreateImm(0));
470   I.addImplicitDefUseOperands(*MF);
471   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
472 }
473 
474 // TODO: We should probably legalize these to only using 32-bit results.
475 bool AMDGPUInstructionSelector::selectG_EXTRACT(MachineInstr &I) const {
476   MachineBasicBlock *BB = I.getParent();
477   Register DstReg = I.getOperand(0).getReg();
478   Register SrcReg = I.getOperand(1).getReg();
479   LLT DstTy = MRI->getType(DstReg);
480   LLT SrcTy = MRI->getType(SrcReg);
481   const unsigned SrcSize = SrcTy.getSizeInBits();
482   unsigned DstSize = DstTy.getSizeInBits();
483 
484   // TODO: Should handle any multiple of 32 offset.
485   unsigned Offset = I.getOperand(2).getImm();
486   if (Offset % 32 != 0 || DstSize > 128)
487     return false;
488 
489   // 16-bit operations really use 32-bit registers.
490   // FIXME: Probably should not allow 16-bit G_EXTRACT results.
491   if (DstSize == 16)
492     DstSize = 32;
493 
494   const TargetRegisterClass *DstRC =
495     TRI.getConstrainedRegClassForOperand(I.getOperand(0), *MRI);
496   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
497     return false;
498 
499   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
500   const TargetRegisterClass *SrcRC =
501       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
502   if (!SrcRC)
503     return false;
504   unsigned SubReg = SIRegisterInfo::getSubRegFromChannel(Offset / 32,
505                                                          DstSize / 32);
506   SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubReg);
507   if (!SrcRC)
508     return false;
509 
510   SrcReg = constrainOperandRegClass(*MF, TRI, *MRI, TII, RBI, I,
511                                     *SrcRC, I.getOperand(1));
512   const DebugLoc &DL = I.getDebugLoc();
513   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::COPY), DstReg)
514     .addReg(SrcReg, 0, SubReg);
515 
516   I.eraseFromParent();
517   return true;
518 }
519 
520 bool AMDGPUInstructionSelector::selectG_MERGE_VALUES(MachineInstr &MI) const {
521   MachineBasicBlock *BB = MI.getParent();
522   Register DstReg = MI.getOperand(0).getReg();
523   LLT DstTy = MRI->getType(DstReg);
524   LLT SrcTy = MRI->getType(MI.getOperand(1).getReg());
525 
526   const unsigned SrcSize = SrcTy.getSizeInBits();
527   if (SrcSize < 32)
528     return selectImpl(MI, *CoverageInfo);
529 
530   const DebugLoc &DL = MI.getDebugLoc();
531   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
532   const unsigned DstSize = DstTy.getSizeInBits();
533   const TargetRegisterClass *DstRC =
534       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
535   if (!DstRC)
536     return false;
537 
538   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(DstRC, SrcSize / 8);
539   MachineInstrBuilder MIB =
540     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::REG_SEQUENCE), DstReg);
541   for (int I = 0, E = MI.getNumOperands() - 1; I != E; ++I) {
542     MachineOperand &Src = MI.getOperand(I + 1);
543     MIB.addReg(Src.getReg(), getUndefRegState(Src.isUndef()));
544     MIB.addImm(SubRegs[I]);
545 
546     const TargetRegisterClass *SrcRC
547       = TRI.getConstrainedRegClassForOperand(Src, *MRI);
548     if (SrcRC && !RBI.constrainGenericRegister(Src.getReg(), *SrcRC, *MRI))
549       return false;
550   }
551 
552   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
553     return false;
554 
555   MI.eraseFromParent();
556   return true;
557 }
558 
559 bool AMDGPUInstructionSelector::selectG_UNMERGE_VALUES(MachineInstr &MI) const {
560   MachineBasicBlock *BB = MI.getParent();
561   const int NumDst = MI.getNumOperands() - 1;
562 
563   MachineOperand &Src = MI.getOperand(NumDst);
564 
565   Register SrcReg = Src.getReg();
566   Register DstReg0 = MI.getOperand(0).getReg();
567   LLT DstTy = MRI->getType(DstReg0);
568   LLT SrcTy = MRI->getType(SrcReg);
569 
570   const unsigned DstSize = DstTy.getSizeInBits();
571   const unsigned SrcSize = SrcTy.getSizeInBits();
572   const DebugLoc &DL = MI.getDebugLoc();
573   const RegisterBank *SrcBank = RBI.getRegBank(SrcReg, *MRI, TRI);
574 
575   const TargetRegisterClass *SrcRC =
576       TRI.getRegClassForSizeOnBank(SrcSize, *SrcBank);
577   if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
578     return false;
579 
580   // Note we could have mixed SGPR and VGPR destination banks for an SGPR
581   // source, and this relies on the fact that the same subregister indices are
582   // used for both.
583   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SrcRC, DstSize / 8);
584   for (int I = 0, E = NumDst; I != E; ++I) {
585     MachineOperand &Dst = MI.getOperand(I);
586     BuildMI(*BB, &MI, DL, TII.get(TargetOpcode::COPY), Dst.getReg())
587       .addReg(SrcReg, 0, SubRegs[I]);
588 
589     // Make sure the subregister index is valid for the source register.
590     SrcRC = TRI.getSubClassWithSubReg(SrcRC, SubRegs[I]);
591     if (!SrcRC || !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI))
592       return false;
593 
594     const TargetRegisterClass *DstRC =
595       TRI.getConstrainedRegClassForOperand(Dst, *MRI);
596     if (DstRC && !RBI.constrainGenericRegister(Dst.getReg(), *DstRC, *MRI))
597       return false;
598   }
599 
600   MI.eraseFromParent();
601   return true;
602 }
603 
604 bool AMDGPUInstructionSelector::selectG_BUILD_VECTOR_TRUNC(
605   MachineInstr &MI) const {
606   if (selectImpl(MI, *CoverageInfo))
607     return true;
608 
609   const LLT S32 = LLT::scalar(32);
610   const LLT V2S16 = LLT::fixed_vector(2, 16);
611 
612   Register Dst = MI.getOperand(0).getReg();
613   if (MRI->getType(Dst) != V2S16)
614     return false;
615 
616   const RegisterBank *DstBank = RBI.getRegBank(Dst, *MRI, TRI);
617   if (DstBank->getID() != AMDGPU::SGPRRegBankID)
618     return false;
619 
620   Register Src0 = MI.getOperand(1).getReg();
621   Register Src1 = MI.getOperand(2).getReg();
622   if (MRI->getType(Src0) != S32)
623     return false;
624 
625   const DebugLoc &DL = MI.getDebugLoc();
626   MachineBasicBlock *BB = MI.getParent();
627 
628   auto ConstSrc1 = getAnyConstantVRegValWithLookThrough(Src1, *MRI, true, true);
629   if (ConstSrc1) {
630     auto ConstSrc0 =
631         getAnyConstantVRegValWithLookThrough(Src0, *MRI, true, true);
632     if (ConstSrc0) {
633       const int64_t K0 = ConstSrc0->Value.getSExtValue();
634       const int64_t K1 = ConstSrc1->Value.getSExtValue();
635       uint32_t Lo16 = static_cast<uint32_t>(K0) & 0xffff;
636       uint32_t Hi16 = static_cast<uint32_t>(K1) & 0xffff;
637 
638       BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), Dst)
639         .addImm(Lo16 | (Hi16 << 16));
640       MI.eraseFromParent();
641       return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI);
642     }
643   }
644 
645   // TODO: This should probably be a combine somewhere
646   // (build_vector_trunc $src0, undef -> copy $src0
647   MachineInstr *Src1Def = getDefIgnoringCopies(Src1, *MRI);
648   if (Src1Def && Src1Def->getOpcode() == AMDGPU::G_IMPLICIT_DEF) {
649     MI.setDesc(TII.get(AMDGPU::COPY));
650     MI.removeOperand(2);
651     return RBI.constrainGenericRegister(Dst, AMDGPU::SReg_32RegClass, *MRI) &&
652            RBI.constrainGenericRegister(Src0, AMDGPU::SReg_32RegClass, *MRI);
653   }
654 
655   Register ShiftSrc0;
656   Register ShiftSrc1;
657 
658   // With multiple uses of the shift, this will duplicate the shift and
659   // increase register pressure.
660   //
661   // (build_vector_trunc (lshr_oneuse $src0, 16), (lshr_oneuse $src1, 16)
662   //  => (S_PACK_HH_B32_B16 $src0, $src1)
663   // (build_vector_trunc $src0, (lshr_oneuse SReg_32:$src1, 16))
664   //  => (S_PACK_LH_B32_B16 $src0, $src1)
665   // (build_vector_trunc $src0, $src1)
666   //  => (S_PACK_LL_B32_B16 $src0, $src1)
667 
668   bool Shift0 = mi_match(
669       Src0, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc0), m_SpecificICst(16))));
670 
671   bool Shift1 = mi_match(
672       Src1, *MRI, m_OneUse(m_GLShr(m_Reg(ShiftSrc1), m_SpecificICst(16))));
673 
674   unsigned Opc = AMDGPU::S_PACK_LL_B32_B16;
675   if (Shift0 && Shift1) {
676     Opc = AMDGPU::S_PACK_HH_B32_B16;
677     MI.getOperand(1).setReg(ShiftSrc0);
678     MI.getOperand(2).setReg(ShiftSrc1);
679   } else if (Shift1) {
680     Opc = AMDGPU::S_PACK_LH_B32_B16;
681     MI.getOperand(2).setReg(ShiftSrc1);
682   } else if (Shift0 && ConstSrc1 && ConstSrc1->Value == 0) {
683     // build_vector_trunc (lshr $src0, 16), 0 -> s_lshr_b32 $src0, 16
684     auto MIB = BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_LSHR_B32), Dst)
685       .addReg(ShiftSrc0)
686       .addImm(16);
687 
688     MI.eraseFromParent();
689     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
690   }
691 
692   MI.setDesc(TII.get(Opc));
693   return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
694 }
695 
696 bool AMDGPUInstructionSelector::selectG_PTR_ADD(MachineInstr &I) const {
697   return selectG_ADD_SUB(I);
698 }
699 
700 bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
701   const MachineOperand &MO = I.getOperand(0);
702 
703   // FIXME: Interface for getConstrainedRegClassForOperand needs work. The
704   // regbank check here is to know why getConstrainedRegClassForOperand failed.
705   const TargetRegisterClass *RC = TRI.getConstrainedRegClassForOperand(MO, *MRI);
706   if ((!RC && !MRI->getRegBankOrNull(MO.getReg())) ||
707       (RC && RBI.constrainGenericRegister(MO.getReg(), *RC, *MRI))) {
708     I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
709     return true;
710   }
711 
712   return false;
713 }
714 
715 bool AMDGPUInstructionSelector::selectG_INSERT(MachineInstr &I) const {
716   MachineBasicBlock *BB = I.getParent();
717 
718   Register DstReg = I.getOperand(0).getReg();
719   Register Src0Reg = I.getOperand(1).getReg();
720   Register Src1Reg = I.getOperand(2).getReg();
721   LLT Src1Ty = MRI->getType(Src1Reg);
722 
723   unsigned DstSize = MRI->getType(DstReg).getSizeInBits();
724   unsigned InsSize = Src1Ty.getSizeInBits();
725 
726   int64_t Offset = I.getOperand(3).getImm();
727 
728   // FIXME: These cases should have been illegal and unnecessary to check here.
729   if (Offset % 32 != 0 || InsSize % 32 != 0)
730     return false;
731 
732   // Currently not handled by getSubRegFromChannel.
733   if (InsSize > 128)
734     return false;
735 
736   unsigned SubReg = TRI.getSubRegFromChannel(Offset / 32, InsSize / 32);
737   if (SubReg == AMDGPU::NoSubRegister)
738     return false;
739 
740   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
741   const TargetRegisterClass *DstRC =
742       TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
743   if (!DstRC)
744     return false;
745 
746   const RegisterBank *Src0Bank = RBI.getRegBank(Src0Reg, *MRI, TRI);
747   const RegisterBank *Src1Bank = RBI.getRegBank(Src1Reg, *MRI, TRI);
748   const TargetRegisterClass *Src0RC =
749       TRI.getRegClassForSizeOnBank(DstSize, *Src0Bank);
750   const TargetRegisterClass *Src1RC =
751       TRI.getRegClassForSizeOnBank(InsSize, *Src1Bank);
752 
753   // Deal with weird cases where the class only partially supports the subreg
754   // index.
755   Src0RC = TRI.getSubClassWithSubReg(Src0RC, SubReg);
756   if (!Src0RC || !Src1RC)
757     return false;
758 
759   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
760       !RBI.constrainGenericRegister(Src0Reg, *Src0RC, *MRI) ||
761       !RBI.constrainGenericRegister(Src1Reg, *Src1RC, *MRI))
762     return false;
763 
764   const DebugLoc &DL = I.getDebugLoc();
765   BuildMI(*BB, &I, DL, TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
766     .addReg(Src0Reg)
767     .addReg(Src1Reg)
768     .addImm(SubReg);
769 
770   I.eraseFromParent();
771   return true;
772 }
773 
774 bool AMDGPUInstructionSelector::selectG_SBFX_UBFX(MachineInstr &MI) const {
775   Register DstReg = MI.getOperand(0).getReg();
776   Register SrcReg = MI.getOperand(1).getReg();
777   Register OffsetReg = MI.getOperand(2).getReg();
778   Register WidthReg = MI.getOperand(3).getReg();
779 
780   assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID &&
781          "scalar BFX instructions are expanded in regbankselect");
782   assert(MRI->getType(MI.getOperand(0).getReg()).getSizeInBits() == 32 &&
783          "64-bit vector BFX instructions are expanded in regbankselect");
784 
785   const DebugLoc &DL = MI.getDebugLoc();
786   MachineBasicBlock *MBB = MI.getParent();
787 
788   bool IsSigned = MI.getOpcode() == TargetOpcode::G_SBFX;
789   unsigned Opc = IsSigned ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
790   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), DstReg)
791                  .addReg(SrcReg)
792                  .addReg(OffsetReg)
793                  .addReg(WidthReg);
794   MI.eraseFromParent();
795   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
796 }
797 
798 bool AMDGPUInstructionSelector::selectInterpP1F16(MachineInstr &MI) const {
799   if (STI.getLDSBankCount() != 16)
800     return selectImpl(MI, *CoverageInfo);
801 
802   Register Dst = MI.getOperand(0).getReg();
803   Register Src0 = MI.getOperand(2).getReg();
804   Register M0Val = MI.getOperand(6).getReg();
805   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI) ||
806       !RBI.constrainGenericRegister(Dst, AMDGPU::VGPR_32RegClass, *MRI) ||
807       !RBI.constrainGenericRegister(Src0, AMDGPU::VGPR_32RegClass, *MRI))
808     return false;
809 
810   // This requires 2 instructions. It is possible to write a pattern to support
811   // this, but the generated isel emitter doesn't correctly deal with multiple
812   // output instructions using the same physical register input. The copy to m0
813   // is incorrectly placed before the second instruction.
814   //
815   // TODO: Match source modifiers.
816 
817   Register InterpMov = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
818   const DebugLoc &DL = MI.getDebugLoc();
819   MachineBasicBlock *MBB = MI.getParent();
820 
821   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
822     .addReg(M0Val);
823   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_MOV_F32), InterpMov)
824     .addImm(2)
825     .addImm(MI.getOperand(4).getImm())  // $attr
826     .addImm(MI.getOperand(3).getImm()); // $attrchan
827 
828   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_INTERP_P1LV_F16), Dst)
829     .addImm(0)                          // $src0_modifiers
830     .addReg(Src0)                       // $src0
831     .addImm(MI.getOperand(4).getImm())  // $attr
832     .addImm(MI.getOperand(3).getImm())  // $attrchan
833     .addImm(0)                          // $src2_modifiers
834     .addReg(InterpMov)                  // $src2 - 2 f16 values selected by high
835     .addImm(MI.getOperand(5).getImm())  // $high
836     .addImm(0)                          // $clamp
837     .addImm(0);                         // $omod
838 
839   MI.eraseFromParent();
840   return true;
841 }
842 
843 // Writelane is special in that it can use SGPR and M0 (which would normally
844 // count as using the constant bus twice - but in this case it is allowed since
845 // the lane selector doesn't count as a use of the constant bus). However, it is
846 // still required to abide by the 1 SGPR rule. Fix this up if we might have
847 // multiple SGPRs.
848 bool AMDGPUInstructionSelector::selectWritelane(MachineInstr &MI) const {
849   // With a constant bus limit of at least 2, there's no issue.
850   if (STI.getConstantBusLimit(AMDGPU::V_WRITELANE_B32) > 1)
851     return selectImpl(MI, *CoverageInfo);
852 
853   MachineBasicBlock *MBB = MI.getParent();
854   const DebugLoc &DL = MI.getDebugLoc();
855   Register VDst = MI.getOperand(0).getReg();
856   Register Val = MI.getOperand(2).getReg();
857   Register LaneSelect = MI.getOperand(3).getReg();
858   Register VDstIn = MI.getOperand(4).getReg();
859 
860   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_WRITELANE_B32), VDst);
861 
862   Optional<ValueAndVReg> ConstSelect =
863       getIConstantVRegValWithLookThrough(LaneSelect, *MRI);
864   if (ConstSelect) {
865     // The selector has to be an inline immediate, so we can use whatever for
866     // the other operands.
867     MIB.addReg(Val);
868     MIB.addImm(ConstSelect->Value.getSExtValue() &
869                maskTrailingOnes<uint64_t>(STI.getWavefrontSizeLog2()));
870   } else {
871     Optional<ValueAndVReg> ConstVal =
872         getIConstantVRegValWithLookThrough(Val, *MRI);
873 
874     // If the value written is an inline immediate, we can get away without a
875     // copy to m0.
876     if (ConstVal && AMDGPU::isInlinableLiteral32(ConstVal->Value.getSExtValue(),
877                                                  STI.hasInv2PiInlineImm())) {
878       MIB.addImm(ConstVal->Value.getSExtValue());
879       MIB.addReg(LaneSelect);
880     } else {
881       MIB.addReg(Val);
882 
883       // If the lane selector was originally in a VGPR and copied with
884       // readfirstlane, there's a hazard to read the same SGPR from the
885       // VALU. Constrain to a different SGPR to help avoid needing a nop later.
886       RBI.constrainGenericRegister(LaneSelect, AMDGPU::SReg_32_XM0RegClass, *MRI);
887 
888       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
889         .addReg(LaneSelect);
890       MIB.addReg(AMDGPU::M0);
891     }
892   }
893 
894   MIB.addReg(VDstIn);
895 
896   MI.eraseFromParent();
897   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
898 }
899 
900 // We need to handle this here because tablegen doesn't support matching
901 // instructions with multiple outputs.
902 bool AMDGPUInstructionSelector::selectDivScale(MachineInstr &MI) const {
903   Register Dst0 = MI.getOperand(0).getReg();
904   Register Dst1 = MI.getOperand(1).getReg();
905 
906   LLT Ty = MRI->getType(Dst0);
907   unsigned Opc;
908   if (Ty == LLT::scalar(32))
909     Opc = AMDGPU::V_DIV_SCALE_F32_e64;
910   else if (Ty == LLT::scalar(64))
911     Opc = AMDGPU::V_DIV_SCALE_F64_e64;
912   else
913     return false;
914 
915   // TODO: Match source modifiers.
916 
917   const DebugLoc &DL = MI.getDebugLoc();
918   MachineBasicBlock *MBB = MI.getParent();
919 
920   Register Numer = MI.getOperand(3).getReg();
921   Register Denom = MI.getOperand(4).getReg();
922   unsigned ChooseDenom = MI.getOperand(5).getImm();
923 
924   Register Src0 = ChooseDenom != 0 ? Numer : Denom;
925 
926   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), Dst0)
927     .addDef(Dst1)
928     .addImm(0)     // $src0_modifiers
929     .addUse(Src0)  // $src0
930     .addImm(0)     // $src1_modifiers
931     .addUse(Denom) // $src1
932     .addImm(0)     // $src2_modifiers
933     .addUse(Numer) // $src2
934     .addImm(0)     // $clamp
935     .addImm(0);    // $omod
936 
937   MI.eraseFromParent();
938   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
939 }
940 
941 bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I) const {
942   unsigned IntrinsicID = I.getIntrinsicID();
943   switch (IntrinsicID) {
944   case Intrinsic::amdgcn_if_break: {
945     MachineBasicBlock *BB = I.getParent();
946 
947     // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
948     // SelectionDAG uses for wave32 vs wave64.
949     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::SI_IF_BREAK))
950       .add(I.getOperand(0))
951       .add(I.getOperand(2))
952       .add(I.getOperand(3));
953 
954     Register DstReg = I.getOperand(0).getReg();
955     Register Src0Reg = I.getOperand(2).getReg();
956     Register Src1Reg = I.getOperand(3).getReg();
957 
958     I.eraseFromParent();
959 
960     for (Register Reg : { DstReg, Src0Reg, Src1Reg })
961       MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
962 
963     return true;
964   }
965   case Intrinsic::amdgcn_interp_p1_f16:
966     return selectInterpP1F16(I);
967   case Intrinsic::amdgcn_wqm:
968     return constrainCopyLikeIntrin(I, AMDGPU::WQM);
969   case Intrinsic::amdgcn_softwqm:
970     return constrainCopyLikeIntrin(I, AMDGPU::SOFT_WQM);
971   case Intrinsic::amdgcn_strict_wwm:
972   case Intrinsic::amdgcn_wwm:
973     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WWM);
974   case Intrinsic::amdgcn_strict_wqm:
975     return constrainCopyLikeIntrin(I, AMDGPU::STRICT_WQM);
976   case Intrinsic::amdgcn_writelane:
977     return selectWritelane(I);
978   case Intrinsic::amdgcn_div_scale:
979     return selectDivScale(I);
980   case Intrinsic::amdgcn_icmp:
981     return selectIntrinsicIcmp(I);
982   case Intrinsic::amdgcn_ballot:
983     return selectBallot(I);
984   case Intrinsic::amdgcn_reloc_constant:
985     return selectRelocConstant(I);
986   case Intrinsic::amdgcn_groupstaticsize:
987     return selectGroupStaticSize(I);
988   case Intrinsic::returnaddress:
989     return selectReturnAddress(I);
990   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
991   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
992   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
993   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
994   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
995   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
996     return selectSMFMACIntrin(I);
997   default:
998     return selectImpl(I, *CoverageInfo);
999   }
1000 }
1001 
1002 static int getV_CMPOpcode(CmpInst::Predicate P, unsigned Size) {
1003   if (Size != 32 && Size != 64)
1004     return -1;
1005   switch (P) {
1006   default:
1007     llvm_unreachable("Unknown condition code!");
1008   case CmpInst::ICMP_NE:
1009     return Size == 32 ? AMDGPU::V_CMP_NE_U32_e64 : AMDGPU::V_CMP_NE_U64_e64;
1010   case CmpInst::ICMP_EQ:
1011     return Size == 32 ? AMDGPU::V_CMP_EQ_U32_e64 : AMDGPU::V_CMP_EQ_U64_e64;
1012   case CmpInst::ICMP_SGT:
1013     return Size == 32 ? AMDGPU::V_CMP_GT_I32_e64 : AMDGPU::V_CMP_GT_I64_e64;
1014   case CmpInst::ICMP_SGE:
1015     return Size == 32 ? AMDGPU::V_CMP_GE_I32_e64 : AMDGPU::V_CMP_GE_I64_e64;
1016   case CmpInst::ICMP_SLT:
1017     return Size == 32 ? AMDGPU::V_CMP_LT_I32_e64 : AMDGPU::V_CMP_LT_I64_e64;
1018   case CmpInst::ICMP_SLE:
1019     return Size == 32 ? AMDGPU::V_CMP_LE_I32_e64 : AMDGPU::V_CMP_LE_I64_e64;
1020   case CmpInst::ICMP_UGT:
1021     return Size == 32 ? AMDGPU::V_CMP_GT_U32_e64 : AMDGPU::V_CMP_GT_U64_e64;
1022   case CmpInst::ICMP_UGE:
1023     return Size == 32 ? AMDGPU::V_CMP_GE_U32_e64 : AMDGPU::V_CMP_GE_U64_e64;
1024   case CmpInst::ICMP_ULT:
1025     return Size == 32 ? AMDGPU::V_CMP_LT_U32_e64 : AMDGPU::V_CMP_LT_U64_e64;
1026   case CmpInst::ICMP_ULE:
1027     return Size == 32 ? AMDGPU::V_CMP_LE_U32_e64 : AMDGPU::V_CMP_LE_U64_e64;
1028   }
1029 }
1030 
1031 int AMDGPUInstructionSelector::getS_CMPOpcode(CmpInst::Predicate P,
1032                                               unsigned Size) const {
1033   if (Size == 64) {
1034     if (!STI.hasScalarCompareEq64())
1035       return -1;
1036 
1037     switch (P) {
1038     case CmpInst::ICMP_NE:
1039       return AMDGPU::S_CMP_LG_U64;
1040     case CmpInst::ICMP_EQ:
1041       return AMDGPU::S_CMP_EQ_U64;
1042     default:
1043       return -1;
1044     }
1045   }
1046 
1047   if (Size != 32)
1048     return -1;
1049 
1050   switch (P) {
1051   case CmpInst::ICMP_NE:
1052     return AMDGPU::S_CMP_LG_U32;
1053   case CmpInst::ICMP_EQ:
1054     return AMDGPU::S_CMP_EQ_U32;
1055   case CmpInst::ICMP_SGT:
1056     return AMDGPU::S_CMP_GT_I32;
1057   case CmpInst::ICMP_SGE:
1058     return AMDGPU::S_CMP_GE_I32;
1059   case CmpInst::ICMP_SLT:
1060     return AMDGPU::S_CMP_LT_I32;
1061   case CmpInst::ICMP_SLE:
1062     return AMDGPU::S_CMP_LE_I32;
1063   case CmpInst::ICMP_UGT:
1064     return AMDGPU::S_CMP_GT_U32;
1065   case CmpInst::ICMP_UGE:
1066     return AMDGPU::S_CMP_GE_U32;
1067   case CmpInst::ICMP_ULT:
1068     return AMDGPU::S_CMP_LT_U32;
1069   case CmpInst::ICMP_ULE:
1070     return AMDGPU::S_CMP_LE_U32;
1071   default:
1072     llvm_unreachable("Unknown condition code!");
1073   }
1074 }
1075 
1076 bool AMDGPUInstructionSelector::selectG_ICMP(MachineInstr &I) const {
1077   MachineBasicBlock *BB = I.getParent();
1078   const DebugLoc &DL = I.getDebugLoc();
1079 
1080   Register SrcReg = I.getOperand(2).getReg();
1081   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1082 
1083   auto Pred = (CmpInst::Predicate)I.getOperand(1).getPredicate();
1084 
1085   Register CCReg = I.getOperand(0).getReg();
1086   if (!isVCC(CCReg, *MRI)) {
1087     int Opcode = getS_CMPOpcode(Pred, Size);
1088     if (Opcode == -1)
1089       return false;
1090     MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode))
1091             .add(I.getOperand(2))
1092             .add(I.getOperand(3));
1093     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CCReg)
1094       .addReg(AMDGPU::SCC);
1095     bool Ret =
1096         constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI) &&
1097         RBI.constrainGenericRegister(CCReg, AMDGPU::SReg_32RegClass, *MRI);
1098     I.eraseFromParent();
1099     return Ret;
1100   }
1101 
1102   int Opcode = getV_CMPOpcode(Pred, Size);
1103   if (Opcode == -1)
1104     return false;
1105 
1106   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode),
1107             I.getOperand(0).getReg())
1108             .add(I.getOperand(2))
1109             .add(I.getOperand(3));
1110   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1111                                *TRI.getBoolRC(), *MRI);
1112   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1113   I.eraseFromParent();
1114   return Ret;
1115 }
1116 
1117 bool AMDGPUInstructionSelector::selectIntrinsicIcmp(MachineInstr &I) const {
1118   Register Dst = I.getOperand(0).getReg();
1119   if (isVCC(Dst, *MRI))
1120     return false;
1121 
1122   if (MRI->getType(Dst).getSizeInBits() != STI.getWavefrontSize())
1123     return false;
1124 
1125   MachineBasicBlock *BB = I.getParent();
1126   const DebugLoc &DL = I.getDebugLoc();
1127   Register SrcReg = I.getOperand(2).getReg();
1128   unsigned Size = RBI.getSizeInBits(SrcReg, *MRI, TRI);
1129 
1130   auto Pred = static_cast<CmpInst::Predicate>(I.getOperand(4).getImm());
1131   if (!ICmpInst::isIntPredicate(static_cast<ICmpInst::Predicate>(Pred))) {
1132     MachineInstr *ICmp =
1133         BuildMI(*BB, &I, DL, TII.get(AMDGPU::IMPLICIT_DEF), Dst);
1134 
1135     if (!RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(),
1136                                       *TRI.getBoolRC(), *MRI))
1137       return false;
1138     I.eraseFromParent();
1139     return true;
1140   }
1141 
1142   int Opcode = getV_CMPOpcode(Pred, Size);
1143   if (Opcode == -1)
1144     return false;
1145 
1146   MachineInstr *ICmp = BuildMI(*BB, &I, DL, TII.get(Opcode), Dst)
1147                            .add(I.getOperand(2))
1148                            .add(I.getOperand(3));
1149   RBI.constrainGenericRegister(ICmp->getOperand(0).getReg(), *TRI.getBoolRC(),
1150                                *MRI);
1151   bool Ret = constrainSelectedInstRegOperands(*ICmp, TII, TRI, RBI);
1152   I.eraseFromParent();
1153   return Ret;
1154 }
1155 
1156 bool AMDGPUInstructionSelector::selectBallot(MachineInstr &I) const {
1157   MachineBasicBlock *BB = I.getParent();
1158   const DebugLoc &DL = I.getDebugLoc();
1159   Register DstReg = I.getOperand(0).getReg();
1160   const unsigned Size = MRI->getType(DstReg).getSizeInBits();
1161   const bool Is64 = Size == 64;
1162 
1163   if (Size != STI.getWavefrontSize())
1164     return false;
1165 
1166   Optional<ValueAndVReg> Arg =
1167       getIConstantVRegValWithLookThrough(I.getOperand(2).getReg(), *MRI);
1168 
1169   if (Arg.hasValue()) {
1170     const int64_t Value = Arg.getValue().Value.getSExtValue();
1171     if (Value == 0) {
1172       unsigned Opcode = Is64 ? AMDGPU::S_MOV_B64 : AMDGPU::S_MOV_B32;
1173       BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg).addImm(0);
1174     } else if (Value == -1) { // all ones
1175       Register SrcReg = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
1176       BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1177     } else
1178       return false;
1179   } else {
1180     Register SrcReg = I.getOperand(2).getReg();
1181     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), DstReg).addReg(SrcReg);
1182   }
1183 
1184   I.eraseFromParent();
1185   return true;
1186 }
1187 
1188 bool AMDGPUInstructionSelector::selectRelocConstant(MachineInstr &I) const {
1189   Register DstReg = I.getOperand(0).getReg();
1190   const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
1191   const TargetRegisterClass *DstRC = TRI.getRegClassForSizeOnBank(32, *DstBank);
1192   if (!DstRC || !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
1193     return false;
1194 
1195   const bool IsVALU = DstBank->getID() == AMDGPU::VGPRRegBankID;
1196 
1197   Module *M = MF->getFunction().getParent();
1198   const MDNode *Metadata = I.getOperand(2).getMetadata();
1199   auto SymbolName = cast<MDString>(Metadata->getOperand(0))->getString();
1200   auto RelocSymbol = cast<GlobalVariable>(
1201     M->getOrInsertGlobal(SymbolName, Type::getInt32Ty(M->getContext())));
1202 
1203   MachineBasicBlock *BB = I.getParent();
1204   BuildMI(*BB, &I, I.getDebugLoc(),
1205           TII.get(IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32), DstReg)
1206     .addGlobalAddress(RelocSymbol, 0, SIInstrInfo::MO_ABS32_LO);
1207 
1208   I.eraseFromParent();
1209   return true;
1210 }
1211 
1212 bool AMDGPUInstructionSelector::selectGroupStaticSize(MachineInstr &I) const {
1213   Triple::OSType OS = MF->getTarget().getTargetTriple().getOS();
1214 
1215   Register DstReg = I.getOperand(0).getReg();
1216   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1217   unsigned Mov = DstRB->getID() == AMDGPU::SGPRRegBankID ?
1218     AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
1219 
1220   MachineBasicBlock *MBB = I.getParent();
1221   const DebugLoc &DL = I.getDebugLoc();
1222 
1223   auto MIB = BuildMI(*MBB, &I, DL, TII.get(Mov), DstReg);
1224 
1225   if (OS == Triple::AMDHSA || OS == Triple::AMDPAL) {
1226     const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
1227     MIB.addImm(MFI->getLDSSize());
1228   } else {
1229     Module *M = MF->getFunction().getParent();
1230     const GlobalValue *GV
1231       = Intrinsic::getDeclaration(M, Intrinsic::amdgcn_groupstaticsize);
1232     MIB.addGlobalAddress(GV, 0, SIInstrInfo::MO_ABS32_LO);
1233   }
1234 
1235   I.eraseFromParent();
1236   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1237 }
1238 
1239 bool AMDGPUInstructionSelector::selectReturnAddress(MachineInstr &I) const {
1240   MachineBasicBlock *MBB = I.getParent();
1241   MachineFunction &MF = *MBB->getParent();
1242   const DebugLoc &DL = I.getDebugLoc();
1243 
1244   MachineOperand &Dst = I.getOperand(0);
1245   Register DstReg = Dst.getReg();
1246   unsigned Depth = I.getOperand(2).getImm();
1247 
1248   const TargetRegisterClass *RC
1249     = TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1250   if (!RC->hasSubClassEq(&AMDGPU::SGPR_64RegClass) ||
1251       !RBI.constrainGenericRegister(DstReg, *RC, *MRI))
1252     return false;
1253 
1254   // Check for kernel and shader functions
1255   if (Depth != 0 ||
1256       MF.getInfo<SIMachineFunctionInfo>()->isEntryFunction()) {
1257     BuildMI(*MBB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
1258       .addImm(0);
1259     I.eraseFromParent();
1260     return true;
1261   }
1262 
1263   MachineFrameInfo &MFI = MF.getFrameInfo();
1264   // There is a call to @llvm.returnaddress in this function
1265   MFI.setReturnAddressIsTaken(true);
1266 
1267   // Get the return address reg and mark it as an implicit live-in
1268   Register ReturnAddrReg = TRI.getReturnAddressReg(MF);
1269   Register LiveIn = getFunctionLiveInPhysReg(MF, TII, ReturnAddrReg,
1270                                              AMDGPU::SReg_64RegClass, DL);
1271   BuildMI(*MBB, &I, DL, TII.get(AMDGPU::COPY), DstReg)
1272     .addReg(LiveIn);
1273   I.eraseFromParent();
1274   return true;
1275 }
1276 
1277 bool AMDGPUInstructionSelector::selectEndCfIntrinsic(MachineInstr &MI) const {
1278   // FIXME: Manually selecting to avoid dealing with the SReg_1 trick
1279   // SelectionDAG uses for wave32 vs wave64.
1280   MachineBasicBlock *BB = MI.getParent();
1281   BuildMI(*BB, &MI, MI.getDebugLoc(), TII.get(AMDGPU::SI_END_CF))
1282       .add(MI.getOperand(1));
1283 
1284   Register Reg = MI.getOperand(1).getReg();
1285   MI.eraseFromParent();
1286 
1287   if (!MRI->getRegClassOrNull(Reg))
1288     MRI->setRegClass(Reg, TRI.getWaveMaskRegClass());
1289   return true;
1290 }
1291 
1292 bool AMDGPUInstructionSelector::selectDSOrderedIntrinsic(
1293   MachineInstr &MI, Intrinsic::ID IntrID) const {
1294   MachineBasicBlock *MBB = MI.getParent();
1295   MachineFunction *MF = MBB->getParent();
1296   const DebugLoc &DL = MI.getDebugLoc();
1297 
1298   unsigned IndexOperand = MI.getOperand(7).getImm();
1299   bool WaveRelease = MI.getOperand(8).getImm() != 0;
1300   bool WaveDone = MI.getOperand(9).getImm() != 0;
1301 
1302   if (WaveDone && !WaveRelease)
1303     report_fatal_error("ds_ordered_count: wave_done requires wave_release");
1304 
1305   unsigned OrderedCountIndex = IndexOperand & 0x3f;
1306   IndexOperand &= ~0x3f;
1307   unsigned CountDw = 0;
1308 
1309   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10) {
1310     CountDw = (IndexOperand >> 24) & 0xf;
1311     IndexOperand &= ~(0xf << 24);
1312 
1313     if (CountDw < 1 || CountDw > 4) {
1314       report_fatal_error(
1315         "ds_ordered_count: dword count must be between 1 and 4");
1316     }
1317   }
1318 
1319   if (IndexOperand)
1320     report_fatal_error("ds_ordered_count: bad index operand");
1321 
1322   unsigned Instruction = IntrID == Intrinsic::amdgcn_ds_ordered_add ? 0 : 1;
1323   unsigned ShaderType = SIInstrInfo::getDSShaderTypeValue(*MF);
1324 
1325   unsigned Offset0 = OrderedCountIndex << 2;
1326   unsigned Offset1 = WaveRelease | (WaveDone << 1) | (ShaderType << 2) |
1327                      (Instruction << 4);
1328 
1329   if (STI.getGeneration() >= AMDGPUSubtarget::GFX10)
1330     Offset1 |= (CountDw - 1) << 6;
1331 
1332   unsigned Offset = Offset0 | (Offset1 << 8);
1333 
1334   Register M0Val = MI.getOperand(2).getReg();
1335   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1336     .addReg(M0Val);
1337 
1338   Register DstReg = MI.getOperand(0).getReg();
1339   Register ValReg = MI.getOperand(3).getReg();
1340   MachineInstrBuilder DS =
1341     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::DS_ORDERED_COUNT), DstReg)
1342       .addReg(ValReg)
1343       .addImm(Offset)
1344       .cloneMemRefs(MI);
1345 
1346   if (!RBI.constrainGenericRegister(M0Val, AMDGPU::SReg_32RegClass, *MRI))
1347     return false;
1348 
1349   bool Ret = constrainSelectedInstRegOperands(*DS, TII, TRI, RBI);
1350   MI.eraseFromParent();
1351   return Ret;
1352 }
1353 
1354 static unsigned gwsIntrinToOpcode(unsigned IntrID) {
1355   switch (IntrID) {
1356   case Intrinsic::amdgcn_ds_gws_init:
1357     return AMDGPU::DS_GWS_INIT;
1358   case Intrinsic::amdgcn_ds_gws_barrier:
1359     return AMDGPU::DS_GWS_BARRIER;
1360   case Intrinsic::amdgcn_ds_gws_sema_v:
1361     return AMDGPU::DS_GWS_SEMA_V;
1362   case Intrinsic::amdgcn_ds_gws_sema_br:
1363     return AMDGPU::DS_GWS_SEMA_BR;
1364   case Intrinsic::amdgcn_ds_gws_sema_p:
1365     return AMDGPU::DS_GWS_SEMA_P;
1366   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1367     return AMDGPU::DS_GWS_SEMA_RELEASE_ALL;
1368   default:
1369     llvm_unreachable("not a gws intrinsic");
1370   }
1371 }
1372 
1373 bool AMDGPUInstructionSelector::selectDSGWSIntrinsic(MachineInstr &MI,
1374                                                      Intrinsic::ID IID) const {
1375   if (IID == Intrinsic::amdgcn_ds_gws_sema_release_all &&
1376       !STI.hasGWSSemaReleaseAll())
1377     return false;
1378 
1379   // intrinsic ID, vsrc, offset
1380   const bool HasVSrc = MI.getNumOperands() == 3;
1381   assert(HasVSrc || MI.getNumOperands() == 2);
1382 
1383   Register BaseOffset = MI.getOperand(HasVSrc ? 2 : 1).getReg();
1384   const RegisterBank *OffsetRB = RBI.getRegBank(BaseOffset, *MRI, TRI);
1385   if (OffsetRB->getID() != AMDGPU::SGPRRegBankID)
1386     return false;
1387 
1388   MachineInstr *OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1389   assert(OffsetDef);
1390 
1391   unsigned ImmOffset;
1392 
1393   MachineBasicBlock *MBB = MI.getParent();
1394   const DebugLoc &DL = MI.getDebugLoc();
1395 
1396   MachineInstr *Readfirstlane = nullptr;
1397 
1398   // If we legalized the VGPR input, strip out the readfirstlane to analyze the
1399   // incoming offset, in case there's an add of a constant. We'll have to put it
1400   // back later.
1401   if (OffsetDef->getOpcode() == AMDGPU::V_READFIRSTLANE_B32) {
1402     Readfirstlane = OffsetDef;
1403     BaseOffset = OffsetDef->getOperand(1).getReg();
1404     OffsetDef = getDefIgnoringCopies(BaseOffset, *MRI);
1405   }
1406 
1407   if (OffsetDef->getOpcode() == AMDGPU::G_CONSTANT) {
1408     // If we have a constant offset, try to use the 0 in m0 as the base.
1409     // TODO: Look into changing the default m0 initialization value. If the
1410     // default -1 only set the low 16-bits, we could leave it as-is and add 1 to
1411     // the immediate offset.
1412 
1413     ImmOffset = OffsetDef->getOperand(1).getCImm()->getZExtValue();
1414     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
1415       .addImm(0);
1416   } else {
1417     std::tie(BaseOffset, ImmOffset) =
1418         AMDGPU::getBaseWithConstantOffset(*MRI, BaseOffset);
1419 
1420     if (Readfirstlane) {
1421       // We have the constant offset now, so put the readfirstlane back on the
1422       // variable component.
1423       if (!RBI.constrainGenericRegister(BaseOffset, AMDGPU::VGPR_32RegClass, *MRI))
1424         return false;
1425 
1426       Readfirstlane->getOperand(1).setReg(BaseOffset);
1427       BaseOffset = Readfirstlane->getOperand(0).getReg();
1428     } else {
1429       if (!RBI.constrainGenericRegister(BaseOffset,
1430                                         AMDGPU::SReg_32RegClass, *MRI))
1431         return false;
1432     }
1433 
1434     Register M0Base = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
1435     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::S_LSHL_B32), M0Base)
1436       .addReg(BaseOffset)
1437       .addImm(16);
1438 
1439     BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1440       .addReg(M0Base);
1441   }
1442 
1443   // The resource id offset is computed as (<isa opaque base> + M0[21:16] +
1444   // offset field) % 64. Some versions of the programming guide omit the m0
1445   // part, or claim it's from offset 0.
1446   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(gwsIntrinToOpcode(IID)));
1447 
1448   if (HasVSrc) {
1449     Register VSrc = MI.getOperand(1).getReg();
1450     MIB.addReg(VSrc);
1451 
1452     if (!RBI.constrainGenericRegister(VSrc, AMDGPU::VGPR_32RegClass, *MRI))
1453       return false;
1454   }
1455 
1456   MIB.addImm(ImmOffset)
1457      .cloneMemRefs(MI);
1458 
1459   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::data0);
1460 
1461   MI.eraseFromParent();
1462   return true;
1463 }
1464 
1465 bool AMDGPUInstructionSelector::selectDSAppendConsume(MachineInstr &MI,
1466                                                       bool IsAppend) const {
1467   Register PtrBase = MI.getOperand(2).getReg();
1468   LLT PtrTy = MRI->getType(PtrBase);
1469   bool IsGDS = PtrTy.getAddressSpace() == AMDGPUAS::REGION_ADDRESS;
1470 
1471   unsigned Offset;
1472   std::tie(PtrBase, Offset) = selectDS1Addr1OffsetImpl(MI.getOperand(2));
1473 
1474   // TODO: Should this try to look through readfirstlane like GWS?
1475   if (!isDSOffsetLegal(PtrBase, Offset)) {
1476     PtrBase = MI.getOperand(2).getReg();
1477     Offset = 0;
1478   }
1479 
1480   MachineBasicBlock *MBB = MI.getParent();
1481   const DebugLoc &DL = MI.getDebugLoc();
1482   const unsigned Opc = IsAppend ? AMDGPU::DS_APPEND : AMDGPU::DS_CONSUME;
1483 
1484   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
1485     .addReg(PtrBase);
1486   if (!RBI.constrainGenericRegister(PtrBase, AMDGPU::SReg_32RegClass, *MRI))
1487     return false;
1488 
1489   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc), MI.getOperand(0).getReg())
1490     .addImm(Offset)
1491     .addImm(IsGDS ? -1 : 0)
1492     .cloneMemRefs(MI);
1493   MI.eraseFromParent();
1494   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1495 }
1496 
1497 bool AMDGPUInstructionSelector::selectSBarrier(MachineInstr &MI) const {
1498   if (TM.getOptLevel() > CodeGenOpt::None) {
1499     unsigned WGSize = STI.getFlatWorkGroupSizes(MF->getFunction()).second;
1500     if (WGSize <= STI.getWavefrontSize()) {
1501       MachineBasicBlock *MBB = MI.getParent();
1502       const DebugLoc &DL = MI.getDebugLoc();
1503       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::WAVE_BARRIER));
1504       MI.eraseFromParent();
1505       return true;
1506     }
1507   }
1508   return selectImpl(MI, *CoverageInfo);
1509 }
1510 
1511 static bool parseTexFail(uint64_t TexFailCtrl, bool &TFE, bool &LWE,
1512                          bool &IsTexFail) {
1513   if (TexFailCtrl)
1514     IsTexFail = true;
1515 
1516   TFE = (TexFailCtrl & 0x1) ? true : false;
1517   TexFailCtrl &= ~(uint64_t)0x1;
1518   LWE = (TexFailCtrl & 0x2) ? true : false;
1519   TexFailCtrl &= ~(uint64_t)0x2;
1520 
1521   return TexFailCtrl == 0;
1522 }
1523 
1524 bool AMDGPUInstructionSelector::selectImageIntrinsic(
1525   MachineInstr &MI, const AMDGPU::ImageDimIntrinsicInfo *Intr) const {
1526   MachineBasicBlock *MBB = MI.getParent();
1527   const DebugLoc &DL = MI.getDebugLoc();
1528 
1529   const AMDGPU::MIMGBaseOpcodeInfo *BaseOpcode =
1530     AMDGPU::getMIMGBaseOpcodeInfo(Intr->BaseOpcode);
1531 
1532   const AMDGPU::MIMGDimInfo *DimInfo = AMDGPU::getMIMGDimInfo(Intr->Dim);
1533   unsigned IntrOpcode = Intr->BaseOpcode;
1534   const bool IsGFX10Plus = AMDGPU::isGFX10Plus(STI);
1535 
1536   const unsigned ArgOffset = MI.getNumExplicitDefs() + 1;
1537 
1538   Register VDataIn, VDataOut;
1539   LLT VDataTy;
1540   int NumVDataDwords = -1;
1541   bool IsD16 = MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16 ||
1542                MI.getOpcode() == AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16;
1543 
1544   bool Unorm;
1545   if (!BaseOpcode->Sampler)
1546     Unorm = true;
1547   else
1548     Unorm = MI.getOperand(ArgOffset + Intr->UnormIndex).getImm() != 0;
1549 
1550   bool TFE;
1551   bool LWE;
1552   bool IsTexFail = false;
1553   if (!parseTexFail(MI.getOperand(ArgOffset + Intr->TexFailCtrlIndex).getImm(),
1554                     TFE, LWE, IsTexFail))
1555     return false;
1556 
1557   const int Flags = MI.getOperand(ArgOffset + Intr->NumArgs).getImm();
1558   const bool IsA16 = (Flags & 1) != 0;
1559   const bool IsG16 = (Flags & 2) != 0;
1560 
1561   // A16 implies 16 bit gradients if subtarget doesn't support G16
1562   if (IsA16 && !STI.hasG16() && !IsG16)
1563     return false;
1564 
1565   unsigned DMask = 0;
1566   unsigned DMaskLanes = 0;
1567 
1568   if (BaseOpcode->Atomic) {
1569     VDataOut = MI.getOperand(0).getReg();
1570     VDataIn = MI.getOperand(2).getReg();
1571     LLT Ty = MRI->getType(VDataIn);
1572 
1573     // Be careful to allow atomic swap on 16-bit element vectors.
1574     const bool Is64Bit = BaseOpcode->AtomicX2 ?
1575       Ty.getSizeInBits() == 128 :
1576       Ty.getSizeInBits() == 64;
1577 
1578     if (BaseOpcode->AtomicX2) {
1579       assert(MI.getOperand(3).getReg() == AMDGPU::NoRegister);
1580 
1581       DMask = Is64Bit ? 0xf : 0x3;
1582       NumVDataDwords = Is64Bit ? 4 : 2;
1583     } else {
1584       DMask = Is64Bit ? 0x3 : 0x1;
1585       NumVDataDwords = Is64Bit ? 2 : 1;
1586     }
1587   } else {
1588     DMask = MI.getOperand(ArgOffset + Intr->DMaskIndex).getImm();
1589     DMaskLanes = BaseOpcode->Gather4 ? 4 : countPopulation(DMask);
1590 
1591     if (BaseOpcode->Store) {
1592       VDataIn = MI.getOperand(1).getReg();
1593       VDataTy = MRI->getType(VDataIn);
1594       NumVDataDwords = (VDataTy.getSizeInBits() + 31) / 32;
1595     } else {
1596       VDataOut = MI.getOperand(0).getReg();
1597       VDataTy = MRI->getType(VDataOut);
1598       NumVDataDwords = DMaskLanes;
1599 
1600       if (IsD16 && !STI.hasUnpackedD16VMem())
1601         NumVDataDwords = (DMaskLanes + 1) / 2;
1602     }
1603   }
1604 
1605   // Set G16 opcode
1606   if (IsG16 && !IsA16) {
1607     const AMDGPU::MIMGG16MappingInfo *G16MappingInfo =
1608         AMDGPU::getMIMGG16MappingInfo(Intr->BaseOpcode);
1609     assert(G16MappingInfo);
1610     IntrOpcode = G16MappingInfo->G16; // set opcode to variant with _g16
1611   }
1612 
1613   // TODO: Check this in verifier.
1614   assert((!IsTexFail || DMaskLanes >= 1) && "should have legalized this");
1615 
1616   unsigned CPol = MI.getOperand(ArgOffset + Intr->CachePolicyIndex).getImm();
1617   if (BaseOpcode->Atomic)
1618     CPol |= AMDGPU::CPol::GLC; // TODO no-return optimization
1619   if (CPol & ~AMDGPU::CPol::ALL)
1620     return false;
1621 
1622   int NumVAddrRegs = 0;
1623   int NumVAddrDwords = 0;
1624   for (unsigned I = Intr->VAddrStart; I < Intr->VAddrEnd; I++) {
1625     // Skip the $noregs and 0s inserted during legalization.
1626     MachineOperand &AddrOp = MI.getOperand(ArgOffset + I);
1627     if (!AddrOp.isReg())
1628       continue; // XXX - Break?
1629 
1630     Register Addr = AddrOp.getReg();
1631     if (!Addr)
1632       break;
1633 
1634     ++NumVAddrRegs;
1635     NumVAddrDwords += (MRI->getType(Addr).getSizeInBits() + 31) / 32;
1636   }
1637 
1638   // The legalizer preprocessed the intrinsic arguments. If we aren't using
1639   // NSA, these should have been packed into a single value in the first
1640   // address register
1641   const bool UseNSA = NumVAddrRegs != 1 && NumVAddrDwords == NumVAddrRegs;
1642   if (UseNSA && !STI.hasFeature(AMDGPU::FeatureNSAEncoding)) {
1643     LLVM_DEBUG(dbgs() << "Trying to use NSA on non-NSA target\n");
1644     return false;
1645   }
1646 
1647   if (IsTexFail)
1648     ++NumVDataDwords;
1649 
1650   int Opcode = -1;
1651   if (IsGFX10Plus) {
1652     Opcode = AMDGPU::getMIMGOpcode(IntrOpcode,
1653                                    UseNSA ? AMDGPU::MIMGEncGfx10NSA
1654                                           : AMDGPU::MIMGEncGfx10Default,
1655                                    NumVDataDwords, NumVAddrDwords);
1656   } else {
1657     if (Subtarget->hasGFX90AInsts()) {
1658       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx90a,
1659                                      NumVDataDwords, NumVAddrDwords);
1660       if (Opcode == -1) {
1661         LLVM_DEBUG(
1662             dbgs()
1663             << "requested image instruction is not supported on this GPU\n");
1664         return false;
1665       }
1666     }
1667     if (Opcode == -1 &&
1668         STI.getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
1669       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx8,
1670                                      NumVDataDwords, NumVAddrDwords);
1671     if (Opcode == -1)
1672       Opcode = AMDGPU::getMIMGOpcode(IntrOpcode, AMDGPU::MIMGEncGfx6,
1673                                      NumVDataDwords, NumVAddrDwords);
1674   }
1675   assert(Opcode != -1);
1676 
1677   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opcode))
1678     .cloneMemRefs(MI);
1679 
1680   if (VDataOut) {
1681     if (BaseOpcode->AtomicX2) {
1682       const bool Is64 = MRI->getType(VDataOut).getSizeInBits() == 64;
1683 
1684       Register TmpReg = MRI->createVirtualRegister(
1685         Is64 ? &AMDGPU::VReg_128RegClass : &AMDGPU::VReg_64RegClass);
1686       unsigned SubReg = Is64 ? AMDGPU::sub0_sub1 : AMDGPU::sub0;
1687 
1688       MIB.addDef(TmpReg);
1689       if (!MRI->use_empty(VDataOut)) {
1690         BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), VDataOut)
1691             .addReg(TmpReg, RegState::Kill, SubReg);
1692       }
1693 
1694     } else {
1695       MIB.addDef(VDataOut); // vdata output
1696     }
1697   }
1698 
1699   if (VDataIn)
1700     MIB.addReg(VDataIn); // vdata input
1701 
1702   for (int I = 0; I != NumVAddrRegs; ++I) {
1703     MachineOperand &SrcOp = MI.getOperand(ArgOffset + Intr->VAddrStart + I);
1704     if (SrcOp.isReg()) {
1705       assert(SrcOp.getReg() != 0);
1706       MIB.addReg(SrcOp.getReg());
1707     }
1708   }
1709 
1710   MIB.addReg(MI.getOperand(ArgOffset + Intr->RsrcIndex).getReg());
1711   if (BaseOpcode->Sampler)
1712     MIB.addReg(MI.getOperand(ArgOffset + Intr->SampIndex).getReg());
1713 
1714   MIB.addImm(DMask); // dmask
1715 
1716   if (IsGFX10Plus)
1717     MIB.addImm(DimInfo->Encoding);
1718   MIB.addImm(Unorm);
1719 
1720   MIB.addImm(CPol);
1721   MIB.addImm(IsA16 &&  // a16 or r128
1722              STI.hasFeature(AMDGPU::FeatureR128A16) ? -1 : 0);
1723   if (IsGFX10Plus)
1724     MIB.addImm(IsA16 ? -1 : 0);
1725 
1726   if (!Subtarget->hasGFX90AInsts()) {
1727     MIB.addImm(TFE); // tfe
1728   } else if (TFE) {
1729     LLVM_DEBUG(dbgs() << "TFE is not supported on this GPU\n");
1730     return false;
1731   }
1732 
1733   MIB.addImm(LWE); // lwe
1734   if (!IsGFX10Plus)
1735     MIB.addImm(DimInfo->DA ? -1 : 0);
1736   if (BaseOpcode->HasD16)
1737     MIB.addImm(IsD16 ? -1 : 0);
1738 
1739   if (IsTexFail) {
1740     // An image load instruction with TFE/LWE only conditionally writes to its
1741     // result registers. Initialize them to zero so that we always get well
1742     // defined result values.
1743     assert(VDataOut && !VDataIn);
1744     Register Tied = MRI->cloneVirtualRegister(VDataOut);
1745     Register Zero = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1746     BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::V_MOV_B32_e32), Zero)
1747       .addImm(0);
1748     auto Parts = TRI.getRegSplitParts(MRI->getRegClass(Tied), 4);
1749     if (STI.usePRTStrictNull()) {
1750       // With enable-prt-strict-null enabled, initialize all result registers to
1751       // zero.
1752       auto RegSeq =
1753           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1754       for (auto Sub : Parts)
1755         RegSeq.addReg(Zero).addImm(Sub);
1756     } else {
1757       // With enable-prt-strict-null disabled, only initialize the extra TFE/LWE
1758       // result register.
1759       Register Undef = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
1760       BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::IMPLICIT_DEF), Undef);
1761       auto RegSeq =
1762           BuildMI(*MBB, *MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), Tied);
1763       for (auto Sub : Parts.drop_back(1))
1764         RegSeq.addReg(Undef).addImm(Sub);
1765       RegSeq.addReg(Zero).addImm(Parts.back());
1766     }
1767     MIB.addReg(Tied, RegState::Implicit);
1768     MIB->tieOperands(0, MIB->getNumOperands() - 1);
1769   }
1770 
1771   MI.eraseFromParent();
1772   constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
1773   TII.enforceOperandRCAlignment(*MIB, AMDGPU::OpName::vaddr);
1774   return true;
1775 }
1776 
1777 bool AMDGPUInstructionSelector::selectG_INTRINSIC_W_SIDE_EFFECTS(
1778     MachineInstr &I) const {
1779   unsigned IntrinsicID = I.getIntrinsicID();
1780   switch (IntrinsicID) {
1781   case Intrinsic::amdgcn_end_cf:
1782     return selectEndCfIntrinsic(I);
1783   case Intrinsic::amdgcn_ds_ordered_add:
1784   case Intrinsic::amdgcn_ds_ordered_swap:
1785     return selectDSOrderedIntrinsic(I, IntrinsicID);
1786   case Intrinsic::amdgcn_ds_gws_init:
1787   case Intrinsic::amdgcn_ds_gws_barrier:
1788   case Intrinsic::amdgcn_ds_gws_sema_v:
1789   case Intrinsic::amdgcn_ds_gws_sema_br:
1790   case Intrinsic::amdgcn_ds_gws_sema_p:
1791   case Intrinsic::amdgcn_ds_gws_sema_release_all:
1792     return selectDSGWSIntrinsic(I, IntrinsicID);
1793   case Intrinsic::amdgcn_ds_append:
1794     return selectDSAppendConsume(I, true);
1795   case Intrinsic::amdgcn_ds_consume:
1796     return selectDSAppendConsume(I, false);
1797   case Intrinsic::amdgcn_s_barrier:
1798     return selectSBarrier(I);
1799   case Intrinsic::amdgcn_global_atomic_fadd:
1800     return selectGlobalAtomicFadd(I, I.getOperand(2), I.getOperand(3));
1801   case Intrinsic::amdgcn_raw_buffer_load_lds:
1802   case Intrinsic::amdgcn_struct_buffer_load_lds:
1803     return selectBufferLoadLds(I);
1804   case Intrinsic::amdgcn_global_load_lds:
1805     return selectGlobalLoadLds(I);
1806   default: {
1807     return selectImpl(I, *CoverageInfo);
1808   }
1809   }
1810 }
1811 
1812 bool AMDGPUInstructionSelector::selectG_SELECT(MachineInstr &I) const {
1813   if (selectImpl(I, *CoverageInfo))
1814     return true;
1815 
1816   MachineBasicBlock *BB = I.getParent();
1817   const DebugLoc &DL = I.getDebugLoc();
1818 
1819   Register DstReg = I.getOperand(0).getReg();
1820   unsigned Size = RBI.getSizeInBits(DstReg, *MRI, TRI);
1821   assert(Size <= 32 || Size == 64);
1822   const MachineOperand &CCOp = I.getOperand(1);
1823   Register CCReg = CCOp.getReg();
1824   if (!isVCC(CCReg, *MRI)) {
1825     unsigned SelectOpcode = Size == 64 ? AMDGPU::S_CSELECT_B64 :
1826                                          AMDGPU::S_CSELECT_B32;
1827     MachineInstr *CopySCC = BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), AMDGPU::SCC)
1828             .addReg(CCReg);
1829 
1830     // The generic constrainSelectedInstRegOperands doesn't work for the scc register
1831     // bank, because it does not cover the register class that we used to represent
1832     // for it.  So we need to manually set the register class here.
1833     if (!MRI->getRegClassOrNull(CCReg))
1834         MRI->setRegClass(CCReg, TRI.getConstrainedRegClassForOperand(CCOp, *MRI));
1835     MachineInstr *Select = BuildMI(*BB, &I, DL, TII.get(SelectOpcode), DstReg)
1836             .add(I.getOperand(2))
1837             .add(I.getOperand(3));
1838 
1839     bool Ret = false;
1840     Ret |= constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1841     Ret |= constrainSelectedInstRegOperands(*CopySCC, TII, TRI, RBI);
1842     I.eraseFromParent();
1843     return Ret;
1844   }
1845 
1846   // Wide VGPR select should have been split in RegBankSelect.
1847   if (Size > 32)
1848     return false;
1849 
1850   MachineInstr *Select =
1851       BuildMI(*BB, &I, DL, TII.get(AMDGPU::V_CNDMASK_B32_e64), DstReg)
1852               .addImm(0)
1853               .add(I.getOperand(3))
1854               .addImm(0)
1855               .add(I.getOperand(2))
1856               .add(I.getOperand(1));
1857 
1858   bool Ret = constrainSelectedInstRegOperands(*Select, TII, TRI, RBI);
1859   I.eraseFromParent();
1860   return Ret;
1861 }
1862 
1863 static int sizeToSubRegIndex(unsigned Size) {
1864   switch (Size) {
1865   case 32:
1866     return AMDGPU::sub0;
1867   case 64:
1868     return AMDGPU::sub0_sub1;
1869   case 96:
1870     return AMDGPU::sub0_sub1_sub2;
1871   case 128:
1872     return AMDGPU::sub0_sub1_sub2_sub3;
1873   case 256:
1874     return AMDGPU::sub0_sub1_sub2_sub3_sub4_sub5_sub6_sub7;
1875   default:
1876     if (Size < 32)
1877       return AMDGPU::sub0;
1878     if (Size > 256)
1879       return -1;
1880     return sizeToSubRegIndex(PowerOf2Ceil(Size));
1881   }
1882 }
1883 
1884 bool AMDGPUInstructionSelector::selectG_TRUNC(MachineInstr &I) const {
1885   Register DstReg = I.getOperand(0).getReg();
1886   Register SrcReg = I.getOperand(1).getReg();
1887   const LLT DstTy = MRI->getType(DstReg);
1888   const LLT SrcTy = MRI->getType(SrcReg);
1889   const LLT S1 = LLT::scalar(1);
1890 
1891   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
1892   const RegisterBank *DstRB;
1893   if (DstTy == S1) {
1894     // This is a special case. We don't treat s1 for legalization artifacts as
1895     // vcc booleans.
1896     DstRB = SrcRB;
1897   } else {
1898     DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
1899     if (SrcRB != DstRB)
1900       return false;
1901   }
1902 
1903   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
1904 
1905   unsigned DstSize = DstTy.getSizeInBits();
1906   unsigned SrcSize = SrcTy.getSizeInBits();
1907 
1908   const TargetRegisterClass *SrcRC =
1909       TRI.getRegClassForSizeOnBank(SrcSize, *SrcRB);
1910   const TargetRegisterClass *DstRC =
1911       TRI.getRegClassForSizeOnBank(DstSize, *DstRB);
1912   if (!SrcRC || !DstRC)
1913     return false;
1914 
1915   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
1916       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI)) {
1917     LLVM_DEBUG(dbgs() << "Failed to constrain G_TRUNC\n");
1918     return false;
1919   }
1920 
1921   if (DstTy == LLT::fixed_vector(2, 16) && SrcTy == LLT::fixed_vector(2, 32)) {
1922     MachineBasicBlock *MBB = I.getParent();
1923     const DebugLoc &DL = I.getDebugLoc();
1924 
1925     Register LoReg = MRI->createVirtualRegister(DstRC);
1926     Register HiReg = MRI->createVirtualRegister(DstRC);
1927     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), LoReg)
1928       .addReg(SrcReg, 0, AMDGPU::sub0);
1929     BuildMI(*MBB, I, DL, TII.get(AMDGPU::COPY), HiReg)
1930       .addReg(SrcReg, 0, AMDGPU::sub1);
1931 
1932     if (IsVALU && STI.hasSDWA()) {
1933       // Write the low 16-bits of the high element into the high 16-bits of the
1934       // low element.
1935       MachineInstr *MovSDWA =
1936         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
1937         .addImm(0)                             // $src0_modifiers
1938         .addReg(HiReg)                         // $src0
1939         .addImm(0)                             // $clamp
1940         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
1941         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
1942         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
1943         .addReg(LoReg, RegState::Implicit);
1944       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
1945     } else {
1946       Register TmpReg0 = MRI->createVirtualRegister(DstRC);
1947       Register TmpReg1 = MRI->createVirtualRegister(DstRC);
1948       Register ImmReg = MRI->createVirtualRegister(DstRC);
1949       if (IsVALU) {
1950         BuildMI(*MBB, I, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), TmpReg0)
1951           .addImm(16)
1952           .addReg(HiReg);
1953       } else {
1954         BuildMI(*MBB, I, DL, TII.get(AMDGPU::S_LSHL_B32), TmpReg0)
1955           .addReg(HiReg)
1956           .addImm(16);
1957       }
1958 
1959       unsigned MovOpc = IsVALU ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32;
1960       unsigned AndOpc = IsVALU ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
1961       unsigned OrOpc = IsVALU ? AMDGPU::V_OR_B32_e64 : AMDGPU::S_OR_B32;
1962 
1963       BuildMI(*MBB, I, DL, TII.get(MovOpc), ImmReg)
1964         .addImm(0xffff);
1965       BuildMI(*MBB, I, DL, TII.get(AndOpc), TmpReg1)
1966         .addReg(LoReg)
1967         .addReg(ImmReg);
1968       BuildMI(*MBB, I, DL, TII.get(OrOpc), DstReg)
1969         .addReg(TmpReg0)
1970         .addReg(TmpReg1);
1971     }
1972 
1973     I.eraseFromParent();
1974     return true;
1975   }
1976 
1977   if (!DstTy.isScalar())
1978     return false;
1979 
1980   if (SrcSize > 32) {
1981     int SubRegIdx = sizeToSubRegIndex(DstSize);
1982     if (SubRegIdx == -1)
1983       return false;
1984 
1985     // Deal with weird cases where the class only partially supports the subreg
1986     // index.
1987     const TargetRegisterClass *SrcWithSubRC
1988       = TRI.getSubClassWithSubReg(SrcRC, SubRegIdx);
1989     if (!SrcWithSubRC)
1990       return false;
1991 
1992     if (SrcWithSubRC != SrcRC) {
1993       if (!RBI.constrainGenericRegister(SrcReg, *SrcWithSubRC, *MRI))
1994         return false;
1995     }
1996 
1997     I.getOperand(1).setSubReg(SubRegIdx);
1998   }
1999 
2000   I.setDesc(TII.get(TargetOpcode::COPY));
2001   return true;
2002 }
2003 
2004 /// \returns true if a bitmask for \p Size bits will be an inline immediate.
2005 static bool shouldUseAndMask(unsigned Size, unsigned &Mask) {
2006   Mask = maskTrailingOnes<unsigned>(Size);
2007   int SignedMask = static_cast<int>(Mask);
2008   return SignedMask >= -16 && SignedMask <= 64;
2009 }
2010 
2011 // Like RegisterBankInfo::getRegBank, but don't assume vcc for s1.
2012 const RegisterBank *AMDGPUInstructionSelector::getArtifactRegBank(
2013   Register Reg, const MachineRegisterInfo &MRI,
2014   const TargetRegisterInfo &TRI) const {
2015   const RegClassOrRegBank &RegClassOrBank = MRI.getRegClassOrRegBank(Reg);
2016   if (auto *RB = RegClassOrBank.dyn_cast<const RegisterBank *>())
2017     return RB;
2018 
2019   // Ignore the type, since we don't use vcc in artifacts.
2020   if (auto *RC = RegClassOrBank.dyn_cast<const TargetRegisterClass *>())
2021     return &RBI.getRegBankFromRegClass(*RC, LLT());
2022   return nullptr;
2023 }
2024 
2025 bool AMDGPUInstructionSelector::selectG_SZA_EXT(MachineInstr &I) const {
2026   bool InReg = I.getOpcode() == AMDGPU::G_SEXT_INREG;
2027   bool Signed = I.getOpcode() == AMDGPU::G_SEXT || InReg;
2028   const DebugLoc &DL = I.getDebugLoc();
2029   MachineBasicBlock &MBB = *I.getParent();
2030   const Register DstReg = I.getOperand(0).getReg();
2031   const Register SrcReg = I.getOperand(1).getReg();
2032 
2033   const LLT DstTy = MRI->getType(DstReg);
2034   const LLT SrcTy = MRI->getType(SrcReg);
2035   const unsigned SrcSize = I.getOpcode() == AMDGPU::G_SEXT_INREG ?
2036     I.getOperand(2).getImm() : SrcTy.getSizeInBits();
2037   const unsigned DstSize = DstTy.getSizeInBits();
2038   if (!DstTy.isScalar())
2039     return false;
2040 
2041   // Artifact casts should never use vcc.
2042   const RegisterBank *SrcBank = getArtifactRegBank(SrcReg, *MRI, TRI);
2043 
2044   // FIXME: This should probably be illegal and split earlier.
2045   if (I.getOpcode() == AMDGPU::G_ANYEXT) {
2046     if (DstSize <= 32)
2047       return selectCOPY(I);
2048 
2049     const TargetRegisterClass *SrcRC =
2050         TRI.getRegClassForTypeOnBank(SrcTy, *SrcBank);
2051     const RegisterBank *DstBank = RBI.getRegBank(DstReg, *MRI, TRI);
2052     const TargetRegisterClass *DstRC =
2053         TRI.getRegClassForSizeOnBank(DstSize, *DstBank);
2054 
2055     Register UndefReg = MRI->createVirtualRegister(SrcRC);
2056     BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2057     BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2058       .addReg(SrcReg)
2059       .addImm(AMDGPU::sub0)
2060       .addReg(UndefReg)
2061       .addImm(AMDGPU::sub1);
2062     I.eraseFromParent();
2063 
2064     return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) &&
2065            RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI);
2066   }
2067 
2068   if (SrcBank->getID() == AMDGPU::VGPRRegBankID && DstSize <= 32) {
2069     // 64-bit should have been split up in RegBankSelect
2070 
2071     // Try to use an and with a mask if it will save code size.
2072     unsigned Mask;
2073     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2074       MachineInstr *ExtI =
2075       BuildMI(MBB, I, DL, TII.get(AMDGPU::V_AND_B32_e32), DstReg)
2076         .addImm(Mask)
2077         .addReg(SrcReg);
2078       I.eraseFromParent();
2079       return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2080     }
2081 
2082     const unsigned BFE = Signed ? AMDGPU::V_BFE_I32_e64 : AMDGPU::V_BFE_U32_e64;
2083     MachineInstr *ExtI =
2084       BuildMI(MBB, I, DL, TII.get(BFE), DstReg)
2085       .addReg(SrcReg)
2086       .addImm(0) // Offset
2087       .addImm(SrcSize); // Width
2088     I.eraseFromParent();
2089     return constrainSelectedInstRegOperands(*ExtI, TII, TRI, RBI);
2090   }
2091 
2092   if (SrcBank->getID() == AMDGPU::SGPRRegBankID && DstSize <= 64) {
2093     const TargetRegisterClass &SrcRC = InReg && DstSize > 32 ?
2094       AMDGPU::SReg_64RegClass : AMDGPU::SReg_32RegClass;
2095     if (!RBI.constrainGenericRegister(SrcReg, SrcRC, *MRI))
2096       return false;
2097 
2098     if (Signed && DstSize == 32 && (SrcSize == 8 || SrcSize == 16)) {
2099       const unsigned SextOpc = SrcSize == 8 ?
2100         AMDGPU::S_SEXT_I32_I8 : AMDGPU::S_SEXT_I32_I16;
2101       BuildMI(MBB, I, DL, TII.get(SextOpc), DstReg)
2102         .addReg(SrcReg);
2103       I.eraseFromParent();
2104       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2105     }
2106 
2107     const unsigned BFE64 = Signed ? AMDGPU::S_BFE_I64 : AMDGPU::S_BFE_U64;
2108     const unsigned BFE32 = Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32;
2109 
2110     // Scalar BFE is encoded as S1[5:0] = offset, S1[22:16]= width.
2111     if (DstSize > 32 && (SrcSize <= 32 || InReg)) {
2112       // We need a 64-bit register source, but the high bits don't matter.
2113       Register ExtReg = MRI->createVirtualRegister(&AMDGPU::SReg_64RegClass);
2114       Register UndefReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2115       unsigned SubReg = InReg ? AMDGPU::sub0 : 0;
2116 
2117       BuildMI(MBB, I, DL, TII.get(AMDGPU::IMPLICIT_DEF), UndefReg);
2118       BuildMI(MBB, I, DL, TII.get(AMDGPU::REG_SEQUENCE), ExtReg)
2119         .addReg(SrcReg, 0, SubReg)
2120         .addImm(AMDGPU::sub0)
2121         .addReg(UndefReg)
2122         .addImm(AMDGPU::sub1);
2123 
2124       BuildMI(MBB, I, DL, TII.get(BFE64), DstReg)
2125         .addReg(ExtReg)
2126         .addImm(SrcSize << 16);
2127 
2128       I.eraseFromParent();
2129       return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_64RegClass, *MRI);
2130     }
2131 
2132     unsigned Mask;
2133     if (!Signed && shouldUseAndMask(SrcSize, Mask)) {
2134       BuildMI(MBB, I, DL, TII.get(AMDGPU::S_AND_B32), DstReg)
2135         .addReg(SrcReg)
2136         .addImm(Mask);
2137     } else {
2138       BuildMI(MBB, I, DL, TII.get(BFE32), DstReg)
2139         .addReg(SrcReg)
2140         .addImm(SrcSize << 16);
2141     }
2142 
2143     I.eraseFromParent();
2144     return RBI.constrainGenericRegister(DstReg, AMDGPU::SReg_32RegClass, *MRI);
2145   }
2146 
2147   return false;
2148 }
2149 
2150 bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
2151   MachineBasicBlock *BB = I.getParent();
2152   MachineOperand &ImmOp = I.getOperand(1);
2153   Register DstReg = I.getOperand(0).getReg();
2154   unsigned Size = MRI->getType(DstReg).getSizeInBits();
2155 
2156   // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
2157   if (ImmOp.isFPImm()) {
2158     const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
2159     ImmOp.ChangeToImmediate(Imm.getZExtValue());
2160   } else if (ImmOp.isCImm()) {
2161     ImmOp.ChangeToImmediate(ImmOp.getCImm()->getSExtValue());
2162   } else {
2163     llvm_unreachable("Not supported by g_constants");
2164   }
2165 
2166   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2167   const bool IsSgpr = DstRB->getID() == AMDGPU::SGPRRegBankID;
2168 
2169   unsigned Opcode;
2170   if (DstRB->getID() == AMDGPU::VCCRegBankID) {
2171     Opcode = STI.isWave32() ? AMDGPU::S_MOV_B32 : AMDGPU::S_MOV_B64;
2172   } else {
2173     Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
2174 
2175     // We should never produce s1 values on banks other than VCC. If the user of
2176     // this already constrained the register, we may incorrectly think it's VCC
2177     // if it wasn't originally.
2178     if (Size == 1)
2179       return false;
2180   }
2181 
2182   if (Size != 64) {
2183     I.setDesc(TII.get(Opcode));
2184     I.addImplicitDefUseOperands(*MF);
2185     return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
2186   }
2187 
2188   const DebugLoc &DL = I.getDebugLoc();
2189 
2190   APInt Imm(Size, I.getOperand(1).getImm());
2191 
2192   MachineInstr *ResInst;
2193   if (IsSgpr && TII.isInlineConstant(Imm)) {
2194     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B64), DstReg)
2195       .addImm(I.getOperand(1).getImm());
2196   } else {
2197     const TargetRegisterClass *RC = IsSgpr ?
2198       &AMDGPU::SReg_32RegClass : &AMDGPU::VGPR_32RegClass;
2199     Register LoReg = MRI->createVirtualRegister(RC);
2200     Register HiReg = MRI->createVirtualRegister(RC);
2201 
2202     BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
2203       .addImm(Imm.trunc(32).getZExtValue());
2204 
2205     BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
2206       .addImm(Imm.ashr(32).getZExtValue());
2207 
2208     ResInst = BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2209       .addReg(LoReg)
2210       .addImm(AMDGPU::sub0)
2211       .addReg(HiReg)
2212       .addImm(AMDGPU::sub1);
2213   }
2214 
2215   // We can't call constrainSelectedInstRegOperands here, because it doesn't
2216   // work for target independent opcodes
2217   I.eraseFromParent();
2218   const TargetRegisterClass *DstRC =
2219     TRI.getConstrainedRegClassForOperand(ResInst->getOperand(0), *MRI);
2220   if (!DstRC)
2221     return true;
2222   return RBI.constrainGenericRegister(DstReg, *DstRC, *MRI);
2223 }
2224 
2225 bool AMDGPUInstructionSelector::selectG_FNEG(MachineInstr &MI) const {
2226   // Only manually handle the f64 SGPR case.
2227   //
2228   // FIXME: This is a workaround for 2.5 different tablegen problems. Because
2229   // the bit ops theoretically have a second result due to the implicit def of
2230   // SCC, the GlobalISelEmitter is overly conservative and rejects it. Fixing
2231   // that is easy by disabling the check. The result works, but uses a
2232   // nonsensical sreg32orlds_and_sreg_1 regclass.
2233   //
2234   // The DAG emitter is more problematic, and incorrectly adds both S_XOR_B32 to
2235   // the variadic REG_SEQUENCE operands.
2236 
2237   Register Dst = MI.getOperand(0).getReg();
2238   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2239   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2240       MRI->getType(Dst) != LLT::scalar(64))
2241     return false;
2242 
2243   Register Src = MI.getOperand(1).getReg();
2244   MachineInstr *Fabs = getOpcodeDef(TargetOpcode::G_FABS, Src, *MRI);
2245   if (Fabs)
2246     Src = Fabs->getOperand(1).getReg();
2247 
2248   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2249       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2250     return false;
2251 
2252   MachineBasicBlock *BB = MI.getParent();
2253   const DebugLoc &DL = MI.getDebugLoc();
2254   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2255   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2256   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2257   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2258 
2259   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2260     .addReg(Src, 0, AMDGPU::sub0);
2261   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2262     .addReg(Src, 0, AMDGPU::sub1);
2263   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2264     .addImm(0x80000000);
2265 
2266   // Set or toggle sign bit.
2267   unsigned Opc = Fabs ? AMDGPU::S_OR_B32 : AMDGPU::S_XOR_B32;
2268   BuildMI(*BB, &MI, DL, TII.get(Opc), OpReg)
2269     .addReg(HiReg)
2270     .addReg(ConstReg);
2271   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2272     .addReg(LoReg)
2273     .addImm(AMDGPU::sub0)
2274     .addReg(OpReg)
2275     .addImm(AMDGPU::sub1);
2276   MI.eraseFromParent();
2277   return true;
2278 }
2279 
2280 // FIXME: This is a workaround for the same tablegen problems as G_FNEG
2281 bool AMDGPUInstructionSelector::selectG_FABS(MachineInstr &MI) const {
2282   Register Dst = MI.getOperand(0).getReg();
2283   const RegisterBank *DstRB = RBI.getRegBank(Dst, *MRI, TRI);
2284   if (DstRB->getID() != AMDGPU::SGPRRegBankID ||
2285       MRI->getType(Dst) != LLT::scalar(64))
2286     return false;
2287 
2288   Register Src = MI.getOperand(1).getReg();
2289   MachineBasicBlock *BB = MI.getParent();
2290   const DebugLoc &DL = MI.getDebugLoc();
2291   Register LoReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2292   Register HiReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2293   Register ConstReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2294   Register OpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2295 
2296   if (!RBI.constrainGenericRegister(Src, AMDGPU::SReg_64RegClass, *MRI) ||
2297       !RBI.constrainGenericRegister(Dst, AMDGPU::SReg_64RegClass, *MRI))
2298     return false;
2299 
2300   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), LoReg)
2301     .addReg(Src, 0, AMDGPU::sub0);
2302   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), HiReg)
2303     .addReg(Src, 0, AMDGPU::sub1);
2304   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_MOV_B32), ConstReg)
2305     .addImm(0x7fffffff);
2306 
2307   // Clear sign bit.
2308   // TODO: Should this used S_BITSET0_*?
2309   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::S_AND_B32), OpReg)
2310     .addReg(HiReg)
2311     .addReg(ConstReg);
2312   BuildMI(*BB, &MI, DL, TII.get(AMDGPU::REG_SEQUENCE), Dst)
2313     .addReg(LoReg)
2314     .addImm(AMDGPU::sub0)
2315     .addReg(OpReg)
2316     .addImm(AMDGPU::sub1);
2317 
2318   MI.eraseFromParent();
2319   return true;
2320 }
2321 
2322 static bool isConstant(const MachineInstr &MI) {
2323   return MI.getOpcode() == TargetOpcode::G_CONSTANT;
2324 }
2325 
2326 void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
2327     const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {
2328 
2329   const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());
2330 
2331   assert(PtrMI);
2332 
2333   if (PtrMI->getOpcode() != TargetOpcode::G_PTR_ADD)
2334     return;
2335 
2336   GEPInfo GEPInfo(*PtrMI);
2337 
2338   for (unsigned i = 1; i != 3; ++i) {
2339     const MachineOperand &GEPOp = PtrMI->getOperand(i);
2340     const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
2341     assert(OpDef);
2342     if (i == 2 && isConstant(*OpDef)) {
2343       // TODO: Could handle constant base + variable offset, but a combine
2344       // probably should have commuted it.
2345       assert(GEPInfo.Imm == 0);
2346       GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
2347       continue;
2348     }
2349     const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
2350     if (OpBank->getID() == AMDGPU::SGPRRegBankID)
2351       GEPInfo.SgprParts.push_back(GEPOp.getReg());
2352     else
2353       GEPInfo.VgprParts.push_back(GEPOp.getReg());
2354   }
2355 
2356   AddrInfo.push_back(GEPInfo);
2357   getAddrModeInfo(*PtrMI, MRI, AddrInfo);
2358 }
2359 
2360 bool AMDGPUInstructionSelector::isSGPR(Register Reg) const {
2361   return RBI.getRegBank(Reg, *MRI, TRI)->getID() == AMDGPU::SGPRRegBankID;
2362 }
2363 
2364 bool AMDGPUInstructionSelector::isInstrUniform(const MachineInstr &MI) const {
2365   if (!MI.hasOneMemOperand())
2366     return false;
2367 
2368   const MachineMemOperand *MMO = *MI.memoperands_begin();
2369   const Value *Ptr = MMO->getValue();
2370 
2371   // UndefValue means this is a load of a kernel input.  These are uniform.
2372   // Sometimes LDS instructions have constant pointers.
2373   // If Ptr is null, then that means this mem operand contains a
2374   // PseudoSourceValue like GOT.
2375   if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
2376       isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
2377     return true;
2378 
2379   if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
2380     return true;
2381 
2382   const Instruction *I = dyn_cast<Instruction>(Ptr);
2383   return I && I->getMetadata("amdgpu.uniform");
2384 }
2385 
2386 bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
2387   for (const GEPInfo &GEPInfo : AddrInfo) {
2388     if (!GEPInfo.VgprParts.empty())
2389       return true;
2390   }
2391   return false;
2392 }
2393 
2394 void AMDGPUInstructionSelector::initM0(MachineInstr &I) const {
2395   const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2396   unsigned AS = PtrTy.getAddressSpace();
2397   if ((AS == AMDGPUAS::LOCAL_ADDRESS || AS == AMDGPUAS::REGION_ADDRESS) &&
2398       STI.ldsRequiresM0Init()) {
2399     MachineBasicBlock *BB = I.getParent();
2400 
2401     // If DS instructions require M0 initialization, insert it before selecting.
2402     BuildMI(*BB, &I, I.getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), AMDGPU::M0)
2403       .addImm(-1);
2404   }
2405 }
2406 
2407 bool AMDGPUInstructionSelector::selectG_LOAD_STORE_ATOMICRMW(
2408   MachineInstr &I) const {
2409   if (I.getOpcode() == TargetOpcode::G_ATOMICRMW_FADD) {
2410     const LLT PtrTy = MRI->getType(I.getOperand(1).getReg());
2411     unsigned AS = PtrTy.getAddressSpace();
2412     if (AS == AMDGPUAS::GLOBAL_ADDRESS)
2413       return selectGlobalAtomicFadd(I, I.getOperand(1), I.getOperand(2));
2414   }
2415 
2416   initM0(I);
2417   return selectImpl(I, *CoverageInfo);
2418 }
2419 
2420 static bool isVCmpResult(Register Reg, MachineRegisterInfo &MRI) {
2421   if (Reg.isPhysical())
2422     return false;
2423 
2424   MachineInstr &MI = *MRI.getUniqueVRegDef(Reg);
2425   const unsigned Opcode = MI.getOpcode();
2426 
2427   if (Opcode == AMDGPU::COPY)
2428     return isVCmpResult(MI.getOperand(1).getReg(), MRI);
2429 
2430   if (Opcode == AMDGPU::G_AND || Opcode == AMDGPU::G_OR ||
2431       Opcode == AMDGPU::G_XOR)
2432     return isVCmpResult(MI.getOperand(1).getReg(), MRI) &&
2433            isVCmpResult(MI.getOperand(2).getReg(), MRI);
2434 
2435   if (Opcode == TargetOpcode::G_INTRINSIC)
2436     return MI.getIntrinsicID() == Intrinsic::amdgcn_class;
2437 
2438   return Opcode == AMDGPU::G_ICMP || Opcode == AMDGPU::G_FCMP;
2439 }
2440 
2441 bool AMDGPUInstructionSelector::selectG_BRCOND(MachineInstr &I) const {
2442   MachineBasicBlock *BB = I.getParent();
2443   MachineOperand &CondOp = I.getOperand(0);
2444   Register CondReg = CondOp.getReg();
2445   const DebugLoc &DL = I.getDebugLoc();
2446 
2447   unsigned BrOpcode;
2448   Register CondPhysReg;
2449   const TargetRegisterClass *ConstrainRC;
2450 
2451   // In SelectionDAG, we inspect the IR block for uniformity metadata to decide
2452   // whether the branch is uniform when selecting the instruction. In
2453   // GlobalISel, we should push that decision into RegBankSelect. Assume for now
2454   // RegBankSelect knows what it's doing if the branch condition is scc, even
2455   // though it currently does not.
2456   if (!isVCC(CondReg, *MRI)) {
2457     if (MRI->getType(CondReg) != LLT::scalar(32))
2458       return false;
2459 
2460     CondPhysReg = AMDGPU::SCC;
2461     BrOpcode = AMDGPU::S_CBRANCH_SCC1;
2462     ConstrainRC = &AMDGPU::SReg_32RegClass;
2463   } else {
2464     // FIXME: Should scc->vcc copies and with exec?
2465 
2466     // Unless the value of CondReg is a result of a V_CMP* instruction then we
2467     // need to insert an and with exec.
2468     if (!isVCmpResult(CondReg, *MRI)) {
2469       const bool Is64 = STI.isWave64();
2470       const unsigned Opcode = Is64 ? AMDGPU::S_AND_B64 : AMDGPU::S_AND_B32;
2471       const Register Exec = Is64 ? AMDGPU::EXEC : AMDGPU::EXEC_LO;
2472 
2473       Register TmpReg = MRI->createVirtualRegister(TRI.getBoolRC());
2474       BuildMI(*BB, &I, DL, TII.get(Opcode), TmpReg)
2475           .addReg(CondReg)
2476           .addReg(Exec);
2477       CondReg = TmpReg;
2478     }
2479 
2480     CondPhysReg = TRI.getVCC();
2481     BrOpcode = AMDGPU::S_CBRANCH_VCCNZ;
2482     ConstrainRC = TRI.getBoolRC();
2483   }
2484 
2485   if (!MRI->getRegClassOrNull(CondReg))
2486     MRI->setRegClass(CondReg, ConstrainRC);
2487 
2488   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), CondPhysReg)
2489     .addReg(CondReg);
2490   BuildMI(*BB, &I, DL, TII.get(BrOpcode))
2491     .addMBB(I.getOperand(1).getMBB());
2492 
2493   I.eraseFromParent();
2494   return true;
2495 }
2496 
2497 bool AMDGPUInstructionSelector::selectG_GLOBAL_VALUE(
2498   MachineInstr &I) const {
2499   Register DstReg = I.getOperand(0).getReg();
2500   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2501   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2502   I.setDesc(TII.get(IsVGPR ? AMDGPU::V_MOV_B32_e32 : AMDGPU::S_MOV_B32));
2503   if (IsVGPR)
2504     I.addOperand(*MF, MachineOperand::CreateReg(AMDGPU::EXEC, false, true));
2505 
2506   return RBI.constrainGenericRegister(
2507     DstReg, IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass, *MRI);
2508 }
2509 
2510 bool AMDGPUInstructionSelector::selectG_PTRMASK(MachineInstr &I) const {
2511   Register DstReg = I.getOperand(0).getReg();
2512   Register SrcReg = I.getOperand(1).getReg();
2513   Register MaskReg = I.getOperand(2).getReg();
2514   LLT Ty = MRI->getType(DstReg);
2515   LLT MaskTy = MRI->getType(MaskReg);
2516   MachineBasicBlock *BB = I.getParent();
2517   const DebugLoc &DL = I.getDebugLoc();
2518 
2519   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2520   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2521   const RegisterBank *MaskRB = RBI.getRegBank(MaskReg, *MRI, TRI);
2522   const bool IsVGPR = DstRB->getID() == AMDGPU::VGPRRegBankID;
2523   if (DstRB != SrcRB) // Should only happen for hand written MIR.
2524     return false;
2525 
2526   // Try to avoid emitting a bit operation when we only need to touch half of
2527   // the 64-bit pointer.
2528   APInt MaskOnes = KnownBits->getKnownOnes(MaskReg).zext(64);
2529   const APInt MaskHi32 = APInt::getHighBitsSet(64, 32);
2530   const APInt MaskLo32 = APInt::getLowBitsSet(64, 32);
2531 
2532   const bool CanCopyLow32 = (MaskOnes & MaskLo32) == MaskLo32;
2533   const bool CanCopyHi32 = (MaskOnes & MaskHi32) == MaskHi32;
2534 
2535   if (!IsVGPR && Ty.getSizeInBits() == 64 &&
2536       !CanCopyLow32 && !CanCopyHi32) {
2537     auto MIB = BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_AND_B64), DstReg)
2538       .addReg(SrcReg)
2539       .addReg(MaskReg);
2540     I.eraseFromParent();
2541     return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
2542   }
2543 
2544   unsigned NewOpc = IsVGPR ? AMDGPU::V_AND_B32_e64 : AMDGPU::S_AND_B32;
2545   const TargetRegisterClass &RegRC
2546     = IsVGPR ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2547 
2548   const TargetRegisterClass *DstRC = TRI.getRegClassForTypeOnBank(Ty, *DstRB);
2549   const TargetRegisterClass *SrcRC = TRI.getRegClassForTypeOnBank(Ty, *SrcRB);
2550   const TargetRegisterClass *MaskRC =
2551       TRI.getRegClassForTypeOnBank(MaskTy, *MaskRB);
2552 
2553   if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2554       !RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2555       !RBI.constrainGenericRegister(MaskReg, *MaskRC, *MRI))
2556     return false;
2557 
2558   if (Ty.getSizeInBits() == 32) {
2559     assert(MaskTy.getSizeInBits() == 32 &&
2560            "ptrmask should have been narrowed during legalize");
2561 
2562     BuildMI(*BB, &I, DL, TII.get(NewOpc), DstReg)
2563       .addReg(SrcReg)
2564       .addReg(MaskReg);
2565     I.eraseFromParent();
2566     return true;
2567   }
2568 
2569   Register HiReg = MRI->createVirtualRegister(&RegRC);
2570   Register LoReg = MRI->createVirtualRegister(&RegRC);
2571 
2572   // Extract the subregisters from the source pointer.
2573   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), LoReg)
2574     .addReg(SrcReg, 0, AMDGPU::sub0);
2575   BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), HiReg)
2576     .addReg(SrcReg, 0, AMDGPU::sub1);
2577 
2578   Register MaskedLo, MaskedHi;
2579 
2580   if (CanCopyLow32) {
2581     // If all the bits in the low half are 1, we only need a copy for it.
2582     MaskedLo = LoReg;
2583   } else {
2584     // Extract the mask subregister and apply the and.
2585     Register MaskLo = MRI->createVirtualRegister(&RegRC);
2586     MaskedLo = MRI->createVirtualRegister(&RegRC);
2587 
2588     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskLo)
2589       .addReg(MaskReg, 0, AMDGPU::sub0);
2590     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedLo)
2591       .addReg(LoReg)
2592       .addReg(MaskLo);
2593   }
2594 
2595   if (CanCopyHi32) {
2596     // If all the bits in the high half are 1, we only need a copy for it.
2597     MaskedHi = HiReg;
2598   } else {
2599     Register MaskHi = MRI->createVirtualRegister(&RegRC);
2600     MaskedHi = MRI->createVirtualRegister(&RegRC);
2601 
2602     BuildMI(*BB, &I, DL, TII.get(AMDGPU::COPY), MaskHi)
2603       .addReg(MaskReg, 0, AMDGPU::sub1);
2604     BuildMI(*BB, &I, DL, TII.get(NewOpc), MaskedHi)
2605       .addReg(HiReg)
2606       .addReg(MaskHi);
2607   }
2608 
2609   BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
2610     .addReg(MaskedLo)
2611     .addImm(AMDGPU::sub0)
2612     .addReg(MaskedHi)
2613     .addImm(AMDGPU::sub1);
2614   I.eraseFromParent();
2615   return true;
2616 }
2617 
2618 /// Return the register to use for the index value, and the subregister to use
2619 /// for the indirectly accessed register.
2620 static std::pair<Register, unsigned>
2621 computeIndirectRegIndex(MachineRegisterInfo &MRI,
2622                         const SIRegisterInfo &TRI,
2623                         const TargetRegisterClass *SuperRC,
2624                         Register IdxReg,
2625                         unsigned EltSize) {
2626   Register IdxBaseReg;
2627   int Offset;
2628 
2629   std::tie(IdxBaseReg, Offset) = AMDGPU::getBaseWithConstantOffset(MRI, IdxReg);
2630   if (IdxBaseReg == AMDGPU::NoRegister) {
2631     // This will happen if the index is a known constant. This should ordinarily
2632     // be legalized out, but handle it as a register just in case.
2633     assert(Offset == 0);
2634     IdxBaseReg = IdxReg;
2635   }
2636 
2637   ArrayRef<int16_t> SubRegs = TRI.getRegSplitParts(SuperRC, EltSize);
2638 
2639   // Skip out of bounds offsets, or else we would end up using an undefined
2640   // register.
2641   if (static_cast<unsigned>(Offset) >= SubRegs.size())
2642     return std::make_pair(IdxReg, SubRegs[0]);
2643   return std::make_pair(IdxBaseReg, SubRegs[Offset]);
2644 }
2645 
2646 bool AMDGPUInstructionSelector::selectG_EXTRACT_VECTOR_ELT(
2647   MachineInstr &MI) const {
2648   Register DstReg = MI.getOperand(0).getReg();
2649   Register SrcReg = MI.getOperand(1).getReg();
2650   Register IdxReg = MI.getOperand(2).getReg();
2651 
2652   LLT DstTy = MRI->getType(DstReg);
2653   LLT SrcTy = MRI->getType(SrcReg);
2654 
2655   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2656   const RegisterBank *SrcRB = RBI.getRegBank(SrcReg, *MRI, TRI);
2657   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2658 
2659   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2660   // into a waterfall loop.
2661   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2662     return false;
2663 
2664   const TargetRegisterClass *SrcRC =
2665       TRI.getRegClassForTypeOnBank(SrcTy, *SrcRB);
2666   const TargetRegisterClass *DstRC =
2667       TRI.getRegClassForTypeOnBank(DstTy, *DstRB);
2668   if (!SrcRC || !DstRC)
2669     return false;
2670   if (!RBI.constrainGenericRegister(SrcReg, *SrcRC, *MRI) ||
2671       !RBI.constrainGenericRegister(DstReg, *DstRC, *MRI) ||
2672       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2673     return false;
2674 
2675   MachineBasicBlock *BB = MI.getParent();
2676   const DebugLoc &DL = MI.getDebugLoc();
2677   const bool Is64 = DstTy.getSizeInBits() == 64;
2678 
2679   unsigned SubReg;
2680   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, SrcRC, IdxReg,
2681                                                      DstTy.getSizeInBits() / 8);
2682 
2683   if (SrcRB->getID() == AMDGPU::SGPRRegBankID) {
2684     if (DstTy.getSizeInBits() != 32 && !Is64)
2685       return false;
2686 
2687     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2688       .addReg(IdxReg);
2689 
2690     unsigned Opc = Is64 ? AMDGPU::S_MOVRELS_B64 : AMDGPU::S_MOVRELS_B32;
2691     BuildMI(*BB, &MI, DL, TII.get(Opc), DstReg)
2692       .addReg(SrcReg, 0, SubReg)
2693       .addReg(SrcReg, RegState::Implicit);
2694     MI.eraseFromParent();
2695     return true;
2696   }
2697 
2698   if (SrcRB->getID() != AMDGPU::VGPRRegBankID || DstTy.getSizeInBits() != 32)
2699     return false;
2700 
2701   if (!STI.useVGPRIndexMode()) {
2702     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2703       .addReg(IdxReg);
2704     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::V_MOVRELS_B32_e32), DstReg)
2705       .addReg(SrcReg, 0, SubReg)
2706       .addReg(SrcReg, RegState::Implicit);
2707     MI.eraseFromParent();
2708     return true;
2709   }
2710 
2711   const MCInstrDesc &GPRIDXDesc =
2712       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*SrcRC), true);
2713   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2714       .addReg(SrcReg)
2715       .addReg(IdxReg)
2716       .addImm(SubReg);
2717 
2718   MI.eraseFromParent();
2719   return true;
2720 }
2721 
2722 // TODO: Fold insert_vector_elt (extract_vector_elt) into movrelsd
2723 bool AMDGPUInstructionSelector::selectG_INSERT_VECTOR_ELT(
2724   MachineInstr &MI) const {
2725   Register DstReg = MI.getOperand(0).getReg();
2726   Register VecReg = MI.getOperand(1).getReg();
2727   Register ValReg = MI.getOperand(2).getReg();
2728   Register IdxReg = MI.getOperand(3).getReg();
2729 
2730   LLT VecTy = MRI->getType(DstReg);
2731   LLT ValTy = MRI->getType(ValReg);
2732   unsigned VecSize = VecTy.getSizeInBits();
2733   unsigned ValSize = ValTy.getSizeInBits();
2734 
2735   const RegisterBank *VecRB = RBI.getRegBank(VecReg, *MRI, TRI);
2736   const RegisterBank *ValRB = RBI.getRegBank(ValReg, *MRI, TRI);
2737   const RegisterBank *IdxRB = RBI.getRegBank(IdxReg, *MRI, TRI);
2738 
2739   assert(VecTy.getElementType() == ValTy);
2740 
2741   // The index must be scalar. If it wasn't RegBankSelect should have moved this
2742   // into a waterfall loop.
2743   if (IdxRB->getID() != AMDGPU::SGPRRegBankID)
2744     return false;
2745 
2746   const TargetRegisterClass *VecRC =
2747       TRI.getRegClassForTypeOnBank(VecTy, *VecRB);
2748   const TargetRegisterClass *ValRC =
2749       TRI.getRegClassForTypeOnBank(ValTy, *ValRB);
2750 
2751   if (!RBI.constrainGenericRegister(VecReg, *VecRC, *MRI) ||
2752       !RBI.constrainGenericRegister(DstReg, *VecRC, *MRI) ||
2753       !RBI.constrainGenericRegister(ValReg, *ValRC, *MRI) ||
2754       !RBI.constrainGenericRegister(IdxReg, AMDGPU::SReg_32RegClass, *MRI))
2755     return false;
2756 
2757   if (VecRB->getID() == AMDGPU::VGPRRegBankID && ValSize != 32)
2758     return false;
2759 
2760   unsigned SubReg;
2761   std::tie(IdxReg, SubReg) = computeIndirectRegIndex(*MRI, TRI, VecRC, IdxReg,
2762                                                      ValSize / 8);
2763 
2764   const bool IndexMode = VecRB->getID() == AMDGPU::VGPRRegBankID &&
2765                          STI.useVGPRIndexMode();
2766 
2767   MachineBasicBlock *BB = MI.getParent();
2768   const DebugLoc &DL = MI.getDebugLoc();
2769 
2770   if (!IndexMode) {
2771     BuildMI(*BB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
2772       .addReg(IdxReg);
2773 
2774     const MCInstrDesc &RegWriteOp = TII.getIndirectRegWriteMovRelPseudo(
2775         VecSize, ValSize, VecRB->getID() == AMDGPU::SGPRRegBankID);
2776     BuildMI(*BB, MI, DL, RegWriteOp, DstReg)
2777         .addReg(VecReg)
2778         .addReg(ValReg)
2779         .addImm(SubReg);
2780     MI.eraseFromParent();
2781     return true;
2782   }
2783 
2784   const MCInstrDesc &GPRIDXDesc =
2785       TII.getIndirectGPRIDXPseudo(TRI.getRegSizeInBits(*VecRC), false);
2786   BuildMI(*BB, MI, DL, GPRIDXDesc, DstReg)
2787       .addReg(VecReg)
2788       .addReg(ValReg)
2789       .addReg(IdxReg)
2790       .addImm(SubReg);
2791 
2792   MI.eraseFromParent();
2793   return true;
2794 }
2795 
2796 static bool isZeroOrUndef(int X) {
2797   return X == 0 || X == -1;
2798 }
2799 
2800 static bool isOneOrUndef(int X) {
2801   return X == 1 || X == -1;
2802 }
2803 
2804 static bool isZeroOrOneOrUndef(int X) {
2805   return X == 0 || X == 1 || X == -1;
2806 }
2807 
2808 // Normalize a VOP3P shuffle mask to refer to the low/high half of a single
2809 // 32-bit register.
2810 static Register normalizeVOP3PMask(int NewMask[2], Register Src0, Register Src1,
2811                                    ArrayRef<int> Mask) {
2812   NewMask[0] = Mask[0];
2813   NewMask[1] = Mask[1];
2814   if (isZeroOrOneOrUndef(Mask[0]) && isZeroOrOneOrUndef(Mask[1]))
2815     return Src0;
2816 
2817   assert(NewMask[0] == 2 || NewMask[0] == 3 || NewMask[0] == -1);
2818   assert(NewMask[1] == 2 || NewMask[1] == 3 || NewMask[1] == -1);
2819 
2820   // Shift the mask inputs to be 0/1;
2821   NewMask[0] = NewMask[0] == -1 ? -1 : NewMask[0] - 2;
2822   NewMask[1] = NewMask[1] == -1 ? -1 : NewMask[1] - 2;
2823   return Src1;
2824 }
2825 
2826 // This is only legal with VOP3P instructions as an aid to op_sel matching.
2827 bool AMDGPUInstructionSelector::selectG_SHUFFLE_VECTOR(
2828   MachineInstr &MI) const {
2829   Register DstReg = MI.getOperand(0).getReg();
2830   Register Src0Reg = MI.getOperand(1).getReg();
2831   Register Src1Reg = MI.getOperand(2).getReg();
2832   ArrayRef<int> ShufMask = MI.getOperand(3).getShuffleMask();
2833 
2834   const LLT V2S16 = LLT::fixed_vector(2, 16);
2835   if (MRI->getType(DstReg) != V2S16 || MRI->getType(Src0Reg) != V2S16)
2836     return false;
2837 
2838   if (!AMDGPU::isLegalVOP3PShuffleMask(ShufMask))
2839     return false;
2840 
2841   assert(ShufMask.size() == 2);
2842   assert(STI.hasSDWA() && "no target has VOP3P but not SDWA");
2843 
2844   MachineBasicBlock *MBB = MI.getParent();
2845   const DebugLoc &DL = MI.getDebugLoc();
2846 
2847   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
2848   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
2849   const TargetRegisterClass &RC = IsVALU ?
2850     AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
2851 
2852   // Handle the degenerate case which should have folded out.
2853   if (ShufMask[0] == -1 && ShufMask[1] == -1) {
2854     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::IMPLICIT_DEF), DstReg);
2855 
2856     MI.eraseFromParent();
2857     return RBI.constrainGenericRegister(DstReg, RC, *MRI);
2858   }
2859 
2860   // A legal VOP3P mask only reads one of the sources.
2861   int Mask[2];
2862   Register SrcVec = normalizeVOP3PMask(Mask, Src0Reg, Src1Reg, ShufMask);
2863 
2864   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI) ||
2865       !RBI.constrainGenericRegister(SrcVec, RC, *MRI))
2866     return false;
2867 
2868   // TODO: This also should have been folded out
2869   if (isZeroOrUndef(Mask[0]) && isOneOrUndef(Mask[1])) {
2870     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::COPY), DstReg)
2871       .addReg(SrcVec);
2872 
2873     MI.eraseFromParent();
2874     return true;
2875   }
2876 
2877   if (Mask[0] == 1 && Mask[1] == -1) {
2878     if (IsVALU) {
2879       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
2880         .addImm(16)
2881         .addReg(SrcVec);
2882     } else {
2883       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
2884         .addReg(SrcVec)
2885         .addImm(16);
2886     }
2887   } else if (Mask[0] == -1 && Mask[1] == 0) {
2888     if (IsVALU) {
2889       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHLREV_B32_e64), DstReg)
2890         .addImm(16)
2891         .addReg(SrcVec);
2892     } else {
2893       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHL_B32), DstReg)
2894         .addReg(SrcVec)
2895         .addImm(16);
2896     }
2897   } else if (Mask[0] == 0 && Mask[1] == 0) {
2898     if (IsVALU) {
2899       // Write low half of the register into the high half.
2900       MachineInstr *MovSDWA =
2901         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2902         .addImm(0)                             // $src0_modifiers
2903         .addReg(SrcVec)                        // $src0
2904         .addImm(0)                             // $clamp
2905         .addImm(AMDGPU::SDWA::WORD_1)          // $dst_sel
2906         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2907         .addImm(AMDGPU::SDWA::WORD_0)          // $src0_sel
2908         .addReg(SrcVec, RegState::Implicit);
2909       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2910     } else {
2911       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2912         .addReg(SrcVec)
2913         .addReg(SrcVec);
2914     }
2915   } else if (Mask[0] == 1 && Mask[1] == 1) {
2916     if (IsVALU) {
2917       // Write high half of the register into the low half.
2918       MachineInstr *MovSDWA =
2919         BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_MOV_B32_sdwa), DstReg)
2920         .addImm(0)                             // $src0_modifiers
2921         .addReg(SrcVec)                        // $src0
2922         .addImm(0)                             // $clamp
2923         .addImm(AMDGPU::SDWA::WORD_0)          // $dst_sel
2924         .addImm(AMDGPU::SDWA::UNUSED_PRESERVE) // $dst_unused
2925         .addImm(AMDGPU::SDWA::WORD_1)          // $src0_sel
2926         .addReg(SrcVec, RegState::Implicit);
2927       MovSDWA->tieOperands(0, MovSDWA->getNumOperands() - 1);
2928     } else {
2929       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_HH_B32_B16), DstReg)
2930         .addReg(SrcVec)
2931         .addReg(SrcVec);
2932     }
2933   } else if (Mask[0] == 1 && Mask[1] == 0) {
2934     if (IsVALU) {
2935       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_ALIGNBIT_B32_e64), DstReg)
2936         .addReg(SrcVec)
2937         .addReg(SrcVec)
2938         .addImm(16);
2939     } else {
2940       Register TmpReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
2941       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), TmpReg)
2942         .addReg(SrcVec)
2943         .addImm(16);
2944       BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_PACK_LL_B32_B16), DstReg)
2945         .addReg(TmpReg)
2946         .addReg(SrcVec);
2947     }
2948   } else
2949     llvm_unreachable("all shuffle masks should be handled");
2950 
2951   MI.eraseFromParent();
2952   return true;
2953 }
2954 
2955 bool AMDGPUInstructionSelector::selectAMDGPU_BUFFER_ATOMIC_FADD(
2956   MachineInstr &MI) const {
2957   if (STI.hasGFX90AInsts())
2958     return selectImpl(MI, *CoverageInfo);
2959 
2960   MachineBasicBlock *MBB = MI.getParent();
2961   const DebugLoc &DL = MI.getDebugLoc();
2962 
2963   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
2964     Function &F = MBB->getParent()->getFunction();
2965     DiagnosticInfoUnsupported
2966       NoFpRet(F, "return versions of fp atomics not supported",
2967               MI.getDebugLoc(), DS_Error);
2968     F.getContext().diagnose(NoFpRet);
2969     return false;
2970   }
2971 
2972   // FIXME: This is only needed because tablegen requires number of dst operands
2973   // in match and replace pattern to be the same. Otherwise patterns can be
2974   // exported from SDag path.
2975   MachineOperand &VDataIn = MI.getOperand(1);
2976   MachineOperand &VIndex = MI.getOperand(3);
2977   MachineOperand &VOffset = MI.getOperand(4);
2978   MachineOperand &SOffset = MI.getOperand(5);
2979   int16_t Offset = MI.getOperand(6).getImm();
2980 
2981   bool HasVOffset = !isOperandImmEqual(VOffset, 0, *MRI);
2982   bool HasVIndex = !isOperandImmEqual(VIndex, 0, *MRI);
2983 
2984   unsigned Opcode;
2985   if (HasVOffset) {
2986     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN
2987                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN;
2988   } else {
2989     Opcode = HasVIndex ? AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN
2990                        : AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET;
2991   }
2992 
2993   if (MRI->getType(VDataIn.getReg()).isVector()) {
2994     switch (Opcode) {
2995     case AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN:
2996       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN;
2997       break;
2998     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFEN:
2999       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFEN;
3000       break;
3001     case AMDGPU::BUFFER_ATOMIC_ADD_F32_IDXEN:
3002       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_IDXEN;
3003       break;
3004     case AMDGPU::BUFFER_ATOMIC_ADD_F32_OFFSET:
3005       Opcode = AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_OFFSET;
3006       break;
3007     }
3008   }
3009 
3010   auto I = BuildMI(*MBB, MI, DL, TII.get(Opcode));
3011   I.add(VDataIn);
3012 
3013   if (Opcode == AMDGPU::BUFFER_ATOMIC_ADD_F32_BOTHEN ||
3014       Opcode == AMDGPU::BUFFER_ATOMIC_PK_ADD_F16_BOTHEN) {
3015     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3016     BuildMI(*MBB, &*I, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3017       .addReg(VIndex.getReg())
3018       .addImm(AMDGPU::sub0)
3019       .addReg(VOffset.getReg())
3020       .addImm(AMDGPU::sub1);
3021 
3022     I.addReg(IdxReg);
3023   } else if (HasVIndex) {
3024     I.add(VIndex);
3025   } else if (HasVOffset) {
3026     I.add(VOffset);
3027   }
3028 
3029   I.add(MI.getOperand(2)); // rsrc
3030   I.add(SOffset);
3031   I.addImm(Offset);
3032   I.addImm(MI.getOperand(7).getImm()); // cpol
3033   I.cloneMemRefs(MI);
3034 
3035   MI.eraseFromParent();
3036 
3037   return true;
3038 }
3039 
3040 bool AMDGPUInstructionSelector::selectGlobalAtomicFadd(
3041   MachineInstr &MI, MachineOperand &AddrOp, MachineOperand &DataOp) const {
3042 
3043   if (STI.hasGFX90AInsts()) {
3044     // gfx90a adds return versions of the global atomic fadd instructions so no
3045     // special handling is required.
3046     return selectImpl(MI, *CoverageInfo);
3047   }
3048 
3049   MachineBasicBlock *MBB = MI.getParent();
3050   const DebugLoc &DL = MI.getDebugLoc();
3051 
3052   if (!MRI->use_nodbg_empty(MI.getOperand(0).getReg())) {
3053     Function &F = MBB->getParent()->getFunction();
3054     DiagnosticInfoUnsupported
3055       NoFpRet(F, "return versions of fp atomics not supported",
3056               MI.getDebugLoc(), DS_Error);
3057     F.getContext().diagnose(NoFpRet);
3058     return false;
3059   }
3060 
3061   // FIXME: This is only needed because tablegen requires number of dst operands
3062   // in match and replace pattern to be the same. Otherwise patterns can be
3063   // exported from SDag path.
3064   auto Addr = selectFlatOffsetImpl(AddrOp, SIInstrFlags::FlatGlobal);
3065 
3066   Register Data = DataOp.getReg();
3067   const unsigned Opc = MRI->getType(Data).isVector() ?
3068     AMDGPU::GLOBAL_ATOMIC_PK_ADD_F16 : AMDGPU::GLOBAL_ATOMIC_ADD_F32;
3069   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3070     .addReg(Addr.first)
3071     .addReg(Data)
3072     .addImm(Addr.second)
3073     .addImm(0) // cpol
3074     .cloneMemRefs(MI);
3075 
3076   MI.eraseFromParent();
3077   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3078 }
3079 
3080 bool AMDGPUInstructionSelector::selectBufferLoadLds(MachineInstr &MI) const {
3081   unsigned Opc;
3082   unsigned Size = MI.getOperand(3).getImm();
3083 
3084   // The struct intrinsic variants add one additional operand over raw.
3085   const bool HasVIndex = MI.getNumOperands() == 9;
3086   Register VIndex;
3087   int OpOffset = 0;
3088   if (HasVIndex) {
3089     VIndex = MI.getOperand(4).getReg();
3090     OpOffset = 1;
3091   }
3092 
3093   Register VOffset = MI.getOperand(4 + OpOffset).getReg();
3094   Optional<ValueAndVReg> MaybeVOffset =
3095       getIConstantVRegValWithLookThrough(VOffset, *MRI);
3096   const bool HasVOffset = !MaybeVOffset || MaybeVOffset->Value.getZExtValue();
3097 
3098   switch (Size) {
3099   default:
3100     return false;
3101   case 1:
3102     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_BOTHEN
3103                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_IDXEN
3104                     : HasVOffset ? AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFEN
3105                                  : AMDGPU::BUFFER_LOAD_UBYTE_LDS_OFFSET;
3106     break;
3107   case 2:
3108     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_BOTHEN
3109                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_IDXEN
3110                     : HasVOffset ? AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFEN
3111                                  : AMDGPU::BUFFER_LOAD_USHORT_LDS_OFFSET;
3112     break;
3113   case 4:
3114     Opc = HasVIndex ? HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_BOTHEN
3115                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_IDXEN
3116                     : HasVOffset ? AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFEN
3117                                  : AMDGPU::BUFFER_LOAD_DWORD_LDS_OFFSET;
3118     break;
3119   }
3120 
3121   MachineBasicBlock *MBB = MI.getParent();
3122   const DebugLoc &DL = MI.getDebugLoc();
3123   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3124     .add(MI.getOperand(2));
3125 
3126   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc));
3127 
3128   if (HasVIndex && HasVOffset) {
3129     Register IdxReg = MRI->createVirtualRegister(TRI.getVGPR64Class());
3130     BuildMI(*MBB, &*MIB, DL, TII.get(AMDGPU::REG_SEQUENCE), IdxReg)
3131       .addReg(VIndex)
3132       .addImm(AMDGPU::sub0)
3133       .addReg(VOffset)
3134       .addImm(AMDGPU::sub1);
3135 
3136     MIB.addReg(IdxReg);
3137   } else if (HasVIndex) {
3138     MIB.addReg(VIndex);
3139   } else if (HasVOffset) {
3140     MIB.addReg(VOffset);
3141   }
3142 
3143   MIB.add(MI.getOperand(1));            // rsrc
3144   MIB.add(MI.getOperand(5 + OpOffset)); // soffset
3145   MIB.add(MI.getOperand(6 + OpOffset)); // imm offset
3146   unsigned Aux = MI.getOperand(7 + OpOffset).getImm();
3147   MIB.addImm(Aux & AMDGPU::CPol::ALL);  // cpol
3148   MIB.addImm((Aux >> 3) & 1);           // swz
3149 
3150   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3151   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3152   LoadPtrI.Offset = MI.getOperand(6 + OpOffset).getImm();
3153   MachinePointerInfo StorePtrI = LoadPtrI;
3154   StorePtrI.V = nullptr;
3155   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3156 
3157   auto F = LoadMMO->getFlags() &
3158            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3159   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3160                                      Size, LoadMMO->getBaseAlign());
3161 
3162   MachineMemOperand *StoreMMO =
3163       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3164                                sizeof(int32_t), LoadMMO->getBaseAlign());
3165 
3166   MIB.setMemRefs({LoadMMO, StoreMMO});
3167 
3168   MI.eraseFromParent();
3169   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3170 }
3171 
3172 /// Match a zero extend from a 32-bit value to 64-bits.
3173 static Register matchZeroExtendFromS32(MachineRegisterInfo &MRI, Register Reg) {
3174   Register ZExtSrc;
3175   if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))
3176     return MRI.getType(ZExtSrc) == LLT::scalar(32) ? ZExtSrc : Register();
3177 
3178   // Match legalized form %zext = G_MERGE_VALUES (s32 %x), (s32 0)
3179   const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
3180   if (Def->getOpcode() != AMDGPU::G_MERGE_VALUES)
3181     return false;
3182 
3183   if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {
3184     return Def->getOperand(1).getReg();
3185   }
3186 
3187   return Register();
3188 }
3189 
3190 bool AMDGPUInstructionSelector::selectGlobalLoadLds(MachineInstr &MI) const{
3191   unsigned Opc;
3192   unsigned Size = MI.getOperand(3).getImm();
3193 
3194   switch (Size) {
3195   default:
3196     return false;
3197   case 1:
3198     Opc = AMDGPU::GLOBAL_LOAD_LDS_UBYTE;
3199     break;
3200   case 2:
3201     Opc = AMDGPU::GLOBAL_LOAD_LDS_USHORT;
3202     break;
3203   case 4:
3204     Opc = AMDGPU::GLOBAL_LOAD_LDS_DWORD;
3205     break;
3206   }
3207 
3208   MachineBasicBlock *MBB = MI.getParent();
3209   const DebugLoc &DL = MI.getDebugLoc();
3210   BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::COPY), AMDGPU::M0)
3211     .add(MI.getOperand(2));
3212 
3213   Register Addr = MI.getOperand(1).getReg();
3214   Register VOffset;
3215   // Try to split SAddr and VOffset. Global and LDS pointers share the same
3216   // immediate offset, so we cannot use a regular SelectGlobalSAddr().
3217   if (!isSGPR(Addr)) {
3218     auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3219     if (isSGPR(AddrDef->Reg)) {
3220       Addr = AddrDef->Reg;
3221     } else if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3222       Register SAddr =
3223           getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3224       if (SAddr && isSGPR(SAddr)) {
3225         Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3226         if (Register Off = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3227           Addr = SAddr;
3228           VOffset = Off;
3229         }
3230       }
3231     }
3232   }
3233 
3234   if (isSGPR(Addr)) {
3235     Opc = AMDGPU::getGlobalSaddrOp(Opc);
3236     if (!VOffset) {
3237       VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3238       BuildMI(*MBB, &MI, DL, TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3239         .addImm(0);
3240     }
3241   }
3242 
3243   auto MIB = BuildMI(*MBB, &MI, DL, TII.get(Opc))
3244     .addReg(Addr);
3245 
3246   if (isSGPR(Addr))
3247     MIB.addReg(VOffset);
3248 
3249   MIB.add(MI.getOperand(4))  // offset
3250      .add(MI.getOperand(5)); // cpol
3251 
3252   MachineMemOperand *LoadMMO = *MI.memoperands_begin();
3253   MachinePointerInfo LoadPtrI = LoadMMO->getPointerInfo();
3254   LoadPtrI.Offset = MI.getOperand(4).getImm();
3255   MachinePointerInfo StorePtrI = LoadPtrI;
3256   LoadPtrI.AddrSpace = AMDGPUAS::GLOBAL_ADDRESS;
3257   StorePtrI.AddrSpace = AMDGPUAS::LOCAL_ADDRESS;
3258   auto F = LoadMMO->getFlags() &
3259            ~(MachineMemOperand::MOStore | MachineMemOperand::MOLoad);
3260   LoadMMO = MF->getMachineMemOperand(LoadPtrI, F | MachineMemOperand::MOLoad,
3261                                      Size, LoadMMO->getBaseAlign());
3262   MachineMemOperand *StoreMMO =
3263       MF->getMachineMemOperand(StorePtrI, F | MachineMemOperand::MOStore,
3264                                sizeof(int32_t), Align(4));
3265 
3266   MIB.setMemRefs({LoadMMO, StoreMMO});
3267 
3268   MI.eraseFromParent();
3269   return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
3270 }
3271 
3272 bool AMDGPUInstructionSelector::selectBVHIntrinsic(MachineInstr &MI) const{
3273   MI.setDesc(TII.get(MI.getOperand(1).getImm()));
3274   MI.removeOperand(1);
3275   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3276   return true;
3277 }
3278 
3279 bool AMDGPUInstructionSelector::selectSMFMACIntrin(MachineInstr &MI) const {
3280   unsigned Opc;
3281   switch (MI.getIntrinsicID()) {
3282   case Intrinsic::amdgcn_smfmac_f32_16x16x32_f16:
3283     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_F16_e64;
3284     break;
3285   case Intrinsic::amdgcn_smfmac_f32_32x32x16_f16:
3286     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_F16_e64;
3287     break;
3288   case Intrinsic::amdgcn_smfmac_f32_16x16x32_bf16:
3289     Opc = AMDGPU::V_SMFMAC_F32_16X16X32_BF16_e64;
3290     break;
3291   case Intrinsic::amdgcn_smfmac_f32_32x32x16_bf16:
3292     Opc = AMDGPU::V_SMFMAC_F32_32X32X16_BF16_e64;
3293     break;
3294   case Intrinsic::amdgcn_smfmac_i32_16x16x64_i8:
3295     Opc = AMDGPU::V_SMFMAC_I32_16X16X64_I8_e64;
3296     break;
3297   case Intrinsic::amdgcn_smfmac_i32_32x32x32_i8:
3298     Opc = AMDGPU::V_SMFMAC_I32_32X32X32_I8_e64;
3299     break;
3300   default:
3301     llvm_unreachable("unhandled smfmac intrinsic");
3302   }
3303 
3304   auto VDst_In = MI.getOperand(4);
3305 
3306   MI.setDesc(TII.get(Opc));
3307   MI.removeOperand(4); // VDst_In
3308   MI.removeOperand(1); // Intrinsic ID
3309   MI.addOperand(VDst_In); // Readd VDst_In to the end
3310   MI.addImplicitDefUseOperands(*MI.getParent()->getParent());
3311   return true;
3312 }
3313 
3314 bool AMDGPUInstructionSelector::selectWaveAddress(MachineInstr &MI) const {
3315   Register DstReg = MI.getOperand(0).getReg();
3316   Register SrcReg = MI.getOperand(1).getReg();
3317   const RegisterBank *DstRB = RBI.getRegBank(DstReg, *MRI, TRI);
3318   const bool IsVALU = DstRB->getID() == AMDGPU::VGPRRegBankID;
3319   MachineBasicBlock *MBB = MI.getParent();
3320   const DebugLoc &DL = MI.getDebugLoc();
3321 
3322   if (IsVALU) {
3323     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::V_LSHRREV_B32_e64), DstReg)
3324       .addImm(Subtarget->getWavefrontSizeLog2())
3325       .addReg(SrcReg);
3326   } else {
3327     BuildMI(*MBB, MI, DL, TII.get(AMDGPU::S_LSHR_B32), DstReg)
3328       .addReg(SrcReg)
3329       .addImm(Subtarget->getWavefrontSizeLog2());
3330   }
3331 
3332   const TargetRegisterClass &RC =
3333       IsVALU ? AMDGPU::VGPR_32RegClass : AMDGPU::SReg_32RegClass;
3334   if (!RBI.constrainGenericRegister(DstReg, RC, *MRI))
3335     return false;
3336 
3337   MI.eraseFromParent();
3338   return true;
3339 }
3340 
3341 bool AMDGPUInstructionSelector::select(MachineInstr &I) {
3342   if (I.isPHI())
3343     return selectPHI(I);
3344 
3345   if (!I.isPreISelOpcode()) {
3346     if (I.isCopy())
3347       return selectCOPY(I);
3348     return true;
3349   }
3350 
3351   switch (I.getOpcode()) {
3352   case TargetOpcode::G_AND:
3353   case TargetOpcode::G_OR:
3354   case TargetOpcode::G_XOR:
3355     if (selectImpl(I, *CoverageInfo))
3356       return true;
3357     return selectG_AND_OR_XOR(I);
3358   case TargetOpcode::G_ADD:
3359   case TargetOpcode::G_SUB:
3360     if (selectImpl(I, *CoverageInfo))
3361       return true;
3362     return selectG_ADD_SUB(I);
3363   case TargetOpcode::G_UADDO:
3364   case TargetOpcode::G_USUBO:
3365   case TargetOpcode::G_UADDE:
3366   case TargetOpcode::G_USUBE:
3367     return selectG_UADDO_USUBO_UADDE_USUBE(I);
3368   case AMDGPU::G_AMDGPU_MAD_U64_U32:
3369   case AMDGPU::G_AMDGPU_MAD_I64_I32:
3370     return selectG_AMDGPU_MAD_64_32(I);
3371   case TargetOpcode::G_INTTOPTR:
3372   case TargetOpcode::G_BITCAST:
3373   case TargetOpcode::G_PTRTOINT:
3374     return selectCOPY(I);
3375   case TargetOpcode::G_CONSTANT:
3376   case TargetOpcode::G_FCONSTANT:
3377     return selectG_CONSTANT(I);
3378   case TargetOpcode::G_FNEG:
3379     if (selectImpl(I, *CoverageInfo))
3380       return true;
3381     return selectG_FNEG(I);
3382   case TargetOpcode::G_FABS:
3383     if (selectImpl(I, *CoverageInfo))
3384       return true;
3385     return selectG_FABS(I);
3386   case TargetOpcode::G_EXTRACT:
3387     return selectG_EXTRACT(I);
3388   case TargetOpcode::G_MERGE_VALUES:
3389   case TargetOpcode::G_BUILD_VECTOR:
3390   case TargetOpcode::G_CONCAT_VECTORS:
3391     return selectG_MERGE_VALUES(I);
3392   case TargetOpcode::G_UNMERGE_VALUES:
3393     return selectG_UNMERGE_VALUES(I);
3394   case TargetOpcode::G_BUILD_VECTOR_TRUNC:
3395     return selectG_BUILD_VECTOR_TRUNC(I);
3396   case TargetOpcode::G_PTR_ADD:
3397     return selectG_PTR_ADD(I);
3398   case TargetOpcode::G_IMPLICIT_DEF:
3399     return selectG_IMPLICIT_DEF(I);
3400   case TargetOpcode::G_FREEZE:
3401     return selectCOPY(I);
3402   case TargetOpcode::G_INSERT:
3403     return selectG_INSERT(I);
3404   case TargetOpcode::G_INTRINSIC:
3405     return selectG_INTRINSIC(I);
3406   case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
3407     return selectG_INTRINSIC_W_SIDE_EFFECTS(I);
3408   case TargetOpcode::G_ICMP:
3409     if (selectG_ICMP(I))
3410       return true;
3411     return selectImpl(I, *CoverageInfo);
3412   case TargetOpcode::G_LOAD:
3413   case TargetOpcode::G_STORE:
3414   case TargetOpcode::G_ATOMIC_CMPXCHG:
3415   case TargetOpcode::G_ATOMICRMW_XCHG:
3416   case TargetOpcode::G_ATOMICRMW_ADD:
3417   case TargetOpcode::G_ATOMICRMW_SUB:
3418   case TargetOpcode::G_ATOMICRMW_AND:
3419   case TargetOpcode::G_ATOMICRMW_OR:
3420   case TargetOpcode::G_ATOMICRMW_XOR:
3421   case TargetOpcode::G_ATOMICRMW_MIN:
3422   case TargetOpcode::G_ATOMICRMW_MAX:
3423   case TargetOpcode::G_ATOMICRMW_UMIN:
3424   case TargetOpcode::G_ATOMICRMW_UMAX:
3425   case TargetOpcode::G_ATOMICRMW_FADD:
3426   case AMDGPU::G_AMDGPU_ATOMIC_INC:
3427   case AMDGPU::G_AMDGPU_ATOMIC_DEC:
3428   case AMDGPU::G_AMDGPU_ATOMIC_FMIN:
3429   case AMDGPU::G_AMDGPU_ATOMIC_FMAX:
3430     return selectG_LOAD_STORE_ATOMICRMW(I);
3431   case TargetOpcode::G_SELECT:
3432     return selectG_SELECT(I);
3433   case TargetOpcode::G_TRUNC:
3434     return selectG_TRUNC(I);
3435   case TargetOpcode::G_SEXT:
3436   case TargetOpcode::G_ZEXT:
3437   case TargetOpcode::G_ANYEXT:
3438   case TargetOpcode::G_SEXT_INREG:
3439     if (selectImpl(I, *CoverageInfo))
3440       return true;
3441     return selectG_SZA_EXT(I);
3442   case TargetOpcode::G_BRCOND:
3443     return selectG_BRCOND(I);
3444   case TargetOpcode::G_GLOBAL_VALUE:
3445     return selectG_GLOBAL_VALUE(I);
3446   case TargetOpcode::G_PTRMASK:
3447     return selectG_PTRMASK(I);
3448   case TargetOpcode::G_EXTRACT_VECTOR_ELT:
3449     return selectG_EXTRACT_VECTOR_ELT(I);
3450   case TargetOpcode::G_INSERT_VECTOR_ELT:
3451     return selectG_INSERT_VECTOR_ELT(I);
3452   case TargetOpcode::G_SHUFFLE_VECTOR:
3453     return selectG_SHUFFLE_VECTOR(I);
3454   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD:
3455   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_LOAD_D16:
3456   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE:
3457   case AMDGPU::G_AMDGPU_INTRIN_IMAGE_STORE_D16: {
3458     const AMDGPU::ImageDimIntrinsicInfo *Intr
3459       = AMDGPU::getImageDimIntrinsicInfo(I.getIntrinsicID());
3460     assert(Intr && "not an image intrinsic with image pseudo");
3461     return selectImageIntrinsic(I, Intr);
3462   }
3463   case AMDGPU::G_AMDGPU_INTRIN_BVH_INTERSECT_RAY:
3464     return selectBVHIntrinsic(I);
3465   case AMDGPU::G_AMDGPU_BUFFER_ATOMIC_FADD:
3466     return selectAMDGPU_BUFFER_ATOMIC_FADD(I);
3467   case AMDGPU::G_SBFX:
3468   case AMDGPU::G_UBFX:
3469     return selectG_SBFX_UBFX(I);
3470   case AMDGPU::G_SI_CALL:
3471     I.setDesc(TII.get(AMDGPU::SI_CALL));
3472     return true;
3473   case AMDGPU::G_AMDGPU_WAVE_ADDRESS:
3474     return selectWaveAddress(I);
3475   default:
3476     return selectImpl(I, *CoverageInfo);
3477   }
3478   return false;
3479 }
3480 
3481 InstructionSelector::ComplexRendererFns
3482 AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
3483   return {{
3484       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3485   }};
3486 
3487 }
3488 
3489 std::pair<Register, unsigned>
3490 AMDGPUInstructionSelector::selectVOP3ModsImpl(MachineOperand &Root,
3491                                               bool AllowAbs) const {
3492   Register Src = Root.getReg();
3493   Register OrigSrc = Src;
3494   unsigned Mods = 0;
3495   MachineInstr *MI = getDefIgnoringCopies(Src, *MRI);
3496 
3497   if (MI && MI->getOpcode() == AMDGPU::G_FNEG) {
3498     Src = MI->getOperand(1).getReg();
3499     Mods |= SISrcMods::NEG;
3500     MI = getDefIgnoringCopies(Src, *MRI);
3501   }
3502 
3503   if (AllowAbs && MI && MI->getOpcode() == AMDGPU::G_FABS) {
3504     Src = MI->getOperand(1).getReg();
3505     Mods |= SISrcMods::ABS;
3506   }
3507 
3508   if (Mods != 0 &&
3509       RBI.getRegBank(Src, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID) {
3510     MachineInstr *UseMI = Root.getParent();
3511 
3512     // If we looked through copies to find source modifiers on an SGPR operand,
3513     // we now have an SGPR register source. To avoid potentially violating the
3514     // constant bus restriction, we need to insert a copy to a VGPR.
3515     Register VGPRSrc = MRI->cloneVirtualRegister(OrigSrc);
3516     BuildMI(*UseMI->getParent(), UseMI, UseMI->getDebugLoc(),
3517             TII.get(AMDGPU::COPY), VGPRSrc)
3518       .addReg(Src);
3519     Src = VGPRSrc;
3520   }
3521 
3522   return std::make_pair(Src, Mods);
3523 }
3524 
3525 ///
3526 /// This will select either an SGPR or VGPR operand and will save us from
3527 /// having to write an extra tablegen pattern.
3528 InstructionSelector::ComplexRendererFns
3529 AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
3530   return {{
3531       [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
3532   }};
3533 }
3534 
3535 InstructionSelector::ComplexRendererFns
3536 AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
3537   Register Src;
3538   unsigned Mods;
3539   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3540 
3541   return {{
3542       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3543       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3544       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3545       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3546   }};
3547 }
3548 
3549 InstructionSelector::ComplexRendererFns
3550 AMDGPUInstructionSelector::selectVOP3BMods0(MachineOperand &Root) const {
3551   Register Src;
3552   unsigned Mods;
3553   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3554 
3555   return {{
3556       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3557       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }, // src0_mods
3558       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },    // clamp
3559       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }     // omod
3560   }};
3561 }
3562 
3563 InstructionSelector::ComplexRendererFns
3564 AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
3565   return {{
3566       [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
3567       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
3568       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
3569   }};
3570 }
3571 
3572 InstructionSelector::ComplexRendererFns
3573 AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
3574   Register Src;
3575   unsigned Mods;
3576   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3577 
3578   return {{
3579       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3580       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3581   }};
3582 }
3583 
3584 InstructionSelector::ComplexRendererFns
3585 AMDGPUInstructionSelector::selectVOP3BMods(MachineOperand &Root) const {
3586   Register Src;
3587   unsigned Mods;
3588   std::tie(Src, Mods) = selectVOP3ModsImpl(Root, /* AllowAbs */ false);
3589 
3590   return {{
3591       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3592       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); } // src_mods
3593   }};
3594 }
3595 
3596 InstructionSelector::ComplexRendererFns
3597 AMDGPUInstructionSelector::selectVOP3NoMods(MachineOperand &Root) const {
3598   Register Reg = Root.getReg();
3599   const MachineInstr *Def = getDefIgnoringCopies(Reg, *MRI);
3600   if (Def && (Def->getOpcode() == AMDGPU::G_FNEG ||
3601               Def->getOpcode() == AMDGPU::G_FABS))
3602     return {};
3603   return {{
3604       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
3605   }};
3606 }
3607 
3608 std::pair<Register, unsigned>
3609 AMDGPUInstructionSelector::selectVOP3PModsImpl(
3610   Register Src, const MachineRegisterInfo &MRI, bool IsDOT) const {
3611   unsigned Mods = 0;
3612   MachineInstr *MI = MRI.getVRegDef(Src);
3613 
3614   if (MI && MI->getOpcode() == AMDGPU::G_FNEG &&
3615       // It's possible to see an f32 fneg here, but unlikely.
3616       // TODO: Treat f32 fneg as only high bit.
3617       MRI.getType(Src) == LLT::fixed_vector(2, 16)) {
3618     Mods ^= (SISrcMods::NEG | SISrcMods::NEG_HI);
3619     Src = MI->getOperand(1).getReg();
3620     MI = MRI.getVRegDef(Src);
3621   }
3622 
3623   // TODO: Match op_sel through g_build_vector_trunc and g_shuffle_vector.
3624   (void)IsDOT; // DOTs do not use OPSEL on gfx940+, check ST.hasDOTOpSelHazard()
3625 
3626   // Packed instructions do not have abs modifiers.
3627   Mods |= SISrcMods::OP_SEL_1;
3628 
3629   return std::make_pair(Src, Mods);
3630 }
3631 
3632 InstructionSelector::ComplexRendererFns
3633 AMDGPUInstructionSelector::selectVOP3PMods(MachineOperand &Root) const {
3634   MachineRegisterInfo &MRI
3635     = Root.getParent()->getParent()->getParent()->getRegInfo();
3636 
3637   Register Src;
3638   unsigned Mods;
3639   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI);
3640 
3641   return {{
3642       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3643       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3644   }};
3645 }
3646 
3647 InstructionSelector::ComplexRendererFns
3648 AMDGPUInstructionSelector::selectVOP3PModsDOT(MachineOperand &Root) const {
3649   MachineRegisterInfo &MRI
3650     = Root.getParent()->getParent()->getParent()->getRegInfo();
3651 
3652   Register Src;
3653   unsigned Mods;
3654   std::tie(Src, Mods) = selectVOP3PModsImpl(Root.getReg(), MRI, true);
3655 
3656   return {{
3657       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3658       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3659   }};
3660 }
3661 
3662 InstructionSelector::ComplexRendererFns
3663 AMDGPUInstructionSelector::selectVOP3Mods_nnan(MachineOperand &Root) const {
3664   Register Src;
3665   unsigned Mods;
3666   std::tie(Src, Mods) = selectVOP3ModsImpl(Root);
3667   if (!isKnownNeverNaN(Src, *MRI))
3668     return None;
3669 
3670   return {{
3671       [=](MachineInstrBuilder &MIB) { MIB.addReg(Src); },
3672       [=](MachineInstrBuilder &MIB) { MIB.addImm(Mods); }  // src_mods
3673   }};
3674 }
3675 
3676 InstructionSelector::ComplexRendererFns
3677 AMDGPUInstructionSelector::selectVOP3OpSelMods(MachineOperand &Root) const {
3678   // FIXME: Handle op_sel
3679   return {{
3680       [=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
3681       [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // src_mods
3682   }};
3683 }
3684 
3685 InstructionSelector::ComplexRendererFns
3686 AMDGPUInstructionSelector::selectSmrdImm(MachineOperand &Root) const {
3687   SmallVector<GEPInfo, 4> AddrInfo;
3688   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3689 
3690   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3691     return None;
3692 
3693   const GEPInfo &GEPInfo = AddrInfo[0];
3694   Optional<int64_t> EncodedImm =
3695       AMDGPU::getSMRDEncodedOffset(STI, GEPInfo.Imm, false);
3696   if (!EncodedImm)
3697     return None;
3698 
3699   unsigned PtrReg = GEPInfo.SgprParts[0];
3700   return {{
3701     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3702     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3703   }};
3704 }
3705 
3706 InstructionSelector::ComplexRendererFns
3707 AMDGPUInstructionSelector::selectSmrdImm32(MachineOperand &Root) const {
3708   SmallVector<GEPInfo, 4> AddrInfo;
3709   getAddrModeInfo(*Root.getParent(), *MRI, AddrInfo);
3710 
3711   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3712     return None;
3713 
3714   const GEPInfo &GEPInfo = AddrInfo[0];
3715   Register PtrReg = GEPInfo.SgprParts[0];
3716   Optional<int64_t> EncodedImm =
3717       AMDGPU::getSMRDEncodedLiteralOffset32(STI, GEPInfo.Imm);
3718   if (!EncodedImm)
3719     return None;
3720 
3721   return {{
3722     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3723     [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }
3724   }};
3725 }
3726 
3727 InstructionSelector::ComplexRendererFns
3728 AMDGPUInstructionSelector::selectSmrdSgpr(MachineOperand &Root) const {
3729   MachineInstr *MI = Root.getParent();
3730   MachineBasicBlock *MBB = MI->getParent();
3731 
3732   SmallVector<GEPInfo, 4> AddrInfo;
3733   getAddrModeInfo(*MI, *MRI, AddrInfo);
3734 
3735   // FIXME: We should shrink the GEP if the offset is known to be <= 32-bits,
3736   // then we can select all ptr + 32-bit offsets not just immediate offsets.
3737   if (AddrInfo.empty() || AddrInfo[0].SgprParts.size() != 1)
3738     return None;
3739 
3740   const GEPInfo &GEPInfo = AddrInfo[0];
3741   // SGPR offset is unsigned.
3742   if (!GEPInfo.Imm || GEPInfo.Imm < 0 || !isUInt<32>(GEPInfo.Imm))
3743     return None;
3744 
3745   // If we make it this far we have a load with an 32-bit immediate offset.
3746   // It is OK to select this using a sgpr offset, because we have already
3747   // failed trying to select this load into one of the _IMM variants since
3748   // the _IMM Patterns are considered before the _SGPR patterns.
3749   Register PtrReg = GEPInfo.SgprParts[0];
3750   Register OffsetReg = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3751   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::S_MOV_B32), OffsetReg)
3752           .addImm(GEPInfo.Imm);
3753   return {{
3754     [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrReg); },
3755     [=](MachineInstrBuilder &MIB) { MIB.addReg(OffsetReg); }
3756   }};
3757 }
3758 
3759 std::pair<Register, int>
3760 AMDGPUInstructionSelector::selectFlatOffsetImpl(MachineOperand &Root,
3761                                                 uint64_t FlatVariant) const {
3762   MachineInstr *MI = Root.getParent();
3763 
3764   auto Default = std::make_pair(Root.getReg(), 0);
3765 
3766   if (!STI.hasFlatInstOffsets())
3767     return Default;
3768 
3769   Register PtrBase;
3770   int64_t ConstOffset;
3771   std::tie(PtrBase, ConstOffset) =
3772       getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
3773   if (ConstOffset == 0)
3774     return Default;
3775 
3776   unsigned AddrSpace = (*MI->memoperands_begin())->getAddrSpace();
3777   if (!TII.isLegalFLATOffset(ConstOffset, AddrSpace, FlatVariant))
3778     return Default;
3779 
3780   return std::make_pair(PtrBase, ConstOffset);
3781 }
3782 
3783 InstructionSelector::ComplexRendererFns
3784 AMDGPUInstructionSelector::selectFlatOffset(MachineOperand &Root) const {
3785   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FLAT);
3786 
3787   return {{
3788       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3789       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3790     }};
3791 }
3792 
3793 InstructionSelector::ComplexRendererFns
3794 AMDGPUInstructionSelector::selectGlobalOffset(MachineOperand &Root) const {
3795   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatGlobal);
3796 
3797   return {{
3798       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3799       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3800   }};
3801 }
3802 
3803 InstructionSelector::ComplexRendererFns
3804 AMDGPUInstructionSelector::selectScratchOffset(MachineOperand &Root) const {
3805   auto PtrWithOffset = selectFlatOffsetImpl(Root, SIInstrFlags::FlatScratch);
3806 
3807   return {{
3808       [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrWithOffset.first); },
3809       [=](MachineInstrBuilder &MIB) { MIB.addImm(PtrWithOffset.second); },
3810     }};
3811 }
3812 
3813 // Match (64-bit SGPR base) + (zext vgpr offset) + sext(imm offset)
3814 InstructionSelector::ComplexRendererFns
3815 AMDGPUInstructionSelector::selectGlobalSAddr(MachineOperand &Root) const {
3816   Register Addr = Root.getReg();
3817   Register PtrBase;
3818   int64_t ConstOffset;
3819   int64_t ImmOffset = 0;
3820 
3821   // Match the immediate offset first, which canonically is moved as low as
3822   // possible.
3823   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3824 
3825   if (ConstOffset != 0) {
3826     if (TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::GLOBAL_ADDRESS,
3827                               SIInstrFlags::FlatGlobal)) {
3828       Addr = PtrBase;
3829       ImmOffset = ConstOffset;
3830     } else {
3831       auto PtrBaseDef = getDefSrcRegIgnoringCopies(PtrBase, *MRI);
3832       if (isSGPR(PtrBaseDef->Reg)) {
3833         if (ConstOffset > 0) {
3834           // Offset is too large.
3835           //
3836           // saddr + large_offset -> saddr +
3837           //                         (voffset = large_offset & ~MaxOffset) +
3838           //                         (large_offset & MaxOffset);
3839           int64_t SplitImmOffset, RemainderOffset;
3840           std::tie(SplitImmOffset, RemainderOffset) = TII.splitFlatOffset(
3841               ConstOffset, AMDGPUAS::GLOBAL_ADDRESS, SIInstrFlags::FlatGlobal);
3842 
3843           if (isUInt<32>(RemainderOffset)) {
3844             MachineInstr *MI = Root.getParent();
3845             MachineBasicBlock *MBB = MI->getParent();
3846             Register HighBits =
3847                 MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3848 
3849             BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
3850                     HighBits)
3851                 .addImm(RemainderOffset);
3852 
3853             return {{
3854                 [=](MachineInstrBuilder &MIB) { MIB.addReg(PtrBase); }, // saddr
3855                 [=](MachineInstrBuilder &MIB) {
3856                   MIB.addReg(HighBits);
3857                 }, // voffset
3858                 [=](MachineInstrBuilder &MIB) { MIB.addImm(SplitImmOffset); },
3859             }};
3860           }
3861         }
3862 
3863         // We are adding a 64 bit SGPR and a constant. If constant bus limit
3864         // is 1 we would need to perform 1 or 2 extra moves for each half of
3865         // the constant and it is better to do a scalar add and then issue a
3866         // single VALU instruction to materialize zero. Otherwise it is less
3867         // instructions to perform VALU adds with immediates or inline literals.
3868         unsigned NumLiterals =
3869             !TII.isInlineConstant(APInt(32, ConstOffset & 0xffffffff)) +
3870             !TII.isInlineConstant(APInt(32, ConstOffset >> 32));
3871         if (STI.getConstantBusLimit(AMDGPU::V_ADD_U32_e64) > NumLiterals)
3872           return None;
3873       }
3874     }
3875   }
3876 
3877   // Match the variable offset.
3878   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3879   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3880     // Look through the SGPR->VGPR copy.
3881     Register SAddr =
3882         getSrcRegIgnoringCopies(AddrDef->MI->getOperand(1).getReg(), *MRI);
3883 
3884     if (SAddr && isSGPR(SAddr)) {
3885       Register PtrBaseOffset = AddrDef->MI->getOperand(2).getReg();
3886 
3887       // It's possible voffset is an SGPR here, but the copy to VGPR will be
3888       // inserted later.
3889       if (Register VOffset = matchZeroExtendFromS32(*MRI, PtrBaseOffset)) {
3890         return {{[=](MachineInstrBuilder &MIB) { // saddr
3891                    MIB.addReg(SAddr);
3892                  },
3893                  [=](MachineInstrBuilder &MIB) { // voffset
3894                    MIB.addReg(VOffset);
3895                  },
3896                  [=](MachineInstrBuilder &MIB) { // offset
3897                    MIB.addImm(ImmOffset);
3898                  }}};
3899       }
3900     }
3901   }
3902 
3903   // FIXME: We should probably have folded COPY (G_IMPLICIT_DEF) earlier, and
3904   // drop this.
3905   if (AddrDef->MI->getOpcode() == AMDGPU::G_IMPLICIT_DEF ||
3906       AddrDef->MI->getOpcode() == AMDGPU::G_CONSTANT || !isSGPR(AddrDef->Reg))
3907     return None;
3908 
3909   // It's cheaper to materialize a single 32-bit zero for vaddr than the two
3910   // moves required to copy a 64-bit SGPR to VGPR.
3911   MachineInstr *MI = Root.getParent();
3912   MachineBasicBlock *MBB = MI->getParent();
3913   Register VOffset = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
3914 
3915   BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32), VOffset)
3916       .addImm(0);
3917 
3918   return {{
3919       [=](MachineInstrBuilder &MIB) { MIB.addReg(AddrDef->Reg); }, // saddr
3920       [=](MachineInstrBuilder &MIB) { MIB.addReg(VOffset); },      // voffset
3921       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); }     // offset
3922   }};
3923 }
3924 
3925 InstructionSelector::ComplexRendererFns
3926 AMDGPUInstructionSelector::selectScratchSAddr(MachineOperand &Root) const {
3927   Register Addr = Root.getReg();
3928   Register PtrBase;
3929   int64_t ConstOffset;
3930   int64_t ImmOffset = 0;
3931 
3932   // Match the immediate offset first, which canonically is moved as low as
3933   // possible.
3934   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3935 
3936   if (ConstOffset != 0 &&
3937       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS,
3938                             SIInstrFlags::FlatScratch)) {
3939     Addr = PtrBase;
3940     ImmOffset = ConstOffset;
3941   }
3942 
3943   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
3944   if (AddrDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
3945     int FI = AddrDef->MI->getOperand(1).getIndex();
3946     return {{
3947         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
3948         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3949     }};
3950   }
3951 
3952   Register SAddr = AddrDef->Reg;
3953 
3954   if (AddrDef->MI->getOpcode() == AMDGPU::G_PTR_ADD) {
3955     Register LHS = AddrDef->MI->getOperand(1).getReg();
3956     Register RHS = AddrDef->MI->getOperand(2).getReg();
3957     auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
3958     auto RHSDef = getDefSrcRegIgnoringCopies(RHS, *MRI);
3959 
3960     if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX &&
3961         isSGPR(RHSDef->Reg)) {
3962       int FI = LHSDef->MI->getOperand(1).getIndex();
3963       MachineInstr &I = *Root.getParent();
3964       MachineBasicBlock *BB = I.getParent();
3965       const DebugLoc &DL = I.getDebugLoc();
3966       SAddr = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
3967 
3968       BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_I32), SAddr)
3969           .addFrameIndex(FI)
3970           .addReg(RHSDef->Reg);
3971     }
3972   }
3973 
3974   if (!isSGPR(SAddr))
3975     return None;
3976 
3977   return {{
3978       [=](MachineInstrBuilder &MIB) { MIB.addReg(SAddr); }, // saddr
3979       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
3980   }};
3981 }
3982 
3983 InstructionSelector::ComplexRendererFns
3984 AMDGPUInstructionSelector::selectScratchSVAddr(MachineOperand &Root) const {
3985   Register Addr = Root.getReg();
3986   Register PtrBase;
3987   int64_t ConstOffset;
3988   int64_t ImmOffset = 0;
3989 
3990   // Match the immediate offset first, which canonically is moved as low as
3991   // possible.
3992   std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(Addr, *MRI);
3993 
3994   if (ConstOffset != 0 &&
3995       TII.isLegalFLATOffset(ConstOffset, AMDGPUAS::PRIVATE_ADDRESS, true)) {
3996     Addr = PtrBase;
3997     ImmOffset = ConstOffset;
3998   }
3999 
4000   auto AddrDef = getDefSrcRegIgnoringCopies(Addr, *MRI);
4001   if (AddrDef->MI->getOpcode() != AMDGPU::G_PTR_ADD)
4002     return None;
4003 
4004   Register RHS = AddrDef->MI->getOperand(2).getReg();
4005   if (RBI.getRegBank(RHS, *MRI, TRI)->getID() != AMDGPU::VGPRRegBankID)
4006     return None;
4007 
4008   Register LHS = AddrDef->MI->getOperand(1).getReg();
4009   auto LHSDef = getDefSrcRegIgnoringCopies(LHS, *MRI);
4010 
4011   if (LHSDef->MI->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4012     int FI = LHSDef->MI->getOperand(1).getIndex();
4013     return {{
4014         [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4015         [=](MachineInstrBuilder &MIB) { MIB.addFrameIndex(FI); }, // saddr
4016         [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4017     }};
4018   }
4019 
4020   if (!isSGPR(LHS))
4021     return None;
4022 
4023   return {{
4024       [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }, // vaddr
4025       [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); }, // saddr
4026       [=](MachineInstrBuilder &MIB) { MIB.addImm(ImmOffset); } // offset
4027   }};
4028 }
4029 
4030 InstructionSelector::ComplexRendererFns
4031 AMDGPUInstructionSelector::selectMUBUFScratchOffen(MachineOperand &Root) const {
4032   MachineInstr *MI = Root.getParent();
4033   MachineBasicBlock *MBB = MI->getParent();
4034   MachineFunction *MF = MBB->getParent();
4035   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4036 
4037   int64_t Offset = 0;
4038   if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&
4039       Offset != TM.getNullPointerValue(AMDGPUAS::PRIVATE_ADDRESS)) {
4040     Register HighBits = MRI->createVirtualRegister(&AMDGPU::VGPR_32RegClass);
4041 
4042     // TODO: Should this be inside the render function? The iterator seems to
4043     // move.
4044     BuildMI(*MBB, MI, MI->getDebugLoc(), TII.get(AMDGPU::V_MOV_B32_e32),
4045             HighBits)
4046       .addImm(Offset & ~4095);
4047 
4048     return {{[=](MachineInstrBuilder &MIB) { // rsrc
4049                MIB.addReg(Info->getScratchRSrcReg());
4050              },
4051              [=](MachineInstrBuilder &MIB) { // vaddr
4052                MIB.addReg(HighBits);
4053              },
4054              [=](MachineInstrBuilder &MIB) { // soffset
4055                // Use constant zero for soffset and rely on eliminateFrameIndex
4056                // to choose the appropriate frame register if need be.
4057                MIB.addImm(0);
4058              },
4059              [=](MachineInstrBuilder &MIB) { // offset
4060                MIB.addImm(Offset & 4095);
4061              }}};
4062   }
4063 
4064   assert(Offset == 0 || Offset == -1);
4065 
4066   // Try to fold a frame index directly into the MUBUF vaddr field, and any
4067   // offsets.
4068   Optional<int> FI;
4069   Register VAddr = Root.getReg();
4070   if (const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg())) {
4071     Register PtrBase;
4072     int64_t ConstOffset;
4073     std::tie(PtrBase, ConstOffset) = getPtrBaseWithConstantOffset(VAddr, *MRI);
4074     if (ConstOffset != 0) {
4075       if (SIInstrInfo::isLegalMUBUFImmOffset(ConstOffset) &&
4076           (!STI.privateMemoryResourceIsRangeChecked() ||
4077            KnownBits->signBitIsZero(PtrBase))) {
4078         const MachineInstr *PtrBaseDef = MRI->getVRegDef(PtrBase);
4079         if (PtrBaseDef->getOpcode() == AMDGPU::G_FRAME_INDEX)
4080           FI = PtrBaseDef->getOperand(1).getIndex();
4081         else
4082           VAddr = PtrBase;
4083         Offset = ConstOffset;
4084       }
4085     } else if (RootDef->getOpcode() == AMDGPU::G_FRAME_INDEX) {
4086       FI = RootDef->getOperand(1).getIndex();
4087     }
4088   }
4089 
4090   return {{[=](MachineInstrBuilder &MIB) { // rsrc
4091              MIB.addReg(Info->getScratchRSrcReg());
4092            },
4093            [=](MachineInstrBuilder &MIB) { // vaddr
4094              if (FI.hasValue())
4095                MIB.addFrameIndex(FI.getValue());
4096              else
4097                MIB.addReg(VAddr);
4098            },
4099            [=](MachineInstrBuilder &MIB) { // soffset
4100              // Use constant zero for soffset and rely on eliminateFrameIndex
4101              // to choose the appropriate frame register if need be.
4102              MIB.addImm(0);
4103            },
4104            [=](MachineInstrBuilder &MIB) { // offset
4105              MIB.addImm(Offset);
4106            }}};
4107 }
4108 
4109 bool AMDGPUInstructionSelector::isDSOffsetLegal(Register Base,
4110                                                 int64_t Offset) const {
4111   if (!isUInt<16>(Offset))
4112     return false;
4113 
4114   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4115     return true;
4116 
4117   // On Southern Islands instruction with a negative base value and an offset
4118   // don't seem to work.
4119   return KnownBits->signBitIsZero(Base);
4120 }
4121 
4122 bool AMDGPUInstructionSelector::isDSOffset2Legal(Register Base, int64_t Offset0,
4123                                                  int64_t Offset1,
4124                                                  unsigned Size) const {
4125   if (Offset0 % Size != 0 || Offset1 % Size != 0)
4126     return false;
4127   if (!isUInt<8>(Offset0 / Size) || !isUInt<8>(Offset1 / Size))
4128     return false;
4129 
4130   if (STI.hasUsableDSOffset() || STI.unsafeDSOffsetFoldingEnabled())
4131     return true;
4132 
4133   // On Southern Islands instruction with a negative base value and an offset
4134   // don't seem to work.
4135   return KnownBits->signBitIsZero(Base);
4136 }
4137 
4138 bool AMDGPUInstructionSelector::isUnneededShiftMask(const MachineInstr &MI,
4139                                                     unsigned ShAmtBits) const {
4140   assert(MI.getOpcode() == TargetOpcode::G_AND);
4141 
4142   Optional<APInt> RHS = getIConstantVRegVal(MI.getOperand(2).getReg(), *MRI);
4143   if (!RHS)
4144     return false;
4145 
4146   if (RHS->countTrailingOnes() >= ShAmtBits)
4147     return true;
4148 
4149   const APInt &LHSKnownZeros =
4150       KnownBits->getKnownZeroes(MI.getOperand(1).getReg());
4151   return (LHSKnownZeros | *RHS).countTrailingOnes() >= ShAmtBits;
4152 }
4153 
4154 // Return the wave level SGPR base address if this is a wave address.
4155 static Register getWaveAddress(const MachineInstr *Def) {
4156   return Def->getOpcode() == AMDGPU::G_AMDGPU_WAVE_ADDRESS
4157              ? Def->getOperand(1).getReg()
4158              : Register();
4159 }
4160 
4161 InstructionSelector::ComplexRendererFns
4162 AMDGPUInstructionSelector::selectMUBUFScratchOffset(
4163     MachineOperand &Root) const {
4164   Register Reg = Root.getReg();
4165   const SIMachineFunctionInfo *Info = MF->getInfo<SIMachineFunctionInfo>();
4166 
4167   const MachineInstr *Def = MRI->getVRegDef(Reg);
4168   if (Register WaveBase = getWaveAddress(Def)) {
4169     return {{
4170         [=](MachineInstrBuilder &MIB) { // rsrc
4171           MIB.addReg(Info->getScratchRSrcReg());
4172         },
4173         [=](MachineInstrBuilder &MIB) { // soffset
4174           MIB.addReg(WaveBase);
4175         },
4176         [=](MachineInstrBuilder &MIB) { MIB.addImm(0); } // offset
4177     }};
4178   }
4179 
4180   int64_t Offset = 0;
4181 
4182   // FIXME: Copy check is a hack
4183   Register BasePtr;
4184   if (mi_match(Reg, *MRI, m_GPtrAdd(m_Reg(BasePtr), m_Copy(m_ICst(Offset))))) {
4185     if (!SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4186       return {};
4187     const MachineInstr *BasePtrDef = MRI->getVRegDef(BasePtr);
4188     Register WaveBase = getWaveAddress(BasePtrDef);
4189     if (!WaveBase)
4190       return {};
4191 
4192     return {{
4193         [=](MachineInstrBuilder &MIB) { // rsrc
4194           MIB.addReg(Info->getScratchRSrcReg());
4195         },
4196         [=](MachineInstrBuilder &MIB) { // soffset
4197           MIB.addReg(WaveBase);
4198         },
4199         [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4200     }};
4201   }
4202 
4203   if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||
4204       !SIInstrInfo::isLegalMUBUFImmOffset(Offset))
4205     return {};
4206 
4207   return {{
4208       [=](MachineInstrBuilder &MIB) { // rsrc
4209         MIB.addReg(Info->getScratchRSrcReg());
4210       },
4211       [=](MachineInstrBuilder &MIB) { // soffset
4212         MIB.addImm(0);
4213       },
4214       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); } // offset
4215   }};
4216 }
4217 
4218 std::pair<Register, unsigned>
4219 AMDGPUInstructionSelector::selectDS1Addr1OffsetImpl(MachineOperand &Root) const {
4220   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4221   if (!RootDef)
4222     return std::make_pair(Root.getReg(), 0);
4223 
4224   int64_t ConstAddr = 0;
4225 
4226   Register PtrBase;
4227   int64_t Offset;
4228   std::tie(PtrBase, Offset) =
4229     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4230 
4231   if (Offset) {
4232     if (isDSOffsetLegal(PtrBase, Offset)) {
4233       // (add n0, c0)
4234       return std::make_pair(PtrBase, Offset);
4235     }
4236   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4237     // TODO
4238 
4239 
4240   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4241     // TODO
4242 
4243   }
4244 
4245   return std::make_pair(Root.getReg(), 0);
4246 }
4247 
4248 InstructionSelector::ComplexRendererFns
4249 AMDGPUInstructionSelector::selectDS1Addr1Offset(MachineOperand &Root) const {
4250   Register Reg;
4251   unsigned Offset;
4252   std::tie(Reg, Offset) = selectDS1Addr1OffsetImpl(Root);
4253   return {{
4254       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4255       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }
4256     }};
4257 }
4258 
4259 InstructionSelector::ComplexRendererFns
4260 AMDGPUInstructionSelector::selectDS64Bit4ByteAligned(MachineOperand &Root) const {
4261   return selectDSReadWrite2(Root, 4);
4262 }
4263 
4264 InstructionSelector::ComplexRendererFns
4265 AMDGPUInstructionSelector::selectDS128Bit8ByteAligned(MachineOperand &Root) const {
4266   return selectDSReadWrite2(Root, 8);
4267 }
4268 
4269 InstructionSelector::ComplexRendererFns
4270 AMDGPUInstructionSelector::selectDSReadWrite2(MachineOperand &Root,
4271                                               unsigned Size) const {
4272   Register Reg;
4273   unsigned Offset;
4274   std::tie(Reg, Offset) = selectDSReadWrite2Impl(Root, Size);
4275   return {{
4276       [=](MachineInstrBuilder &MIB) { MIB.addReg(Reg); },
4277       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); },
4278       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset+1); }
4279     }};
4280 }
4281 
4282 std::pair<Register, unsigned>
4283 AMDGPUInstructionSelector::selectDSReadWrite2Impl(MachineOperand &Root,
4284                                                   unsigned Size) const {
4285   const MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
4286   if (!RootDef)
4287     return std::make_pair(Root.getReg(), 0);
4288 
4289   int64_t ConstAddr = 0;
4290 
4291   Register PtrBase;
4292   int64_t Offset;
4293   std::tie(PtrBase, Offset) =
4294     getPtrBaseWithConstantOffset(Root.getReg(), *MRI);
4295 
4296   if (Offset) {
4297     int64_t OffsetValue0 = Offset;
4298     int64_t OffsetValue1 = Offset + Size;
4299     if (isDSOffset2Legal(PtrBase, OffsetValue0, OffsetValue1, Size)) {
4300       // (add n0, c0)
4301       return std::make_pair(PtrBase, OffsetValue0 / Size);
4302     }
4303   } else if (RootDef->getOpcode() == AMDGPU::G_SUB) {
4304     // TODO
4305 
4306   } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {
4307     // TODO
4308 
4309   }
4310 
4311   return std::make_pair(Root.getReg(), 0);
4312 }
4313 
4314 /// If \p Root is a G_PTR_ADD with a G_CONSTANT on the right hand side, return
4315 /// the base value with the constant offset. There may be intervening copies
4316 /// between \p Root and the identified constant. Returns \p Root, 0 if this does
4317 /// not match the pattern.
4318 std::pair<Register, int64_t>
4319 AMDGPUInstructionSelector::getPtrBaseWithConstantOffset(
4320   Register Root, const MachineRegisterInfo &MRI) const {
4321   MachineInstr *RootI = getDefIgnoringCopies(Root, MRI);
4322   if (RootI->getOpcode() != TargetOpcode::G_PTR_ADD)
4323     return {Root, 0};
4324 
4325   MachineOperand &RHS = RootI->getOperand(2);
4326   Optional<ValueAndVReg> MaybeOffset =
4327       getIConstantVRegValWithLookThrough(RHS.getReg(), MRI);
4328   if (!MaybeOffset)
4329     return {Root, 0};
4330   return {RootI->getOperand(1).getReg(), MaybeOffset->Value.getSExtValue()};
4331 }
4332 
4333 static void addZeroImm(MachineInstrBuilder &MIB) {
4334   MIB.addImm(0);
4335 }
4336 
4337 /// Return a resource descriptor for use with an arbitrary 64-bit pointer. If \p
4338 /// BasePtr is not valid, a null base pointer will be used.
4339 static Register buildRSRC(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4340                           uint32_t FormatLo, uint32_t FormatHi,
4341                           Register BasePtr) {
4342   Register RSrc2 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4343   Register RSrc3 = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
4344   Register RSrcHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4345   Register RSrc = MRI.createVirtualRegister(&AMDGPU::SGPR_128RegClass);
4346 
4347   B.buildInstr(AMDGPU::S_MOV_B32)
4348     .addDef(RSrc2)
4349     .addImm(FormatLo);
4350   B.buildInstr(AMDGPU::S_MOV_B32)
4351     .addDef(RSrc3)
4352     .addImm(FormatHi);
4353 
4354   // Build the half of the subregister with the constants before building the
4355   // full 128-bit register. If we are building multiple resource descriptors,
4356   // this will allow CSEing of the 2-component register.
4357   B.buildInstr(AMDGPU::REG_SEQUENCE)
4358     .addDef(RSrcHi)
4359     .addReg(RSrc2)
4360     .addImm(AMDGPU::sub0)
4361     .addReg(RSrc3)
4362     .addImm(AMDGPU::sub1);
4363 
4364   Register RSrcLo = BasePtr;
4365   if (!BasePtr) {
4366     RSrcLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
4367     B.buildInstr(AMDGPU::S_MOV_B64)
4368       .addDef(RSrcLo)
4369       .addImm(0);
4370   }
4371 
4372   B.buildInstr(AMDGPU::REG_SEQUENCE)
4373     .addDef(RSrc)
4374     .addReg(RSrcLo)
4375     .addImm(AMDGPU::sub0_sub1)
4376     .addReg(RSrcHi)
4377     .addImm(AMDGPU::sub2_sub3);
4378 
4379   return RSrc;
4380 }
4381 
4382 static Register buildAddr64RSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4383                                 const SIInstrInfo &TII, Register BasePtr) {
4384   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4385 
4386   // FIXME: Why are half the "default" bits ignored based on the addressing
4387   // mode?
4388   return buildRSRC(B, MRI, 0, Hi_32(DefaultFormat), BasePtr);
4389 }
4390 
4391 static Register buildOffsetSrc(MachineIRBuilder &B, MachineRegisterInfo &MRI,
4392                                const SIInstrInfo &TII, Register BasePtr) {
4393   uint64_t DefaultFormat = TII.getDefaultRsrcDataFormat();
4394 
4395   // FIXME: Why are half the "default" bits ignored based on the addressing
4396   // mode?
4397   return buildRSRC(B, MRI, -1, Hi_32(DefaultFormat), BasePtr);
4398 }
4399 
4400 AMDGPUInstructionSelector::MUBUFAddressData
4401 AMDGPUInstructionSelector::parseMUBUFAddress(Register Src) const {
4402   MUBUFAddressData Data;
4403   Data.N0 = Src;
4404 
4405   Register PtrBase;
4406   int64_t Offset;
4407 
4408   std::tie(PtrBase, Offset) = getPtrBaseWithConstantOffset(Src, *MRI);
4409   if (isUInt<32>(Offset)) {
4410     Data.N0 = PtrBase;
4411     Data.Offset = Offset;
4412   }
4413 
4414   if (MachineInstr *InputAdd
4415       = getOpcodeDef(TargetOpcode::G_PTR_ADD, Data.N0, *MRI)) {
4416     Data.N2 = InputAdd->getOperand(1).getReg();
4417     Data.N3 = InputAdd->getOperand(2).getReg();
4418 
4419     // FIXME: Need to fix extra SGPR->VGPRcopies inserted
4420     // FIXME: Don't know this was defined by operand 0
4421     //
4422     // TODO: Remove this when we have copy folding optimizations after
4423     // RegBankSelect.
4424     Data.N2 = getDefIgnoringCopies(Data.N2, *MRI)->getOperand(0).getReg();
4425     Data.N3 = getDefIgnoringCopies(Data.N3, *MRI)->getOperand(0).getReg();
4426   }
4427 
4428   return Data;
4429 }
4430 
4431 /// Return if the addr64 mubuf mode should be used for the given address.
4432 bool AMDGPUInstructionSelector::shouldUseAddr64(MUBUFAddressData Addr) const {
4433   // (ptr_add N2, N3) -> addr64, or
4434   // (ptr_add (ptr_add N2, N3), C1) -> addr64
4435   if (Addr.N2)
4436     return true;
4437 
4438   const RegisterBank *N0Bank = RBI.getRegBank(Addr.N0, *MRI, TRI);
4439   return N0Bank->getID() == AMDGPU::VGPRRegBankID;
4440 }
4441 
4442 /// Split an immediate offset \p ImmOffset depending on whether it fits in the
4443 /// immediate field. Modifies \p ImmOffset and sets \p SOffset to the variable
4444 /// component.
4445 void AMDGPUInstructionSelector::splitIllegalMUBUFOffset(
4446   MachineIRBuilder &B, Register &SOffset, int64_t &ImmOffset) const {
4447   if (SIInstrInfo::isLegalMUBUFImmOffset(ImmOffset))
4448     return;
4449 
4450   // Illegal offset, store it in soffset.
4451   SOffset = MRI->createVirtualRegister(&AMDGPU::SReg_32RegClass);
4452   B.buildInstr(AMDGPU::S_MOV_B32)
4453     .addDef(SOffset)
4454     .addImm(ImmOffset);
4455   ImmOffset = 0;
4456 }
4457 
4458 bool AMDGPUInstructionSelector::selectMUBUFAddr64Impl(
4459   MachineOperand &Root, Register &VAddr, Register &RSrcReg,
4460   Register &SOffset, int64_t &Offset) const {
4461   // FIXME: Predicates should stop this from reaching here.
4462   // addr64 bit was removed for volcanic islands.
4463   if (!STI.hasAddr64() || STI.useFlatForGlobal())
4464     return false;
4465 
4466   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4467   if (!shouldUseAddr64(AddrData))
4468     return false;
4469 
4470   Register N0 = AddrData.N0;
4471   Register N2 = AddrData.N2;
4472   Register N3 = AddrData.N3;
4473   Offset = AddrData.Offset;
4474 
4475   // Base pointer for the SRD.
4476   Register SRDPtr;
4477 
4478   if (N2) {
4479     if (RBI.getRegBank(N2, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4480       assert(N3);
4481       if (RBI.getRegBank(N3, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4482         // Both N2 and N3 are divergent. Use N0 (the result of the add) as the
4483         // addr64, and construct the default resource from a 0 address.
4484         VAddr = N0;
4485       } else {
4486         SRDPtr = N3;
4487         VAddr = N2;
4488       }
4489     } else {
4490       // N2 is not divergent.
4491       SRDPtr = N2;
4492       VAddr = N3;
4493     }
4494   } else if (RBI.getRegBank(N0, *MRI, TRI)->getID() == AMDGPU::VGPRRegBankID) {
4495     // Use the default null pointer in the resource
4496     VAddr = N0;
4497   } else {
4498     // N0 -> offset, or
4499     // (N0 + C1) -> offset
4500     SRDPtr = N0;
4501   }
4502 
4503   MachineIRBuilder B(*Root.getParent());
4504   RSrcReg = buildAddr64RSrc(B, *MRI, TII, SRDPtr);
4505   splitIllegalMUBUFOffset(B, SOffset, Offset);
4506   return true;
4507 }
4508 
4509 bool AMDGPUInstructionSelector::selectMUBUFOffsetImpl(
4510   MachineOperand &Root, Register &RSrcReg, Register &SOffset,
4511   int64_t &Offset) const {
4512 
4513   // FIXME: Pattern should not reach here.
4514   if (STI.useFlatForGlobal())
4515     return false;
4516 
4517   MUBUFAddressData AddrData = parseMUBUFAddress(Root.getReg());
4518   if (shouldUseAddr64(AddrData))
4519     return false;
4520 
4521   // N0 -> offset, or
4522   // (N0 + C1) -> offset
4523   Register SRDPtr = AddrData.N0;
4524   Offset = AddrData.Offset;
4525 
4526   // TODO: Look through extensions for 32-bit soffset.
4527   MachineIRBuilder B(*Root.getParent());
4528 
4529   RSrcReg = buildOffsetSrc(B, *MRI, TII, SRDPtr);
4530   splitIllegalMUBUFOffset(B, SOffset, Offset);
4531   return true;
4532 }
4533 
4534 InstructionSelector::ComplexRendererFns
4535 AMDGPUInstructionSelector::selectMUBUFAddr64(MachineOperand &Root) const {
4536   Register VAddr;
4537   Register RSrcReg;
4538   Register SOffset;
4539   int64_t Offset = 0;
4540 
4541   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4542     return {};
4543 
4544   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4545   // pattern.
4546   return {{
4547       [=](MachineInstrBuilder &MIB) {  // rsrc
4548         MIB.addReg(RSrcReg);
4549       },
4550       [=](MachineInstrBuilder &MIB) { // vaddr
4551         MIB.addReg(VAddr);
4552       },
4553       [=](MachineInstrBuilder &MIB) { // soffset
4554         if (SOffset)
4555           MIB.addReg(SOffset);
4556         else
4557           MIB.addImm(0);
4558       },
4559       [=](MachineInstrBuilder &MIB) { // offset
4560         MIB.addImm(Offset);
4561       },
4562       addZeroImm, //  cpol
4563       addZeroImm, //  tfe
4564       addZeroImm  //  swz
4565     }};
4566 }
4567 
4568 InstructionSelector::ComplexRendererFns
4569 AMDGPUInstructionSelector::selectMUBUFOffset(MachineOperand &Root) const {
4570   Register RSrcReg;
4571   Register SOffset;
4572   int64_t Offset = 0;
4573 
4574   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4575     return {};
4576 
4577   return {{
4578       [=](MachineInstrBuilder &MIB) {  // rsrc
4579         MIB.addReg(RSrcReg);
4580       },
4581       [=](MachineInstrBuilder &MIB) { // soffset
4582         if (SOffset)
4583           MIB.addReg(SOffset);
4584         else
4585           MIB.addImm(0);
4586       },
4587       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4588       addZeroImm, //  cpol
4589       addZeroImm, //  tfe
4590       addZeroImm, //  swz
4591     }};
4592 }
4593 
4594 InstructionSelector::ComplexRendererFns
4595 AMDGPUInstructionSelector::selectMUBUFAddr64Atomic(MachineOperand &Root) const {
4596   Register VAddr;
4597   Register RSrcReg;
4598   Register SOffset;
4599   int64_t Offset = 0;
4600 
4601   if (!selectMUBUFAddr64Impl(Root, VAddr, RSrcReg, SOffset, Offset))
4602     return {};
4603 
4604   // FIXME: Use defaulted operands for trailing 0s and remove from the complex
4605   // pattern.
4606   return {{
4607       [=](MachineInstrBuilder &MIB) {  // rsrc
4608         MIB.addReg(RSrcReg);
4609       },
4610       [=](MachineInstrBuilder &MIB) { // vaddr
4611         MIB.addReg(VAddr);
4612       },
4613       [=](MachineInstrBuilder &MIB) { // soffset
4614         if (SOffset)
4615           MIB.addReg(SOffset);
4616         else
4617           MIB.addImm(0);
4618       },
4619       [=](MachineInstrBuilder &MIB) { // offset
4620         MIB.addImm(Offset);
4621       },
4622       [=](MachineInstrBuilder &MIB) {
4623         MIB.addImm(AMDGPU::CPol::GLC); // cpol
4624       }
4625     }};
4626 }
4627 
4628 InstructionSelector::ComplexRendererFns
4629 AMDGPUInstructionSelector::selectMUBUFOffsetAtomic(MachineOperand &Root) const {
4630   Register RSrcReg;
4631   Register SOffset;
4632   int64_t Offset = 0;
4633 
4634   if (!selectMUBUFOffsetImpl(Root, RSrcReg, SOffset, Offset))
4635     return {};
4636 
4637   return {{
4638       [=](MachineInstrBuilder &MIB) {  // rsrc
4639         MIB.addReg(RSrcReg);
4640       },
4641       [=](MachineInstrBuilder &MIB) { // soffset
4642         if (SOffset)
4643           MIB.addReg(SOffset);
4644         else
4645           MIB.addImm(0);
4646       },
4647       [=](MachineInstrBuilder &MIB) { MIB.addImm(Offset); }, // offset
4648       [=](MachineInstrBuilder &MIB) { MIB.addImm(AMDGPU::CPol::GLC); } // cpol
4649     }};
4650 }
4651 
4652 /// Get an immediate that must be 32-bits, and treated as zero extended.
4653 static Optional<uint64_t> getConstantZext32Val(Register Reg,
4654                                                const MachineRegisterInfo &MRI) {
4655   // getIConstantVRegVal sexts any values, so see if that matters.
4656   Optional<int64_t> OffsetVal = getIConstantVRegSExtVal(Reg, MRI);
4657   if (!OffsetVal || !isInt<32>(*OffsetVal))
4658     return None;
4659   return Lo_32(*OffsetVal);
4660 }
4661 
4662 InstructionSelector::ComplexRendererFns
4663 AMDGPUInstructionSelector::selectSMRDBufferImm(MachineOperand &Root) const {
4664   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4665   if (!OffsetVal)
4666     return {};
4667 
4668   Optional<int64_t> EncodedImm =
4669       AMDGPU::getSMRDEncodedOffset(STI, *OffsetVal, true);
4670   if (!EncodedImm)
4671     return {};
4672 
4673   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4674 }
4675 
4676 InstructionSelector::ComplexRendererFns
4677 AMDGPUInstructionSelector::selectSMRDBufferImm32(MachineOperand &Root) const {
4678   assert(STI.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS);
4679 
4680   Optional<uint64_t> OffsetVal = getConstantZext32Val(Root.getReg(), *MRI);
4681   if (!OffsetVal)
4682     return {};
4683 
4684   Optional<int64_t> EncodedImm
4685     = AMDGPU::getSMRDEncodedLiteralOffset32(STI, *OffsetVal);
4686   if (!EncodedImm)
4687     return {};
4688 
4689   return {{ [=](MachineInstrBuilder &MIB) { MIB.addImm(*EncodedImm); }  }};
4690 }
4691 
4692 void AMDGPUInstructionSelector::renderTruncImm32(MachineInstrBuilder &MIB,
4693                                                  const MachineInstr &MI,
4694                                                  int OpIdx) const {
4695   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4696          "Expected G_CONSTANT");
4697   MIB.addImm(MI.getOperand(1).getCImm()->getSExtValue());
4698 }
4699 
4700 void AMDGPUInstructionSelector::renderNegateImm(MachineInstrBuilder &MIB,
4701                                                 const MachineInstr &MI,
4702                                                 int OpIdx) const {
4703   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4704          "Expected G_CONSTANT");
4705   MIB.addImm(-MI.getOperand(1).getCImm()->getSExtValue());
4706 }
4707 
4708 void AMDGPUInstructionSelector::renderBitcastImm(MachineInstrBuilder &MIB,
4709                                                  const MachineInstr &MI,
4710                                                  int OpIdx) const {
4711   assert(OpIdx == -1);
4712 
4713   const MachineOperand &Op = MI.getOperand(1);
4714   if (MI.getOpcode() == TargetOpcode::G_FCONSTANT)
4715     MIB.addImm(Op.getFPImm()->getValueAPF().bitcastToAPInt().getZExtValue());
4716   else {
4717     assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && "Expected G_CONSTANT");
4718     MIB.addImm(Op.getCImm()->getSExtValue());
4719   }
4720 }
4721 
4722 void AMDGPUInstructionSelector::renderPopcntImm(MachineInstrBuilder &MIB,
4723                                                 const MachineInstr &MI,
4724                                                 int OpIdx) const {
4725   assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
4726          "Expected G_CONSTANT");
4727   MIB.addImm(MI.getOperand(1).getCImm()->getValue().countPopulation());
4728 }
4729 
4730 /// This only really exists to satisfy DAG type checking machinery, so is a
4731 /// no-op here.
4732 void AMDGPUInstructionSelector::renderTruncTImm(MachineInstrBuilder &MIB,
4733                                                 const MachineInstr &MI,
4734                                                 int OpIdx) const {
4735   MIB.addImm(MI.getOperand(OpIdx).getImm());
4736 }
4737 
4738 void AMDGPUInstructionSelector::renderExtractCPol(MachineInstrBuilder &MIB,
4739                                                   const MachineInstr &MI,
4740                                                   int OpIdx) const {
4741   assert(OpIdx >= 0 && "expected to match an immediate operand");
4742   MIB.addImm(MI.getOperand(OpIdx).getImm() & AMDGPU::CPol::ALL);
4743 }
4744 
4745 void AMDGPUInstructionSelector::renderExtractSWZ(MachineInstrBuilder &MIB,
4746                                                  const MachineInstr &MI,
4747                                                  int OpIdx) const {
4748   assert(OpIdx >= 0 && "expected to match an immediate operand");
4749   MIB.addImm((MI.getOperand(OpIdx).getImm() >> 3) & 1);
4750 }
4751 
4752 void AMDGPUInstructionSelector::renderSetGLC(MachineInstrBuilder &MIB,
4753                                              const MachineInstr &MI,
4754                                              int OpIdx) const {
4755   assert(OpIdx >= 0 && "expected to match an immediate operand");
4756   MIB.addImm(MI.getOperand(OpIdx).getImm() | AMDGPU::CPol::GLC);
4757 }
4758 
4759 void AMDGPUInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
4760                                                  const MachineInstr &MI,
4761                                                  int OpIdx) const {
4762   MIB.addFrameIndex((MI.getOperand(1).getIndex()));
4763 }
4764 
4765 bool AMDGPUInstructionSelector::isInlineImmediate16(int64_t Imm) const {
4766   return AMDGPU::isInlinableLiteral16(Imm, STI.hasInv2PiInlineImm());
4767 }
4768 
4769 bool AMDGPUInstructionSelector::isInlineImmediate32(int64_t Imm) const {
4770   return AMDGPU::isInlinableLiteral32(Imm, STI.hasInv2PiInlineImm());
4771 }
4772 
4773 bool AMDGPUInstructionSelector::isInlineImmediate64(int64_t Imm) const {
4774   return AMDGPU::isInlinableLiteral64(Imm, STI.hasInv2PiInlineImm());
4775 }
4776 
4777 bool AMDGPUInstructionSelector::isInlineImmediate(const APFloat &Imm) const {
4778   return TII.isInlineConstant(Imm);
4779 }
4780